text | repo_name | path | language | license | size | score
stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34
---|---|---|---|---|---|---
"""
Django settings for hello project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')nw@1z2xt-dy2f$1mfpzyuohxv-tmu4+5-q55)*(e6obam-p=4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
's3direct',
'cat',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# If AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are not defined,
# django-s3direct will attempt to use the EC2 instance profile instead.
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', '')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', '')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME', 'test-bucket')
S3DIRECT_REGION = os.environ.get('S3DIRECT_REGION', 'us-east-1')
def create_filename(filename):
import uuid
ext = filename.split('.')[-1]
filename = '%s.%s' % (uuid.uuid4().hex, ext)
return os.path.join('custom', filename)
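# For illustration only: create_filename('photo.jpg') returns something like
# 'custom/3f0e9c...b1.jpg', i.e. a uuid4 hex name that keeps the original
# extension, stored under the 'custom/' prefix. It is wired up via the
# 'custom_filename' destination below.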
S3DIRECT_DESTINATIONS = {
# Allow anybody to upload any MIME type
'misc': {
'key': '/'
},
# Allow staff users to upload any MIME type
'pdfs': {
'key': 'uploads/pdfs',
'auth': lambda u: u.is_staff
},
# Allow anybody to upload JPEGs and PNGs. Limit sizes to 5 KB - 20 MB
'images': {
'key': 'uploads/images',
'auth': lambda u: True,
'allowed': [
'image/jpeg',
'image/png'
],
'content_length_range': (5000, 20000000),
},
# Allow authenticated users to upload mp4's
'videos': {
'key': 'uploads/videos',
'auth': lambda u: u.is_authenticated(),
'allowed': ['video/mp4']
},
# Allow anybody to upload any MIME type with a custom name function
'custom_filename': {
'key': create_filename
},
}
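# A minimal sketch of how these destinations are typically referenced from a
# model field. This assumes django-s3direct's S3DirectField API; the 'Cat'
# model and its fields below are hypothetical, not part of this example app:
#
#     # e.g. in cat/models.py
#     from django.db import models
#     from s3direct.fields import S3DirectField
#
#     class Cat(models.Model):
#         image = S3DirectField(dest='images')   # 'images' destination: jpeg/png, 5 KB - 20 MB
#         video = S3DirectField(dest='videos')   # 'videos' destination: authenticated users only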
| AlexRiina/django-s3direct | example/example/settings.py | Python | mit | 4,212 | 0.000237 |
"""
"""
from .register import get_registered_layers
#custom layer import begins
import axpy
import flatten
import argmax
import reshape
import roipooling
import priorbox
import permute
import detection_out
import normalize
import select
import crop
import reduction
#custom layer import ends
custom_layers = get_registered_layers()
def set_args(f, params, node=None):
""" set args for function 'f' using the parameters in node.layer.parameters
Args:
f (function): a python function object
params (object): an object that contains the attributes needed by f's arguments
Returns:
arg_names (list): a list of argument names
kwargs (dict): a dict contains needed arguments
"""
from ..protobuf_to_dict import protobuf_to_dict
argc = f.__code__.co_argcount
arg_list = f.__code__.co_varnames[0:argc]
kwargs = {}
for arg_name in arg_list:
if arg_name in params:
kwargs[arg_name] = params[arg_name]
if node is not None and len(node.metadata):
kwargs.update(node.metadata)
return arg_list, kwargs
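# A rough illustration of what set_args produces (the shape function below is
# hypothetical, not one of the layers registered above):
#
#     def flatten_shape(input_shape, axis=1):
#         pass
#
#     set_args(flatten_shape, {'axis': 1, 'end_axis': -1})
#     # -> (('input_shape', 'axis'), {'axis': 1})
#
# Only names that appear in f's argument list are copied into kwargs, and
# node.metadata (when a node is given) is merged on top of them.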
def has_layer(kind):
""" test whether this layer exists in custom layer
"""
return kind in custom_layers
def compute_output_shape(kind, node):
assert kind in custom_layers, "layer[%s] not exist in custom layers" % (
kind)
shape_func = custom_layers[kind]['shape']
parents = node.parents
inputs = [list(p.output_shape) for p in parents]
arg_names, kwargs = set_args(shape_func, node.params)
if len(inputs) == 1:
inputs = inputs[0]
return shape_func(inputs, **kwargs)
def make_node(template, kind, node):
""" make a PaddleNode for custom layer which means construct
a piece of code to define a layer implemented in 'custom_layers'
Args:
@template (PaddleNode): a factory to new a instance of PaddleNode
@kind (str): type of custom layer
@node (graph.Node): a layer in the net
Returns:
instance of PaddleNode
"""
assert kind in custom_layers, "layer[%s] not exist in custom layers" % (
kind)
layer_func = custom_layers[kind]['layer']
#construct arguments needed by custom layer function from node's parameters
arg_names, kwargs = set_args(layer_func, node.params, node)
return template('custom_layer', kind, **kwargs)
def make_custom_layer(kind, inputs, name, *args, **kwargs):
""" execute a custom layer which is implemented by users
Args:
@kind (str): type name of this layer
@inputs (vars): variable list created by fluid
@name (str): name for this layer
@args (tuple): other positional arguments
@kwargs (dict): other kv arguments
Returns:
output (var): output variable for this layer
"""
assert kind in custom_layers, "layer[%s] not exist in custom layers" % (
kind)
layer_func = custom_layers[kind]['layer']
return layer_func(inputs, name, *args, **kwargs)
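# A rough usage sketch of how these helpers fit together (the caller shown
# here is hypothetical; the real call sites live in the converter code):
#
#     if has_layer(node.kind):
#         output_shape = compute_output_shape(node.kind, node)
#         code = make_node(template, node.kind, node)
#
# i.e. the converter first checks the registry, then uses the registered
# 'shape' function for shape inference and the registered 'layer' function
# (through make_node / make_custom_layer) to emit or execute the layer.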
| lcy-seso/models | fluid/image_classification/caffe2fluid/kaffe/custom_layers/__init__.py | Python | apache-2.0 | 2,996 | 0.001335 |
# -*- encoding: utf-8 -*-
"""Test class for Locations UI"""
from fauxfactory import gen_ipaddr, gen_string
from nailgun import entities
from robottelo.config import settings
from robottelo.datafactory import generate_strings_list, invalid_values_list
from robottelo.decorators import run_only_on, tier1, tier2
from robottelo.constants import (
ANY_CONTEXT,
INSTALL_MEDIUM_URL,
LIBVIRT_RESOURCE_URL,
OS_TEMPLATE_DATA_FILE,
)
from robottelo.helpers import get_data_file
from robottelo.test import UITestCase
from robottelo.ui.factory import make_loc, make_templates, set_context
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.session import Session
def valid_org_loc_data():
"""Returns a list of valid org/location data"""
return [
{'org_name': gen_string('alpha', 10),
'loc_name': gen_string('alpha', 10)},
{'org_name': gen_string('numeric', 10),
'loc_name': gen_string('numeric', 10)},
{'org_name': gen_string('alphanumeric', 10),
'loc_name': gen_string('alphanumeric', 10)},
{'org_name': gen_string('utf8', 10),
'loc_name': gen_string('utf8', 10)},
{'org_name': gen_string('latin1', 20),
'loc_name': gen_string('latin1', 10)},
{'org_name': gen_string('html', 20),
'loc_name': gen_string('html', 10)}
]
def valid_env_names():
"""Returns a list of valid environment names"""
return [
gen_string('alpha'),
gen_string('numeric'),
gen_string('alphanumeric'),
]
class LocationTestCase(UITestCase):
"""Implements Location tests in UI"""
location = None
# Auto Search
@run_only_on('sat')
@tier1
def test_positive_auto_search(self):
"""Can auto-complete search for location by partial name
@feature: Locations
@assert: Created location can be auto-searched by its partial name
"""
loc_name = gen_string('alpha')
with Session(self.browser) as session:
page = session.nav.go_to_loc
make_loc(session, name=loc_name)
auto_search = self.location.auto_complete_search(
page,
locators['location.select_name'],
loc_name[:3],
loc_name,
search_key='name'
)
self.assertIsNotNone(auto_search)
# Positive Create
@run_only_on('sat')
@tier1
def test_positive_create_with_name(self):
"""Create Location with valid name only
@feature: Locations
@assert: Location is created, label is auto-generated
"""
with Session(self.browser) as session:
for loc_name in generate_strings_list():
with self.subTest(loc_name):
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
@run_only_on('sat')
@tier1
def test_negative_create_with_invalid_names(self):
"""Create location with invalid name
@feature: Locations
@assert: location is not created
"""
with Session(self.browser) as session:
for loc_name in invalid_values_list(interface='ui'):
with self.subTest(loc_name):
make_loc(session, name=loc_name)
error = session.nav.wait_until_element(
common_locators['name_haserror'])
self.assertIsNotNone(error)
@run_only_on('sat')
@tier1
def test_negative_create_with_same_name(self):
"""Create location with valid values, then create a new one
with same values.
@feature: Locations
@assert: location is not created
"""
loc_name = gen_string('utf8')
with Session(self.browser) as session:
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
make_loc(session, name=loc_name)
error = session.nav.wait_until_element(
common_locators['name_haserror'])
self.assertIsNotNone(error)
@run_only_on('sat')
@tier2
def test_positive_create_with_location_and_org(self):
"""Create and select both organization and location.
@feature: Locations
@assert: Both organization and location are selected.
"""
with Session(self.browser) as session:
for test_data in valid_org_loc_data():
with self.subTest(test_data):
org_name = test_data['org_name']
loc_name = test_data['loc_name']
org = entities.Organization(name=org_name).create()
self.assertEqual(org.name, org_name)
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
location = session.nav.go_to_select_loc(loc_name)
organization = session.nav.go_to_select_org(org_name)
self.assertEqual(location, loc_name)
self.assertEqual(organization, org_name)
# Positive Update
@run_only_on('sat')
@tier1
def test_positive_update_name(self):
"""Create Location with valid values then update its name
@feature: Locations
@assert: Location name is updated
"""
loc_name = gen_string('alpha')
with Session(self.browser) as session:
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
for new_name in generate_strings_list():
with self.subTest(new_name):
self.location.update(loc_name, new_name=new_name)
self.assertIsNotNone(self.location.search(new_name))
loc_name = new_name # for next iteration
# Negative Update
@run_only_on('sat')
@tier1
def test_negative_update_with_too_long_name(self):
"""Create Location with valid values then fail to update
its name
@feature: Locations
@assert: Location name is not updated
"""
loc_name = gen_string('alphanumeric')
with Session(self.browser) as session:
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
new_name = gen_string('alpha', 247)
self.location.update(loc_name, new_name=new_name)
error = session.nav.wait_until_element(
common_locators['name_haserror'])
self.assertIsNotNone(error)
@run_only_on('sat')
@tier1
def test_positive_delete(self):
"""Create location with valid values then delete it.
@feature: Location Positive Delete test.
@assert: Location is deleted
"""
with Session(self.browser) as session:
for loc_name in generate_strings_list():
with self.subTest(loc_name):
entities.Location(name=loc_name).create()
session.nav.go_to_loc()
self.location.delete(loc_name)
@run_only_on('sat')
@tier2
def test_positive_add_subnet(self):
"""Add a subnet by using location name and subnet name
@feature: Locations
@assert: subnet is added
"""
strategy, value = common_locators['entity_deselect']
with Session(self.browser) as session:
for subnet_name in generate_strings_list():
with self.subTest(subnet_name):
loc_name = gen_string('alpha')
subnet = entities.Subnet(
name=subnet_name,
network=gen_ipaddr(ip3=True),
mask='255.255.255.0',
).create()
self.assertEqual(subnet.name, subnet_name)
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
self.location.update(loc_name, new_subnets=[subnet_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_subnets'])
element = session.nav.wait_until_element(
(strategy, value % subnet_name))
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_add_domain(self):
"""Add a domain to a Location
@feature: Locations
@assert: Domain is added to Location
"""
strategy, value = common_locators['entity_deselect']
with Session(self.browser) as session:
for domain_name in generate_strings_list():
with self.subTest(domain_name):
loc_name = gen_string('alpha')
domain = entities.Domain(name=domain_name).create()
self.assertEqual(domain.name, domain_name)
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
self.location.update(loc_name, new_domains=[domain_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_domains'])
element = session.nav.wait_until_element(
(strategy, value % domain_name))
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_add_user(self):
"""Create user then add that user by using the location name
@feature: Locations
@assert: User is added to location
"""
strategy, value = common_locators['entity_deselect']
with Session(self.browser) as session:
# User names do not accept HTML values
for user_name in generate_strings_list(
length=10,
exclude_types=['html']):
with self.subTest(user_name):
loc_name = gen_string('alpha')
password = gen_string('alpha')
user = entities.User(
login=user_name,
firstname=user_name,
lastname=user_name,
password=password,
).create()
self.assertEqual(user.login, user_name)
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
self.location.update(loc_name, new_users=[user_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_users'])
element = session.nav.wait_until_element(
(strategy, value % user_name))
self.assertIsNotNone(element)
@run_only_on('sat')
@tier1
def test_positive_check_all_values_hostgroup(self):
"""check whether host group has the 'All values' checked.
@feature: Locations
@assert: host group 'All values' checkbox is checked.
"""
loc_name = gen_string('alpha')
with Session(self.browser) as session:
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
selected = self.location.check_all_values(
session.nav.go_to_loc,
loc_name,
locators['location.select_name'],
tab_locators['context.tab_hostgrps'],
context='location',
)
self.assertIsNotNone(selected)
@run_only_on('sat')
@tier2
def test_positive_add_hostgroup(self):
"""Add a hostgroup by using the location name and hostgroup name
@feature: Locations
@assert: hostgroup is added to location
"""
strategy, value = common_locators['all_values_selection']
with Session(self.browser) as session:
for host_grp_name in generate_strings_list():
with self.subTest(host_grp_name):
loc_name = gen_string('alpha')
host_grp = entities.HostGroup(name=host_grp_name).create()
self.assertEqual(host_grp.name, host_grp_name)
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_hostgrps'])
element = session.nav.wait_until_element(
(strategy, value % host_grp_name))
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_add_org(self):
"""Add a organization by using the location name
@feature: Locations
@assert: organization is added to location
"""
strategy, value = common_locators['entity_deselect']
with Session(self.browser) as session:
for org_name in generate_strings_list():
with self.subTest(org_name):
loc_name = gen_string('alpha')
org = entities.Organization(name=org_name).create()
self.assertEqual(org.name, org_name)
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
self.location.update(
loc_name, new_organizations=[org_name])
self.location.search(loc_name).click()
session.nav.click(
tab_locators['context.tab_organizations'])
element = session.nav.wait_until_element(
(strategy, value % org_name))
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_add_environment(self):
"""Add environment by using location name and environment name
@feature: Locations
@assert: environment is added
"""
strategy, value = common_locators['entity_deselect']
with Session(self.browser) as session:
for env_name in valid_env_names():
with self.subTest(env_name):
loc_name = gen_string('alpha')
env = entities.Environment(name=env_name).create()
self.assertEqual(env.name, env_name)
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
self.location.update(loc_name, new_envs=[env_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_env'])
element = session.nav.wait_until_element(
(strategy, value % env_name))
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_add_compresource(self):
"""Add compute resource using the location name and
compute resource name
@feature: Locations
@assert: compute resource is added successfully
"""
strategy, value = common_locators['entity_deselect']
with Session(self.browser) as session:
for resource_name in generate_strings_list():
with self.subTest(resource_name):
loc_name = gen_string('alpha')
url = LIBVIRT_RESOURCE_URL % settings.server.hostname
resource = entities.LibvirtComputeResource(
name=resource_name, url=url).create()
self.assertEqual(resource.name, resource_name)
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
self.location.update(
loc_name, new_resources=[resource_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_resources'])
element = session.nav.wait_until_element(
(strategy, value % resource_name))
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_add_medium(self):
"""Add medium by using the location name and medium name
@feature: Locations
@assert: medium is added
"""
strategy, value = common_locators['entity_deselect']
with Session(self.browser) as session:
for medium_name in generate_strings_list():
with self.subTest(medium_name):
loc_name = gen_string('alpha')
medium = entities.Media(
name=medium_name,
path_=INSTALL_MEDIUM_URL % gen_string('alpha', 6),
os_family='Redhat',
).create()
self.assertEqual(medium.name, medium_name)
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
self.location.update(loc_name, new_medias=[medium_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_media'])
element = session.nav.wait_until_element(
(strategy, value % medium_name))
self.assertIsNotNone(element)
@run_only_on('sat')
@tier1
def test_positive_check_all_values_template(self):
"""check whether config template has the 'All values' checked.
@feature: Locations
@assert: configtemplate 'All values' checkbox is checked.
"""
loc_name = gen_string('alpha')
with Session(self.browser) as session:
page = session.nav.go_to_loc
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
selected = self.location.check_all_values(
page, loc_name, locators['location.select_name'],
tab_locators['context.tab_template'], context='location')
self.assertIsNotNone(selected)
@run_only_on('sat')
@tier2
def test_positive_add_template(self):
"""Add config template by using location name and config
template name.
@feature: Locations
@assert: config template is added.
"""
strategy, value = common_locators['all_values_selection']
with Session(self.browser) as session:
for template in generate_strings_list():
with self.subTest(template):
loc_name = gen_string('alpha')
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
make_templates(
session,
name=template,
template_path=get_data_file(OS_TEMPLATE_DATA_FILE),
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.search(template))
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_template'])
element = session.nav.wait_until_element(
(strategy, value % template))
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_environment(self):
"""Remove environment by using location name & environment name
@feature: Locations
@assert: environment is removed from Location
"""
strategy, value = common_locators['entity_select']
strategy1, value1 = common_locators['entity_deselect']
with Session(self.browser) as session:
for env_name in valid_env_names():
with self.subTest(env_name):
loc_name = gen_string('alpha')
env = entities.Environment(name=env_name).create()
self.assertEqual(env.name, env_name)
set_context(session, org=ANY_CONTEXT['org'])
make_loc(session, name=loc_name, envs=[env_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_env'])
element = session.nav.wait_until_element(
(strategy1, value1 % env_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.location.update(loc_name, envs=[env_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_env'])
element = session.nav.wait_until_element(
(strategy, value % env_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_subnet(self):
"""Remove subnet by using location name and subnet name
@feature: Locations
@assert: subnet is added then removed
"""
strategy, value = common_locators['entity_select']
strategy1, value1 = common_locators['entity_deselect']
with Session(self.browser) as session:
for subnet_name in generate_strings_list():
with self.subTest(subnet_name):
loc_name = gen_string('alpha')
subnet = entities.Subnet(
name=subnet_name,
network=gen_ipaddr(ip3=True),
mask='255.255.255.0',
).create()
self.assertEqual(subnet.name, subnet_name)
set_context(session, org=ANY_CONTEXT['org'])
make_loc(session, name=loc_name, subnets=[subnet_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_subnets'])
element = session.nav.wait_until_element(
(strategy1, value1 % subnet_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.location.update(loc_name, subnets=[subnet_name])
self.location.search(loc_name).click()
self.location.click(tab_locators['context.tab_subnets'])
element = session.nav.wait_until_element(
(strategy, value % subnet_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_domain(self):
"""Add a domain to an location and remove it by location name
and domain name
@feature: Locations
@assert: the domain is removed from the location
"""
strategy, value = common_locators['entity_select']
strategy1, value1 = common_locators['entity_deselect']
with Session(self.browser) as session:
for domain_name in generate_strings_list():
with self.subTest(domain_name):
loc_name = gen_string('alpha')
domain = entities.Domain(name=domain_name).create()
self.assertEqual(domain.name, domain_name)
set_context(session, org=ANY_CONTEXT['org'])
make_loc(session, name=loc_name, domains=[domain_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_domains'])
element = session.nav.wait_until_element(
(strategy1, value1 % domain_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.location.update(loc_name, domains=[domain_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_domains'])
element = session.nav.wait_until_element(
(strategy, value % domain_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_user(self):
"""Create admin users then add user and remove it by using the
location name
@feature: Locations
@assert: The user is added then removed from the location
"""
strategy, value = common_locators['entity_select']
strategy1, value1 = common_locators['entity_deselect']
with Session(self.browser) as session:
# User names do not accept HTML values
for user_name in generate_strings_list(
length=10,
exclude_types=['html']):
with self.subTest(user_name):
loc_name = gen_string('alpha')
user = entities.User(
login=user_name,
firstname=user_name,
lastname=user_name,
password=gen_string('alpha'),
).create()
self.assertEqual(user.login, user_name)
set_context(session, org=ANY_CONTEXT['org'])
make_loc(session, name=loc_name, users=[user_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_users'])
element = session.nav.wait_until_element(
(strategy1, value1 % user_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.location.update(loc_name, users=[user_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_users'])
element = session.nav.wait_until_element(
(strategy, value % user_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_hostgroup(self):
"""Add a hostgroup and remove it by using the location name and
hostgroup name
@feature: Locations
@assert: hostgroup is added to location then removed
"""
strategy, value = common_locators['all_values_selection']
with Session(self.browser) as session:
for host_grp_name in generate_strings_list():
with self.subTest(host_grp_name):
loc_name = gen_string('alpha')
host_grp = entities.HostGroup(name=host_grp_name).create()
self.assertEqual(host_grp.name, host_grp_name)
set_context(session, org=ANY_CONTEXT['org'])
make_loc(session, name=loc_name)
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_hostgrps'])
element = session.nav.wait_until_element(
(strategy, value % host_grp_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.hostgroup.delete(host_grp_name)
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_hostgrps'])
element = session.nav.wait_until_element(
(strategy, value % host_grp_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_compresource(self):
"""Remove compute resource by using the location name and
compute resource name
@feature: Locations
@assert: compute resource is added then removed
"""
strategy, value = common_locators['entity_select']
strategy1, value1 = common_locators['entity_deselect']
with Session(self.browser) as session:
for resource_name in generate_strings_list():
with self.subTest(resource_name):
loc_name = gen_string('alpha')
url = LIBVIRT_RESOURCE_URL % settings.server.hostname
resource = entities.LibvirtComputeResource(
name=resource_name, url=url
).create()
self.assertEqual(resource.name, resource_name)
set_context(session, org=ANY_CONTEXT['org'])
make_loc(session, name=loc_name, resources=[resource_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_resources'])
element = self.location.wait_until_element(
(strategy1, value1 % resource_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.location.update(loc_name, resources=[resource_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_resources'])
element = session.nav.wait_until_element(
(strategy, value % resource_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_medium(self):
"""Remove medium by using location name and medium name
@feature: Locations
@assert: medium is added then removed
"""
strategy, value = common_locators['entity_select']
strategy1, value1 = common_locators['entity_deselect']
with Session(self.browser) as session:
for medium_name in generate_strings_list():
with self.subTest(medium_name):
loc_name = gen_string('alpha')
medium = entities.Media(
name=medium_name,
path_=INSTALL_MEDIUM_URL % gen_string('alpha', 6),
os_family='Redhat',
).create()
self.assertEqual(medium.name, medium_name)
set_context(session, org=ANY_CONTEXT['org'])
make_loc(session, name=loc_name, medias=[medium_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_media'])
element = session.nav.wait_until_element(
(strategy1, value1 % medium_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.location.update(loc_name, medias=[medium_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_media'])
element = session.nav.wait_until_element(
(strategy, value % medium_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_template(self):
"""
Remove config template
@feature: Locations
@assert: config template is added and then removed
"""
strategy, value = common_locators['all_values_selection']
with Session(self.browser) as session:
for template_name in generate_strings_list(length=8):
with self.subTest(template_name):
loc_name = gen_string('alpha')
set_context(session, org=ANY_CONTEXT['org'])
make_templates(
session,
name=template_name,
template_path=get_data_file(OS_TEMPLATE_DATA_FILE),
template_type='provision',
custom_really=True,
)
self.assertIsNotNone(self.template.search(template_name))
make_loc(session, name=loc_name)
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_template'])
element = session.nav.wait_until_element(
(strategy, value % template_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.template.delete(template_name)
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_template'])
element = session.nav.wait_until_element(
(strategy, value % template_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNone(element)
| anarang/robottelo | tests/foreman/ui/test_location.py | Python | gpl-3.0 | 34,753 | 0 |
from __future__ import division, absolute_import, print_function
import copy
import pickle
import sys
import platform
import gc
import warnings
import tempfile
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, assert_warns, dec
)
from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
"""Ticket #3"""
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
def test_mem_empty(self,level=rlevel):
"""Ticket #7"""
np.empty((1,), dtype=[('x', np.int64)])
def test_pickle_transposed(self,level=rlevel):
"""Ticket #16"""
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
f = BytesIO()
pickle.dump(a, f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a, b)
def test_typeNA(self,level=rlevel):
"""Ticket #31"""
assert_equal(np.typeNA[np.int64], 'Int64')
assert_equal(np.typeNA[np.uint64], 'UInt64')
def test_dtype_names(self,level=rlevel):
"""Ticket #35"""
dt = np.dtype([(('name', 'label'), np.int32, 3)])
def test_reduce(self,level=rlevel):
"""Ticket #40"""
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
def test_zeros_order(self,level=rlevel):
"""Ticket #43"""
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_asarray_with_order(self,level=rlevel):
"""Check that nothing is done when order='F' and array C/F-contiguous"""
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
def test_ravel_with_order(self,level=rlevel):
"""Check that ravel works when order='F' and array C/F-contiguous"""
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
def test_sort_bigendian(self,level=rlevel):
"""Ticket #47"""
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self,level=rlevel):
"""Ticket #49"""
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
"""Ticket #50"""
f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
def test_noncontiguous_fill(self,level=rlevel):
"""Ticket #58."""
a = np.zeros((5, 3))
b = a[:, :2,]
def rs():
b.shape = (10,)
self.assertRaises(AttributeError, rs)
def test_bool(self,level=rlevel):
"""Ticket #60"""
x = np.bool_(1)
def test_indexing1(self,level=rlevel):
"""Ticket #64"""
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
"""Ticket #65"""
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
"""Ticket #67"""
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
self.assertTrue(a[1] == 'auto')
self.assertTrue(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.assertTrue(b != 'auto')
self.assertTrue(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
"""Ticket #79"""
ulen = 1
ucs_value = sixu('\U0010FFFF')
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua2 = ua.newbyteorder()
def test_object_array_fill(self,level=rlevel):
"""Ticket #86"""
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self,level=rlevel):
"""Ticket #93"""
self.assertRaises(TypeError, np.dtype,
{'names':['a'],'formats':['foo']}, align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
(sys.platform == "win32" and
platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
def test_intp(self,level=rlevel):
"""Ticket #99"""
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width, 16)
self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
self.assertRaises(ValueError, np.intp, '0x1', 32)
assert_equal(255, np.intp('0xFF', 16))
assert_equal(1024, np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
"""Ticket #105"""
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
xa = np.where((a>2) & (a<6))
xb = np.where((b>2) & (b<6))
ya = ((a>2) & (a<6))
yb = ((b>2) & (b<6))
assert_array_almost_equal(xa, ya.nonzero())
assert_array_almost_equal(xb, yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_endian_where(self,level=rlevel):
"""GitHub issue #369"""
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
max_net = net.max()
test = np.where(net <= 0., max_net, net)
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
def test_endian_recarray(self,level=rlevel):
"""Ticket #2185"""
dt = np.dtype([
('head', '>u4'),
('data', '>u4', 2),
])
buf = np.recarray(1, dtype=dt)
buf[0]['head'] = 1
buf[0]['data'][:] = [1, 1]
h = buf[0]['head']
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
def test_mem_dot(self,level=rlevel):
"""Ticket #106"""
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
# Dummy array to detect bad memory access:
_z = np.ones(10)
_dummy = np.empty((0, 10))
z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
np.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
# Do the same for the built-in dot:
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
def test_arange_endian(self,level=rlevel):
"""Ticket #111"""
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
assert_array_equal(ref, x)
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
# Longfloat support is not consistent enough across
# platforms for this test to be meaningful.
# def test_longfloat_repr(self,level=rlevel):
# """Ticket #112"""
# if np.longfloat(0).itemsize > 8:
# a = np.exp(np.array([1000],dtype=np.longfloat))
# assert_(str(a)[1:9] == str(a[0])[:8])
def test_argmax(self,level=rlevel):
"""Ticket #119"""
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
aargmax = a.argmax(i)
def test_mem_divmod(self,level=rlevel):
"""Ticket #126"""
for i in range(10):
divmod(np.array([i])[0], 10)
def test_hstack_invalid_dims(self,level=rlevel):
"""Ticket #128"""
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
self.assertRaises(ValueError, np.hstack, (x, y))
def test_squeeze_type(self,level=rlevel):
"""Ticket #133"""
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self,level=rlevel):
"""Ticket #143"""
assert_equal(0, np.add.identity)
def test_numpy_float_python_long_addition(self):
# Check that numpy float and python longs can be added correctly.
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
def test_binary_repr_0(self,level=rlevel):
"""Ticket #151"""
assert_equal('0', np.binary_repr(0))
def test_rec_iterate(self,level=rlevel):
"""Ticket #160"""
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
(2, 2.2, '2.0')], dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self,level=rlevel):
"""Ticket #190"""
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel):
"""Fix in r2836"""
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))
def test_flat_assignment(self,level=rlevel):
"""Correct behaviour of ticket #194"""
x = np.empty((3, 1))
x.flat = np.arange(3)
assert_array_almost_equal(x, [[0], [1], [2]])
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
def test_broadcast_flat_assignment(self,level=rlevel):
"""Ticket #194"""
x = np.empty((3, 1))
def bfa(): x[:] = np.arange(3)
def bfb(): x[:] = np.arange(3, dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
# and equivalent unsafe casted array assignment
a = np.arange(10)
b = np.ones(10, dtype=bool)
r = np.arange(10)
def assign(a, b, c):
a[b] = c
assert_raises(ValueError, assign, a, b, np.nan)
a[b] = np.array(np.nan) # but not this.
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
def test_unpickle_dtype_with_object(self,level=rlevel):
"""Implemented in r2840"""
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
f = BytesIO()
pickle.dump(dt, f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self,level=rlevel):
"""Ticket #196"""
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
self.assertRaises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
def test_recarray_single_element(self,level=rlevel):
"""Ticket #202"""
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
assert_array_equal(a, b)
assert_equal(a, r[0][0])
def test_zero_sized_array_indexing(self,level=rlevel):
"""Ticket #205"""
tmp = np.array([])
def index_tmp(): tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self,level=rlevel):
"""Ticket #222"""
x = np.chararray((1,), 5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self,level=rlevel):
"""Ticket #239"""
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
assert_equal(np.array([], dtype=object).shape, (0,))
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
def test_mem_around(self,level=rlevel):
"""Ticket #243"""
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
def test_character_array_strip(self,level=rlevel):
"""Ticket #246"""
x = np.char.array(("x", "x ", "x "))
for c in x: assert_equal(c, "x")
def test_lexsort(self,level=rlevel):
"""Lexsort memory error"""
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
def test_lexsort_invalid_sequence(self):
# Issue gh-4123
class BuggySequence(object):
def __len__(self):
return 4
def __getitem__(self, key):
raise KeyError
assert_raises(KeyError, np.lexsort, BuggySequence())
def test_pickle_py2_bytes_encoding(self):
# Check that arrays and scalars pickled on Py2 are
# unpickleable on Py3 using encoding='bytes'
test_data = [
# (original, py2_pickle)
(np.unicode_('\u6f2c'),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n.")),
(np.array([9e123], dtype=np.float64),
asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb.")),
(np.array([(9e123,)], dtype=[('name', float)]),
asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb.")),
]
if sys.version_info[:2] >= (3, 4):
# encoding='bytes' was added in Py3.4
for original, data in test_data:
result = pickle.loads(data, encoding='bytes')
assert_equal(result, original)
if isinstance(result, np.ndarray) and result.dtype.names:
for name in result.dtype.names:
assert_(isinstance(name, str))
def test_pickle_dtype(self,level=rlevel):
"""Ticket #251"""
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
"""Ticket #265"""
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
def test_object_array_from_list(self, level=rlevel):
"""Ticket #270"""
a = np.array([1, 'A', None])
def test_multiple_assign(self, level=rlevel):
"""Ticket #273"""
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self, level=rlevel):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8, 7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
res1 = arr
if res1.dtype.kind in 'uib':
assert_((res1 == res2).all(), func)
else:
assert_(abs(res1-res2).max() < 1e-8, func)
for func in funcs2:
arr1 = np.random.rand(8, 7)
arr2 = np.random.rand(8, 7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self, level=rlevel):
"""Ticket #298"""
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
"""Ticket #302"""
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
def test_recarray_copy(self, level=rlevel):
"""Ticket #312"""
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self, level=rlevel):
"""Ticket #322"""
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
def test_ndmin_float64(self, level=rlevel):
"""Ticket #324"""
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
def test_ndmin_order(self, level=rlevel):
"""Issue #465 and related checks"""
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
def test_mem_axis_minimization(self, level=rlevel):
"""Ticket #327"""
data = np.arange(5)
data = np.add.outer(data, data)
def test_mem_float_imag(self, level=rlevel):
"""Ticket #330"""
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
"""Ticket #334"""
assert_(np.dtype('i4') == np.dtype(('i4', ())))
def test_dtype_posttuple(self, level=rlevel):
"""Ticket #335"""
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
"""Ticket #341"""
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
"""Ticket #342"""
self.assertRaises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
def test_dtype_repr(self, level=rlevel):
"""Ticket #344"""
dt1=np.dtype(('uint32', 2))
dt2=np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self, level=rlevel):
"""Make sure reshape order works."""
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
def test_reshape_zero_strides(self, level=rlevel):
"""Issue #380, test reshaping of zero strided arrays"""
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
def test_reshape_zero_size(self, level=rlevel):
"""GitHub Issue #2700, setting shape failed for 0-sized arrays"""
a = np.ones((0, 2))
a.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
strides_c = (16, 8, 8, 8)
strides_f = (8, 24, 48, 48)
assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
def test_repeat_discont(self, level=rlevel):
"""Ticket #352"""
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
def test_array_index(self, level=rlevel):
"""Make sure optimization is not called in this case."""
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a==3)], a2[np.where(a2==3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self, level=rlevel):
"""Ticket #372"""
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)]),
np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
def test_random_shuffle(self, level=rlevel):
"""Ticket #374"""
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
def test_refcount_vdot(self, level=rlevel):
"""Changeset #3443"""
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
"""Ticket #413"""
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
"""Convolve should raise an error for empty input array."""
self.assertRaises(ValueError, np.convolve, [], [1])
self.assertRaises(ValueError, np.convolve, [1], [])
def test_multidim_byteswap(self, level=rlevel):
"""Ticket #449"""
r=np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
def test_string_NULL(self, level=rlevel):
"""Changeset 3557"""
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
"""Ticket #483"""
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))
def test_take_output(self, level=rlevel):
"""Ensure that 'take' honours output parameter."""
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
b = np.zeros_like(a)
np.take(x, [0, 2], axis=1, out=b)
assert_array_equal(a, b)
def test_take_object_fail(self):
# Issue gh-3001
d = 123.
a = np.array([d, 1], dtype=object)
ref_d = sys.getrefcount(d)
try:
a.take([0, 100])
except IndexError:
pass
assert_(ref_d == sys.getrefcount(d))
def test_array_str_64bit(self, level=rlevel):
"""Ticket #501"""
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
sstr = np.array_str(s)
def test_frompyfunc_endian(self, level=rlevel):
"""Ticket #503"""
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
"""Ticket #514"""
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s ))
def test_arr_transpose(self, level=rlevel):
"""Ticket #516"""
x = np.random.rand(*(2,)*16)
y = x.transpose(list(range(16)))
def test_string_mergesort(self, level=rlevel):
"""Ticket #540"""
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
"""Ticket #546"""
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self, level=rlevel):
"""Ticket #555"""
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
"""Ticket #562"""
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
def test_dot_negative_stride(self, level=rlevel):
"""Ticket #588"""
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
def test_object_casting(self, level=rlevel):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484, 286])
y = np.zeros([484, 286])
x |= y
self.assertRaises(TypeError, rs)
def test_unicode_scalar(self, level=rlevel):
"""Ticket #600"""
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = pickle.loads(pickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
"""Ticket #616"""
for T in ('>f4', '<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0, dtype=dt).dtype, dt)
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v): x[(s>0)] = v
# After removing deprecation, the following are ValueErrors.
# This might seem odd as compared to the value error below. This
# is due to the fact that the new code always uses "nonzero" logic
# and the boolean special case is not taken.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))
# Old special case (different code path):
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
def test_mem_scalar_indexing(self, level=rlevel):
"""Ticket #603"""
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
assert_equal(np.binary_repr(0, width=3), '000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
def test_string_argsort_with_zeros(self, level=rlevel):
"""Check argsort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
def test_string_sort_with_zeros(self, level=rlevel):
"""Check sort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
"""Ticket #658"""
np.indices((0, 3, 4)).T.reshape(-1, 3)
def test_flat_byteorder(self, level=rlevel):
"""Ticket #657"""
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel) :
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
x = np.array([0, -0.0, 0])
assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
"""Ticket #658"""
np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_copy_detection_corner_case2(self, level=rlevel):
"""Ticket #771: strides are not set correctly when reshaping 0-sized
arrays"""
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
"""Ticket #633"""
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1-d broadcast slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1-d -> 2-d broadcast slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:,:] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:, 0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2-d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:, 0] = a
arr[:, 1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:, 0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr3 = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
arr3 = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
arr3 = arr1.take([1, 2, 3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0], [1], [0], [1], [1]], int)
arr3 = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
def test_mem_custom_float_to_array(self, level=rlevel):
"""Ticket 702"""
class MyFloat(object):
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp2 = tmp.astype(float)
def test_object_array_refcount_self_assign(self, level=rlevel):
"""Ticket #711"""
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1, 2, 3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
"""Ticket #713"""
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
"""Ticket #714"""
np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
"""Ticket #640, floats from string"""
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_nonnative_endian_fill(self, level=rlevel):
""" Non-native endian arrays were incorrectly filled with scalars before
r5034.
"""
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
"""Test for ticket #551, changeset r5140"""
x = np.zeros((30, 40))
y = pickle.loads(pickle.dumps(x))
        # y is now typically not aligned on an 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
"""Ticket #788, changeset r5155"""
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
f = open(filename, 'rb')
xp = pickle.load(f, encoding='latin1')
f.close()
else:
f = open(filename)
xp = pickle.load(f)
f.close()
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self, level=rlevel):
"""Ticket #789, changeset 5217.
"""
        # compress with an out argument segfaulted if it could not cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype = np.single)
try:
a.compress([True, False], axis = 1, out = b)
raise AssertionError("compress with an out which cannot be "
"safely casted should not return "
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
"""Ticket #791
"""
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
assert_(dat.info == 'jubba')
dat.resize((4, 2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2, 3, 4], [6, 3, 4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32, 0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2, 7).info == 'jubba')
assert_(dat.compress([0, 1, 1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0], 'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32, 0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.nonzero()[0].info == 'jubba')
assert_(dat.nonzero()[1].info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2, 4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0, 1).info == 'jubba')
assert_(dat.take([2, 3, 5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
def test_recarray_tolist(self, level=rlevel):
"""Ticket #793, changeset r5215
"""
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_nonscalar_item_method(self):
        # Make sure that .item() fails gracefully when it should
a = np.arange(5)
assert_raises(ValueError, a.item)
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1', '2', '3']))
assert_equal(a, b)
def test_unaligned_unicode_access(self, level=rlevel) :
"""Ticket #825"""
for i in range(1, 9) :
msg = 'unicode offset: %d chars'%i
t = np.dtype([('a', 'S%d'%i), ('b', 'U2')])
x = np.array([(asbytes('a'), sixu('b'))], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
"""Ticket 794."""
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self, level=rlevel):
"""Ticket #674"""
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self, level=rlevel):
"""Ticket #816"""
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4, 5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
"""Ticket #840 - order should be an invalid keyword."""
x = np.array([[0, 1], [2, 3]])
self.assertRaises(TypeError, x.resize, (2, 2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
"""Ticket #955"""
with np.errstate(all="ignore"):
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert_(n_before >= n_after, (n_before, n_after))
def test_void_scalar_with_titles(self, level=rlevel):
"""No ticket"""
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_void_scalar_constructor(self):
#Issue #1550
#Create test string data, construct void scalar from data and assert
#that void scalar contains original data.
test_string = np.array("test")
test_string_void_scalar = np.core.multiarray.scalar(
np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
#Create record scalar, construct from data and assert that
#reconstructed scalar is correct.
test_record = np.ones((), "i,i")
test_record_void_scalar = np.core.multiarray.scalar(
test_record.dtype, test_record.tobytes())
assert_(test_record_void_scalar == test_record)
#Test pickle and unpickle of void and record scalars
assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
def test_blasdot_uninitialized_memory(self):
"""Ticket #950"""
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
"""Regression test for #1061."""
        # Set a size which cannot fit into a 64-bit signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
"""Regression test for #1062."""
        # Set a size which cannot fit into a 64-bit signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
a = np.arange(sz)
            self.assertTrue(a.size == sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
"""Ticket #1058"""
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
"""Ticket #1078: segfaults when creating an array with a sequence of 0d
arrays."""
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
"""Ticket #1081: weird array with strange input..."""
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
"""Ticket #1080."""
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
"""Ticket #1106."""
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
"""Ticket #1217, don't segfault."""
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
"""Ticket #1235."""
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
"""Ticket #1240."""
a = np.array(
[ [sixu('abc'), sixu('\u03a3')],
[sixu('asdf'), sixu('erw')]
], dtype='U')
def fail():
b = np.array(a, 'S4')
self.assertRaises(UnicodeEncodeError, fail)
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', sixu('123')])
assert_(a.itemsize == 16)
a = np.array([sixu('123'), '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', sixu('123'), '12345'])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('12345')])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('1234')])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
"""Ticket #1198 and #1267"""
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
r = repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
a3 = np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
a4 = copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
"""Ticket #1267"""
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
"""Ticket #1267"""
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
"""Ticket #1259 and gh-441"""
for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
z = np.array([2.2-1.1j], dtype)
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder()))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype))
# double check real and imaginary parts:
assert_equal(x.real, y.real.byteswap())
assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
"""Ticket #1299"""
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
def test_structured_arrays_with_objects2(self):
"""Ticket #1299 second test"""
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
"""Ticket #1254"""
def func():
x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')])
self.assertRaises(ValueError, func)
def test_signed_integer_division_overflow(self):
"""Ticket #1317."""
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
with np.errstate(divide="ignore"):
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
        # Check that log1p behaves correctly on 32-bit Intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], dtypes[0])
failures = []
# ignore complex warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError as e:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls, i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1, 2, 3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=np.float)
a.shape = (4, 4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
        # On Python 3, tofile/fromfile used to get the Python
        # file handle out of sync (#1610)
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tobytes())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_complex_boolean_cast(self):
"""Ticket #2218"""
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
assert_(np.any(x))
assert_(np.all(x[1:]))
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
rep = repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([], ['?', '?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
assert_equal(np.dtype('S10').itemsize, 10)
A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
# This was throwing an exception because in ctors.c,
# discover_itemsize was calling PyObject_Length without checking
# the return code. This failed to get the length of the number 2,
# and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_squeeze_contiguous(self):
"""Similar to GitHub issue #387"""
a = np.zeros((1, 2)).squeeze()
b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze()
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
def test_reduce_contiguous(self):
"""GitHub issue #387"""
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
b = np.add.reduce(np.zeros((2, 1, 2)), 1)
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.c_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
# Test the same for a circular reference.
b = np.array(a, dtype=object)
a[()] = b
assert_raises(TypeError, int, a)
# Numpy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
a[()] = 0
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_object_array_self_copy(self):
        # An object array being copied into itself was DECREF'ed before
        # INCREF'ing, causing segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
assert_equal(sys.getrefcount(a[()]), 2)
a[()].__class__ # will segfault if object was deleted
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1, 0]), [])
def test_objectarray_setfield(self):
# Setfield should not overwrite Object fields with non-Object data
x = np.array([1, 2, 3], dtype=object)
assert_raises(TypeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
"""Ticket #1756 """
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d"%i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
r=np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_alignment_update(self):
"""Check that alignment flag is updated on stride setting"""
a = np.arange(10)
assert_(a.flags.aligned)
a.strides = 3
assert_(not a.flags.aligned)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1, 2], [3, 4]]).T
y = np.array(x.flat)
assert_equal(x, [[1, 3], [2, 4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
if sys.version_info[0] >= 3:
data = np.array([1], dtype='b')
data = pickle.loads(pickle.dumps(data))
data[0] = 0xdd
bytestring = "\x01 ".encode('ascii')
assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
data = asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
"p13\ntp14\nb.")
if sys.version_info[0] >= 3:
# This should work:
result = pickle.loads(data, encoding='latin1')
assert_array_equal(result, np.array([129], dtype='b'))
# Should not segfault:
assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
def test_pickle_py2_scalar_latin1_hack(self):
# Check that scalar unpickling hack in Py3 that supports
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(...)
datas = [
# (original, python2_pickle, koi8r_validity)
(np.unicode_('\u6bd2'),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
'invalid'),
(np.float64(9e123),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
'invalid'),
(np.bytes_(asbytes('\x9c')), # different 8-bit code point in KOI8-R vs latin1
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
"tp8\nRp9\n."),
'different'),
]
if sys.version_info[0] >= 3:
for original, data, koi8r_validity in datas:
result = pickle.loads(data, encoding='latin1')
assert_equal(result, original)
                # Decoding under a non-latin1 encoding (e.g. KOI8-R) can
# produce bad results, but should not segfault.
if koi8r_validity == 'different':
# Unicode code points happen to lie within latin1,
                    # but are different in koi8-r, resulting in silent
                    # bogus results
result = pickle.loads(data, encoding='koi8-r')
assert_(result != original)
elif koi8r_validity == 'invalid':
                    # Unicode code points outside latin1, so decoding results
                    # in an encoding exception
assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
else:
raise ValueError(koi8r_validity)
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0, 1)
a_obj[1] = (3, 2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2, 2), dtype=object)
a[...] = [[1, 2]]
assert_equal(a, [[1, 2], [1, 2]])
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
res = np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr in [asunicode, asbytes]:
b = np.array([val, tostr('xx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val])
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val])
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for Numpy's four byte unicode.
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
a = np.array([sixu('abcd')])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
        # Ticket #2063: must always choose a stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode)[0]
del a
def test_refcount_error_in_clip(self):
# Ticket #1588
a = np.zeros((2,), dtype='>i2').clip(min=0)
x = a + a
# This used to segfault:
y = str(x)
# Check the final string:
assert_(y == "[0 0]")
def test_searchsorted_wrong_dtype(self):
# Ticket #2189, it used to segfault, so we check that it raises the
# proper exception.
a = np.array([('a', 1)], dtype='S1, int')
assert_raises(TypeError, np.searchsorted, a, 1.2)
# Ticket #2066, similar problem:
dtype = np.format_parser(['i4', 'i4'], [], [])
a = np.recarray((2, ), dtype)
assert_raises(TypeError, np.searchsorted, a, 1)
def test_complex64_alignment(self):
# Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
dtt = np.complex64
arr = np.arange(10, dtype=dtt)
# 2D array
arr2 = np.reshape(arr, (2, 5))
# Fortran write followed by (C or F) read caused bus error
data_str = arr2.tobytes('F')
data_back = np.ndarray(arr2.shape,
arr2.dtype,
buffer=data_str,
order='F')
assert_array_equal(arr2, data_back)
def test_structured_count_nonzero(self):
arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
count = np.count_nonzero(arr)
assert_equal(count, 0)
def test_copymodule_preserves_f_contiguity(self):
a = np.empty((2, 2), order='F')
b = copy.copy(a)
c = copy.deepcopy(a)
assert_(b.flags.fortran)
assert_(b.flags.f_contiguous)
assert_(c.flags.fortran)
assert_(c.flags.f_contiguous)
def test_fortran_order_buffer(self):
import numpy as np
a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')],
[sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]])
assert_array_equal(arr, arr2)
def test_assign_from_sequence_error(self):
# Ticket #4024.
arr = np.array([1, 2, 3])
assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
arr.__setitem__(slice(None), [9])
assert_equal(arr, [9, 9, 9])
def test_format_on_flex_array_element(self):
# Ticket #4369.
dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
arr = np.array([('2000-01-01', 1)], dt)
formatted = '{0}'.format(arr[0])
assert_equal(formatted, str(arr[0]))
def test_deepcopy_on_0d_array(self):
# Ticket #3311.
arr = np.array(3)
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
self.assertTrue(arr is not arr_cp)
self.assertTrue(isinstance(arr_cp, type(arr)))
def test_bool_subscript_crash(self):
# gh-4494
c = np.rec.array([(1, 2, 3), (4, 5, 6)])
masked = c[np.array([True, False])]
base = masked.base
del masked, c
base.dtype
def test_richcompare_crash(self):
# gh-4613
import operator as op
        # dummy class where __array__ throws an exception
class Foo(object):
__array_priority__ = 1002
def __array__(self,*args,**kwargs):
raise Exception()
rhs = Foo()
lhs = np.array(1)
for f in [op.lt, op.le, op.gt, op.ge]:
if sys.version_info[0] >= 3:
assert_raises(TypeError, f, lhs, rhs)
else:
f(lhs, rhs)
assert_(not op.eq(lhs, rhs))
assert_(op.ne(lhs, rhs))
def test_richcompare_scalar_and_subclass(self):
# gh-4709
class Foo(np.ndarray):
def __eq__(self, other):
return "OK"
x = np.array([1,2,3]).view(Foo)
assert_equal(10 == x, "OK")
assert_equal(np.int32(10) == x, "OK")
assert_equal(np.array([10]) == x, "OK")
def test_pickle_empty_string(self):
# gh-3926
import pickle
test_string = np.string_('')
assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)
def test_frompyfunc_many_args(self):
# gh-5672
def passer(*args):
pass
assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
def test_repeat_broadcasting(self):
# gh-5743
a = np.arange(60).reshape(3, 4, 5)
for axis in chain(range(-a.ndim, a.ndim), [None]):
assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
def test_frompyfunc_nout_0(self):
# gh-2014
def f(x):
x[0], x[-1] = x[-1], x[0]
uf = np.frompyfunc(f, 1, 0)
a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])
assert_equal(uf(a), ())
assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])
if __name__ == "__main__":
run_module_suite()
| Linkid/numpy | numpy/core/tests/test_regression.py | Python | bsd-3-clause | 78,626 | 0.002073 |
# coding: utf-8
from __future__ import absolute_import
#
from esm.models.bind_resource import BindResource
from .base_model_ import Model
from ..util import deserialize_model
class BindingRequest(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, app_guid: str=None, plan_id: str=None, service_id: str=None, parameters: object=None, bind_resource: BindResource=None):
"""
BindingRequest - a model defined in Swagger
:param app_guid: The app_guid of this BindingRequest.
:type app_guid: str
:param plan_id: The plan_id of this BindingRequest.
:type plan_id: str
:param service_id: The service_id of this BindingRequest.
:type service_id: str
:param parameters: The parameters of this BindingRequest.
:type parameters: object
:param bind_resource: The bind_resource of this BindingRequest.
:type bind_resource: BindResource
"""
self.swagger_types = {
'app_guid': str,
'plan_id': str,
'service_id': str,
'parameters': object,
'bind_resource': BindResource
}
self.attribute_map = {
'app_guid': 'app_guid',
'plan_id': 'plan_id',
'service_id': 'service_id',
'parameters': 'parameters',
'bind_resource': 'bind_resource'
}
self._app_guid = app_guid
self._plan_id = plan_id
self._service_id = service_id
self._parameters = parameters
self._bind_resource = bind_resource
@classmethod
def from_dict(cls, dikt) -> 'BindingRequest':
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The BindingRequest of this BindingRequest.
:rtype: BindingRequest
"""
return deserialize_model(dikt, cls)
@property
def app_guid(self) -> str:
"""
Gets the app_guid of this BindingRequest.
Deprecated in favor of bind_resource.app_guid. GUID of an application associated with the binding to be created. If present, MUST be a non-empty string.
:return: The app_guid of this BindingRequest.
:rtype: str
"""
return self._app_guid
@app_guid.setter
def app_guid(self, app_guid: str):
"""
Sets the app_guid of this BindingRequest.
Deprecated in favor of bind_resource.app_guid. GUID of an application associated with the binding to be created. If present, MUST be a non-empty string.
:param app_guid: The app_guid of this BindingRequest.
:type app_guid: str
"""
self._app_guid = app_guid
@property
def plan_id(self) -> str:
"""
Gets the plan_id of this BindingRequest.
ID of the plan from the catalog. MUST be a non-empty string.
:return: The plan_id of this BindingRequest.
:rtype: str
"""
return self._plan_id
@plan_id.setter
def plan_id(self, plan_id: str):
"""
Sets the plan_id of this BindingRequest.
ID of the plan from the catalog. MUST be a non-empty string.
:param plan_id: The plan_id of this BindingRequest.
:type plan_id: str
"""
if plan_id is None:
raise ValueError("Invalid value for `plan_id`, must not be `None`")
self._plan_id = plan_id
@property
def service_id(self) -> str:
"""
Gets the service_id of this BindingRequest.
ID of the service from the catalog. MUST be a non-empty string.
:return: The service_id of this BindingRequest.
:rtype: str
"""
return self._service_id
@service_id.setter
def service_id(self, service_id: str):
"""
Sets the service_id of this BindingRequest.
ID of the service from the catalog. MUST be a non-empty string.
:param service_id: The service_id of this BindingRequest.
:type service_id: str
"""
if service_id is None:
raise ValueError("Invalid value for `service_id`, must not be `None`")
self._service_id = service_id
@property
def parameters(self) -> object:
"""
Gets the parameters of this BindingRequest.
Configuration options for the service binding. An opaque object, controller treats this as a blob. Brokers SHOULD ensure that the client has provided valid configuration parameters and values for the operation.
:return: The parameters of this BindingRequest.
:rtype: object
"""
return self._parameters
@parameters.setter
def parameters(self, parameters: object):
"""
Sets the parameters of this BindingRequest.
Configuration options for the service binding. An opaque object, controller treats this as a blob. Brokers SHOULD ensure that the client has provided valid configuration parameters and values for the operation.
:param parameters: The parameters of this BindingRequest.
:type parameters: object
"""
self._parameters = parameters
@property
def bind_resource(self) -> BindResource:
"""
Gets the bind_resource of this BindingRequest.
A JSON object that contains data for platform resources associated with the binding to be created. See Bind Resource Object for more information.
:return: The bind_resource of this BindingRequest.
:rtype: BindResource
"""
return self._bind_resource
@bind_resource.setter
def bind_resource(self, bind_resource: BindResource):
"""
Sets the bind_resource of this BindingRequest.
A JSON object that contains data for platform resources associated with the binding to be created. See Bind Resource Object for more information.
:param bind_resource: The bind_resource of this BindingRequest.
:type bind_resource: BindResource
"""
self._bind_resource = bind_resource
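# Illustrative usage sketch (not part of the generated module; the field
# values below are hypothetical):
#
#     req = BindingRequest.from_dict({
#         'service_id': 'svc-1234',
#         'plan_id': 'plan-small',
#         'parameters': {'size': 'small'}
#     })
#     assert req.service_id == 'svc-1234'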
| EduJGURJC/elastest-service-manager | src/esm/models/binding_request.py | Python | apache-2.0 | 6,152 | 0.004714 |
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from lxml import etree
from app.detective.utils import to_class_name, to_camelcase, to_underscores
import re
# Defines the owl and rdf namespaces
namespaces = {
'owl': 'http://www.w3.org/2002/07/owl#',
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#'
}
# transform property name
pron = lambda name: to_underscores(to_camelcase(name))
# get local tag
def get(sets, el):
if hasattr(sets, "iterchildren"):
props = [ e for e in sets.iterchildren() if re.search('#}%s$' % el, e.tag) ]
return props[0].text if len(props) else ''
else:
return ""
# Merge 2 list and remove duplicates using the given field as reference
def merge(first_list, second_list, field):
refs = [ x[field] for x in second_list ]
return second_list + [ x for x in first_list if x[field] not in refs ]
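# Minimal sketch of how merge() behaves (hypothetical dicts, not taken from
# any ontology): entries from the second list win when the reference field
# clashes, and the remaining entries from the first list are appended.
#
#     merge([{"name": "a", "v": 1}, {"name": "b", "v": 2}],
#           [{"name": "a", "v": 9}], "name")
#     # -> [{"name": "a", "v": 9}, {"name": "b", "v": 2}]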
class Command(BaseCommand):
help = "Parse the given OWL file to generate its neo4django models."
args = 'filename.owl'
root = None
def handle(self, *args, **options):
if not args:
raise CommandError('Please specify path to ontology file.')
# Gives the ontology URI. Only needed for documentation purposes
ontologyURI = "http://www.semanticweb.org/nkb/ontologies/2013/6/impact-investment#"
# This string will contain the models.py file
headers = [
"# -*- coding: utf-8 -*-",
"# The ontology can be found in its entirety at %s" % ontologyURI,
"from neo4django.db import models",
"from neo4django.graph_auth.models import User",
""
]
# This array contains the correspondance between data types
correspondanceTypes = {
"string" : "StringProperty",
"anyURI" : "URLProperty",
"int" : "IntegerProperty",
"nonNegativeInteger" : "IntegerProperty",
"nonPositiveInteger" : "IntegerProperty",
"PositiveInteger" : "IntegerProperty",
"NegativeInteger" : "IntegerProperty",
# Looking forward the neo4django float support!
# See also: https://github.com/scholrly/neo4django/issues/197
"float" : "StringProperty",
"integer" : "IntegerProperty",
"dateTimeStamp" : "DateTimeProperty",
"dateTime" : "DateTimeProperty",
"boolean" : "BooleanProperty"
}
try :
# Parses the file with etree
tree = etree.parse(args[0])
except:
raise CommandError('Unable to parse the given file.')
self.root = tree.getroot()
models = []
# Finds all the Classes
for ontologyClassElement in self.root.findall("owl:Class", namespaces):
# Finds the URI of the class
classURI = ontologyClassElement.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}about"]
#Finds the name of the class
className = to_class_name(classURI.split("#")[1])
# By default, the class has no parent
parentClass = "models.NodeModel"
# Declares an array to store the relationships and properties from this class
relations = []
properties = []
scope = get(ontologyClassElement, "scope").replace("'", "\\'")
# Class help text
help_text = get(ontologyClassElement, "help_text").replace("'", "\\'")
# Verbose names
verbose_name = get(ontologyClassElement, "verbose_name").replace("'", "\\'")
verbose_name_plural = get(ontologyClassElement, "verbose_name_plural").replace("'", "\\'")
# Finds all the subClasses of the Class
for subClassElement in ontologyClassElement.findall("rdfs:subClassOf", namespaces):
# If the Class is actually an extension of another Class
if "{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource" in subClassElement.attrib:
parentClassURI = subClassElement.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
parentClass = to_class_name(parentClassURI.split("#")[1])
else:
for restriction in subClassElement.findall("owl:Restriction", namespaces):
# If there is a relationship defined in the subclass
if restriction.find("owl:onClass", namespaces) is not None:
# Finds the relationship and its elements
# (destination Class and type)
relationClass = restriction.find("owl:onClass", namespaces)
relation = {}
relation["URI"] = relationClass.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
relation["name"] = to_class_name(relation["URI"].split("#")[1])
# Exception when the relation's destination is
# an individual from the same class
if relation["name"] == className:
relation["name"] = '"self"'
else:
relation["name"] = '"%s"' % relation["name"]
relationType = restriction.find("owl:onProperty", namespaces)
relationTypeURI = relationType.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
relation["type"] = relationTypeURI.split("#")[1]
# Guesses the destination of the relation based on the name.
# Name should be "has_..."
if relation["type"].find('has') == 0:
relation["destination"] = pron(relation["type"][3:])
# Get the property's options
options = self.propOptions(relation["type"])
# Help text
relation["help_text"] = get(options, "help_text").replace("'", "\\'")
# Verbose name
relation["verbose_name"] = get(options, "verbose_name")
relation["type"] = relation["type"]
# Adds the relationship to the array containing all relationships for the class only
# if the relation has a destination
if "destination" in relation:
relations.append(relation)
# If there is a property defined in the subclass
elif restriction.find("owl:onDataRange", namespaces) is not None or restriction.find("owl:someValuesFrom", namespaces) is not None:
propertyTypeElement = restriction.find("owl:onProperty", namespaces)
propertyTypeURI = propertyTypeElement.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
propertyType = propertyTypeURI.split("#")[1]
if restriction.find("owl:onDataRange", namespaces) is not None:
dataTypeElement = restriction.find("owl:onDataRange", namespaces)
else:
dataTypeElement = restriction.find("owl:someValuesFrom", namespaces)
dataTypeURI = dataTypeElement.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
t = dataTypeURI.split("#")[1]
if t in correspondanceTypes:
dataType = correspondanceTypes[t]
# Get the property's options
options = self.propOptions(propertyType)
prop = {
"name" : propertyType,
"type" : dataType,
# Help text
"help_text": get(options, "help_text").replace("'", "\\'"),
# Verbose name
"verbose_name": get(options, "verbose_name")
}
properties.append(prop)
else:
raise CommandError("Property '%s' of '%s' using unkown type: %s" % (propertyType, className, t) )
models.append({
"className" : className,
"scope" : scope,
"help_text" : help_text,
"verbose_name" : verbose_name,
"verbose_name_plural": verbose_name_plural,
"parentClass" : parentClass,
"properties" : properties,
"relations" : relations,
"dependencies" : [parentClass]
})
        # Topological sort of the models to avoid missing dependencies
models = self.topolgical_sort(models)
# Output the models file
self.print_models(models, headers)
# option of the given property
def propOptions(self, name):
options = None
attr = "{http://www.w3.org/1999/02/22-rdf-syntax-ns#}about"
for p in self.root.findall("owl:ObjectProperty", namespaces):
if re.search('#%s$' % name, p.attrib[attr]):
options = p
for p in self.root.findall("owl:DatatypeProperty", namespaces):
if re.search('#%s$' % name, p.attrib[attr]):
options = p
return options
@staticmethod
def print_models(models=[], headers=[]):
modelsContents = headers
for m in models:
# Writes the class in models.py
modelsContents.append("\nclass "+ m["className"] +"(models.NodeModel):")
            # Defines properties and relations that every model has
m["properties"].insert(0,
{
"name" : "_author",
"type": "IntArrayProperty",
# Verbose name
"verbose_name": "author",
"help_text": "People that edited this entity."
}
)
m["properties"].insert(1,
{
"name" : "_status",
"type": "IntegerProperty",
# Verbose name
"verbose_name": "status",
"help_text": ""
}
)
            # Since neo4django doesn't support model inheritance correctly,
            # we use models.NodeModel for every model
            # and duplicate the parent's attributes into its child
if m["parentClass"] != "models.NodeModel":
modelsContents.append("\t_parent = u'%s'" % m["parentClass"])
# Find the models that could be the parent of the current one
parents = [model for model in models if model["className"] == m["parentClass"] ]
# We found at least one parent
if len(parents):
# We take the first one
parent = parents[0]
# We merge the properties and the relationships
m["properties"] = merge(parent["properties"], m["properties"], "name")
m["relations"] = merge(parent["relations"], m["relations"], "destination")
if m["scope"] != '' and m["scope"] != None:
modelsContents.append("\t_topic = u'%s'" % m["scope"])
if m["help_text"] != None:
modelsContents.append("\t_description = u'%s'" % m["help_text"])
# Writes the properties
for prop in m["properties"]:
opt = [
"null=True",
"help_text=u'%s'" % prop["help_text"]
]
if prop["verbose_name"] != '':
opt.append("verbose_name=u'%s'" % prop["verbose_name"])
field = "\t%s = models.%s(%s)"
opt = ( pron(prop["name"]), prop["type"], ",".join(opt))
modelsContents.append(field % opt )
# Writes the relationships
for rel in m["relations"]:
opt = [
rel["name"],
"null=True",
# Add class name prefix to relation type
"rel_type='%s+'" % pron( m["className"] + "_" + rel["type"] ),
"help_text=u'%s'" % rel["help_text"]
]
if prop["verbose_name"] != '':
opt.append("verbose_name=u'%s'" % rel["verbose_name"])
field = "\t%s = models.Relationship(%s)"
modelsContents.append(field % (rel["destination"], ",".join(opt) ) )
modelsContents.append("\n\tclass Meta:")
if m["verbose_name"] != '':
modelsContents.append("\t\tverbose_name = u'%s'" % m["verbose_name"])
if m["verbose_name_plural"] != '':
modelsContents.append("\t\tverbose_name_plural = u'%s'" % m["verbose_name_plural"])
if m["verbose_name"] == '' and m["verbose_name_plural"] == '':
modelsContents.append("\t\tpass")
if len([p for p in m["properties"] if p["name"] == "name" ]):
modelsContents.append("\n\tdef __unicode__(self):")
modelsContents.append("\t\treturn self.name or u\"Unkown\"")
print "\n".join(modelsContents).encode("UTF-8")
@staticmethod
def topolgical_sort(graph_unsorted):
"""
:src http://blog.jupo.org/2012/04/06/topological-sorting-acyclic-directed-graphs/
Repeatedly go through all of the nodes in the graph, moving each of
the nodes that has all its edges resolved, onto a sequence that
forms our sorted graph. A node has all of its edges resolved and
can be moved once all the nodes its edges point to, have been moved
from the unsorted graph onto the sorted one.
"""
# This is the list we'll return, that stores each node/edges pair
# in topological order.
graph_sorted = []
# Run until the unsorted graph is empty.
while graph_unsorted:
# Go through each of the node/edges pairs in the unsorted
# graph. If a set of edges doesn't contain any nodes that
# haven't been resolved, that is, that are still in the
# unsorted graph, remove the pair from the unsorted graph,
# and append it to the sorted graph. Note here that by using
# using the items() method for iterating, a copy of the
# unsorted graph is used, allowing us to modify the unsorted
# graph as we move through it. We also keep a flag for
# checking that that graph is acyclic, which is true if any
# nodes are resolved during each pass through the graph. If
# not, we need to bail out as the graph therefore can't be
# sorted.
acyclic = False
for index, item in enumerate(graph_unsorted):
edges = item["dependencies"]
node_unsorted = [item_unsorted["className"] for item_unsorted in graph_unsorted]
for edge in edges:
if edge in node_unsorted:
break
else:
acyclic = True
del graph_unsorted[index]
graph_sorted.append(item)
if not acyclic:
# Uh oh, we've passed through all the unsorted nodes and
# weren't able to resolve any of them, which means there
# are nodes with cyclic edges that will never be resolved,
# so we bail out with an error.
raise RuntimeError("A cyclic dependency occurred")
return graph_sorted
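# Illustrative sketch of the dependency ordering (hypothetical model dicts,
# not taken from an ontology): parent classes come out before their children.
#
#     Command.topolgical_sort([
#         {"className": "Person", "dependencies": ["Agent"]},
#         {"className": "Agent", "dependencies": ["models.NodeModel"]},
#     ])
#     # -> the "Agent" entry first, then "Person"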
| carlvlewis/detective.io | app/detective/management/commands/parseowl.py | Python | lgpl-3.0 | 16,471 | 0.006982 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.plugins.async import async
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class TimerEvent(async.DelayedCall):
def __init__(self, actor_id, delay, trigger_loop, repeats=False):
super(TimerEvent, self).__init__(delay, callback=self.trigger)
self._actor_id = actor_id
self._triggered = False
self.trigger_loop = trigger_loop
self.repeats = repeats
_log.debug("Set calvinsys timer %f %s on %s" % (delay, "repeat" if self.repeats else "", self._actor_id))
@property
def triggered(self):
return self._triggered
def ack(self):
self._triggered = False
def trigger(self):
_log.debug("Trigger calvinsys timer on %s" % (self._actor_id))
self._triggered = True
if self.repeats:
self.reset()
self.trigger_loop(actor_ids=[self._actor_id])
class TimerHandler(object):
def __init__(self, node, actor):
super(TimerHandler, self).__init__()
self._actor = actor
self.node = node
def once(self, delay):
return TimerEvent(self._actor.id, delay, self.node.sched.trigger_loop)
def repeat(self, delay):
return TimerEvent(self._actor.id, delay, self.node.sched.trigger_loop, repeats=True)
def register(node, actor, events=None):
"""
    register() is called when the Event-system object is created.
    Place an object in the event object - in this case the
    node's only timer object.
Also register any hooks for actor migration.
@TODO: Handle migration (automagically and otherwise.)
"""
return TimerHandler(node=node, actor=actor)
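# Minimal usage sketch (assumed calling pattern, not from this module):
#
#     timer = register(node, actor)
#     event = timer.repeat(0.5)   # wakes the actor's trigger loop every 0.5 s
#     if event.triggered:
#         event.ack()             # acknowledge before the next firing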
| les69/calvin-base | calvin/calvinsys/events/timer.py | Python | apache-2.0 | 2,309 | 0.002599 |
import serial
ser = serial.Serial('/dev/ttyUSB2',38400)
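# Continuously read one-byte gesture commands from the serial port and
# persist the most recent command to a file that other processes poll.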
while True:
try:
x = ser.read()
        f = open('gesture_command.txt', 'w')
f.write(x)
f.close()
except:
print "Gesture serial : port error!"
break
| Mr-Robots/Gesture-controlled-surveillance-vehicle | Ti_Monitor/Gesture_serial.py | Python | gpl-2.0 | 251 | 0.015936 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.core.models import ArticlePage, ArticlePageRecommendedSections
from wagtail.wagtailcore.blocks import StreamValue
def create_recomended_articles(main_article, article_list):
'''
Creates recommended article objects from article_list
and _prepends_ to existing recommended articles.
'''
existing_recommended_articles = [
ra.recommended_article.specific
for ra in main_article.recommended_articles.all()]
ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
for hyperlinked_article in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=hyperlinked_article).save()
# re-create existing recommended articles
for article in existing_recommended_articles:
if article not in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=article).save()
def convert_articles(apps, schema_editor):
'''
Derived from https://github.com/wagtail/wagtail/issues/2110
'''
articles = ArticlePage.objects.all().exact_type(ArticlePage)
for article in articles:
stream_data = []
linked_articles = []
for block in article.body.stream_data:
if block['type'] == 'page':
if ArticlePage.objects.filter(id=block['value']):
linked_articles.append(ArticlePage.objects.get(
id=block['value']))
else:
# add block to new stream_data
stream_data.append(block)
if linked_articles:
create_recomended_articles(article, linked_articles)
stream_block = article.body.stream_block
article.body = StreamValue(stream_block, stream_data, is_lazy=True)
article.save()
section = article.get_parent().specific
section.enable_recommended_section = True
section.enable_next_section = True
section.save()
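# Illustrative note (an assumption based on the checks above): entries in
# article.body.stream_data are expected to look roughly like
#   {'type': 'page', 'value': <ArticlePage id>}    # hyperlinked article block
#   {'type': 'paragraph', 'value': '<p>...</p>'}   # ordinary body block
# and only the 'page' blocks are converted into recommended-article relations;
# all other blocks are kept in the rebuilt StreamValue.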
class Migration(migrations.Migration):
dependencies = [
('iogt', '0002_create_importers_group'),
]
operations = [
migrations.RunPython(convert_articles),
]
| praekelt/molo-iogt | iogt/migrations/0003_convert_recomended_articles.py | Python | bsd-2-clause | 2,316 | 0 |
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from oslo_policy import policy as oslo_policy
from oslo_utils import excutils
from pecan import hooks
import webob
from neutron._i18n import _
from neutron.common import constants as const
from neutron.extensions import quotasv2
from neutron import manager
from neutron.pecan_wsgi import constants as pecan_constants
from neutron.pecan_wsgi.controllers import quota
from neutron.pecan_wsgi.hooks import utils
from neutron import policy
LOG = logging.getLogger(__name__)
def _custom_getter(resource, resource_id):
"""Helper function to retrieve resources not served by any plugin."""
if resource == quotasv2.RESOURCE_NAME:
return quota.get_tenant_quotas(resource_id)[quotasv2.RESOURCE_NAME]
def fetch_resource(method, neutron_context, controller,
collection, resource, resource_id,
parent_id=None):
field_list = []
if method == 'PUT':
attrs = controller.resource_info
if not attrs:
# this isn't a request for a normal resource. it could be
# an action like removing a network from a dhcp agent.
# return None and assume the custom controller for this will
# handle the necessary logic.
return
field_list = [name for (name, value) in attrs.items()
if (value.get('required_by_policy') or
value.get('primary_key') or 'default' not in value)]
plugin = manager.NeutronManager.get_plugin_for_resource(collection)
if plugin:
if utils.is_member_action(controller):
getter = controller.parent_controller.plugin_shower
else:
getter = controller.plugin_shower
getter_args = [neutron_context, resource_id]
if parent_id:
getter_args.append(parent_id)
return getter(*getter_args, fields=field_list)
else:
# Some legit resources, like quota, do not have a plugin yet.
# Retrieving the original object is nevertheless important
# for policy checks.
return _custom_getter(resource, resource_id)
class PolicyHook(hooks.PecanHook):
priority = 140
def before(self, state):
# This hook should be run only for PUT,POST and DELETE methods and for
# requests targeting a neutron resource
resources = state.request.context.get('resources', [])
if state.request.method not in ('POST', 'PUT', 'DELETE'):
return
# As this routine will likely alter the resources, do a shallow copy
resources_copy = resources[:]
neutron_context = state.request.context.get('neutron_context')
resource = state.request.context.get('resource')
# If there is no resource for this request, don't bother running authZ
# policies
if not resource:
return
controller = utils.get_controller(state)
if not controller or utils.is_member_action(controller):
return
collection = state.request.context.get('collection')
needs_prefetch = (state.request.method == 'PUT' or
state.request.method == 'DELETE')
policy.init()
action = controller.plugin_handlers[
pecan_constants.ACTION_MAP[state.request.method]]
# NOTE(salv-orlando): As bulk updates are not supported, in case of PUT
# requests there will be only a single item to process, and its
# identifier would have been already retrieved by the lookup process;
# in the case of DELETE requests there won't be any item to process in
# the request body
original_resources = []
if needs_prefetch:
try:
item = resources_copy.pop()
except IndexError:
                # Oops... this was a delete after all!
item = {}
resource_id = state.request.context.get('resource_id')
parent_id = state.request.context.get('parent_id')
method = state.request.method
resource_obj = fetch_resource(method, neutron_context, controller,
collection, resource, resource_id,
parent_id=parent_id)
if resource_obj:
original_resources.append(resource_obj)
obj = copy.copy(resource_obj)
obj.update(item)
obj[const.ATTRIBUTES_TO_UPDATE] = item.keys()
# Put back the item in the list so that policies could be
# enforced
resources_copy.append(obj)
# TODO(salv-orlando): as other hooks might need to prefetch resources,
# store them in the request context. However, this should be done in a
# separate hook which is conveniently called before all other hooks
state.request.context['original_resources'] = original_resources
for item in resources_copy:
try:
policy.enforce(
neutron_context, action, item,
pluralized=collection)
except oslo_policy.PolicyNotAuthorized:
with excutils.save_and_reraise_exception() as ctxt:
                    # If a tenant is modifying its own object, it's safe to
# return a 403. Otherwise, pretend that it doesn't exist
# to avoid giving away information.
controller = utils.get_controller(state)
s_action = controller.plugin_handlers[controller.SHOW]
if not policy.check(neutron_context, s_action, item,
pluralized=collection):
ctxt.reraise = False
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
def after(self, state):
neutron_context = state.request.context.get('neutron_context')
resource = state.request.context.get('resource')
collection = state.request.context.get('collection')
controller = utils.get_controller(state)
if not resource:
# can't filter a resource we don't recognize
return
# NOTE(kevinbenton): extension listing isn't controlled by policy
if resource == 'extension':
return
try:
data = state.response.json
except ValueError:
return
if state.request.method not in pecan_constants.ACTION_MAP:
return
if not data or (resource not in data and collection not in data):
return
policy.init()
is_single = resource in data
action_type = pecan_constants.ACTION_MAP[state.request.method]
if action_type == 'get':
action = controller.plugin_handlers[controller.SHOW]
else:
action = controller.plugin_handlers[action_type]
key = resource if is_single else collection
to_process = [data[resource]] if is_single else data[collection]
# in the single case, we enforce which raises on violation
# in the plural case, we just check so violating items are hidden
policy_method = policy.enforce if is_single else policy.check
plugin = manager.NeutronManager.get_plugin_for_resource(collection)
try:
resp = [self._get_filtered_item(state.request, controller,
resource, collection, item)
for item in to_process
if (state.request.method != 'GET' or
policy_method(neutron_context, action, item,
plugin=plugin,
pluralized=collection))]
except oslo_policy.PolicyNotAuthorized:
# This exception must be explicitly caught as the exception
# translation hook won't be called if an error occurs in the
# 'after' handler. Instead of raising an HTTPNotFound exception,
# we have to set the status_code here to prevent the catch_errors
# middleware from turning this into a 500.
state.response.status_code = 404
return
if is_single:
resp = resp[0]
state.response.json = {key: resp}
def _get_filtered_item(self, request, controller, resource, collection,
data):
neutron_context = request.context.get('neutron_context')
to_exclude = self._exclude_attributes_by_policy(
neutron_context, controller, resource, collection, data)
return self._filter_attributes(request, data, to_exclude)
def _filter_attributes(self, request, data, fields_to_strip):
# This routine will remove the fields that were requested to the
# plugin for policy evaluation but were not specified in the
# API request
return dict(item for item in data.items()
if item[0] not in fields_to_strip)
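    # Illustrative sketch (not part of the original code): given
    #   data = {'id': 'net-1', 'shared': False, 'provider:physical_network': 'ext'}
    #   fields_to_strip = ['provider:physical_network']
    # _filter_attributes returns {'id': 'net-1', 'shared': False}; keys the
    # caller is not authorized to see are simply dropped from the response.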
def _exclude_attributes_by_policy(self, context, controller, resource,
collection, data):
"""Identifies attributes to exclude according to authZ policies.
Return a list of attribute names which should be stripped from the
response returned to the user because the user is not authorized
to see them.
"""
attributes_to_exclude = []
for attr_name in data.keys():
# TODO(amotoki): All attribute maps have tenant_id and
# it determines excluded attributes based on tenant_id.
# We need to migrate tenant_id to project_id later
# as attr_info is referred to in various places and we need
# to check all logs carefully.
if attr_name == 'project_id':
continue
attr_data = controller.resource_info.get(attr_name)
if attr_data and attr_data['is_visible']:
if policy.check(
context,
# NOTE(kevinbenton): this used to reference a
# _plugin_handlers dict, why?
'get_%s:%s' % (resource, attr_name),
data,
might_not_exist=True,
pluralized=collection):
# this attribute is visible, check next one
continue
# if the code reaches this point then either the policy check
# failed or the attribute was not visible in the first place
attributes_to_exclude.append(attr_name)
# TODO(amotoki): As mentioned in the above TODO,
# we treat project_id and tenant_id equivalently.
# This should be migrated to project_id later.
if attr_name == 'tenant_id':
attributes_to_exclude.append('project_id')
if attributes_to_exclude:
LOG.debug("Attributes excluded by policy engine: %s",
attributes_to_exclude)
return attributes_to_exclude
| eayunstack/neutron | neutron/pecan_wsgi/hooks/policy_enforcement.py | Python | apache-2.0 | 11,798 | 0.000085 |
"""
====================
Breadth-first search
====================
Basic algorithms for breadth-first searching the nodes of a graph.
"""
import networkx as nx
from collections import defaultdict, deque
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = ['bfs_edges', 'bfs_tree', 'bfs_predecessors', 'bfs_successors']
def bfs_edges(G, source, reverse=False):
"""Produce edges in a breadth-first-search starting at source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
reverse : bool, optional
If True traverse a directed graph in the reverse direction
Returns
-------
edges: generator
A generator of edges in the breadth-first-search.
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> print(list(nx.bfs_edges(G,0)))
[(0, 1), (1, 2)]
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
if reverse and isinstance(G, nx.DiGraph):
neighbors = G.predecessors
else:
neighbors = G.neighbors
visited = set([source])
queue = deque([(source, neighbors(source))])
while queue:
parent, children = queue[0]
try:
child = next(children)
if child not in visited:
yield parent, child
visited.add(child)
queue.append((child, neighbors(child)))
except StopIteration:
queue.popleft()
def bfs_tree(G, source, reverse=False):
"""Return an oriented tree constructed from of a breadth-first-search
starting at source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
reverse : bool, optional
If True traverse a directed graph in the reverse direction
Returns
-------
T: NetworkX DiGraph
An oriented tree
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
    >>> print(list(nx.bfs_tree(G,0).edges()))
    [(0, 1), (1, 2)]
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
T = nx.DiGraph()
T.add_node(source)
T.add_edges_from(bfs_edges(G,source,reverse=reverse))
return T
def bfs_predecessors(G, source):
"""Return dictionary of predecessors in breadth-first-search from source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
Returns
-------
pred: dict
A dictionary with nodes as keys and predecessor nodes as values.
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> print(nx.bfs_predecessors(G,0))
{1: 0, 2: 1}
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
return dict((t,s) for s,t in bfs_edges(G,source))
def bfs_successors(G, source):
"""Return dictionary of successors in breadth-first-search from source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
Returns
-------
succ: dict
       A dictionary with nodes as keys and lists of successor nodes as values.
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> print(nx.bfs_successors(G,0))
{0: [1], 1: [2]}
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
d = defaultdict(list)
for s,t in bfs_edges(G,source):
d[s].append(t)
return dict(d)
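# Illustrative usage sketch (not part of the original module), tying the
# helpers above together on the same path graph used in the docstrings:
#   >>> G = nx.Graph()
#   >>> G.add_path([0, 1, 2])
#   >>> nx.bfs_successors(G, 0)
#   {0: [1], 1: [2]}
#   >>> nx.bfs_predecessors(G, 0)
#   {1: 0, 2: 1}
#   >>> list(nx.bfs_tree(G, 0).edges())
#   [(0, 1), (1, 2)]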
| jcurbelo/networkx | networkx/algorithms/traversal/breadth_first_search.py | Python | bsd-3-clause | 3,994 | 0.002754 |
# Performs network checks
from subprocess import Popen, PIPE
from includes.output import *
class FirewallChecks:
# Constructor
def __init__(self, uuid = None):
# if uuid == None then check the host
self.uuid = uuid
def checkIpfwRule(self, permission, fromIP, toIP, toPort, direction):
cmd = ['ipfw', 'list']
# add the jexec command if we're dealing with a container
if (self.uuid is not None):
cmd = ['jexec', 'trd-' + self.uuid] + cmd
process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdOut, stdErr = process.communicate()
stdOutString = stdOut.decode('utf-8')
stdErrString = stdErr.decode('utf-8')
for line in stdOutString.splitlines():
words = line.split()
            # check against this line
if (words[1] == permission) and (words[7] == fromIP) and (words[9] == toIP) and (words[11] == toPort):
return True
return False
# checks that a value exists in an ipfw table
def checkIpfwTable(self, tableNum, value):
cmd = ['ipfw', 'table',str(tableNum), 'list']
# add the jexec command if we're dealing with a container
if (self.uuid is not None):
cmd = ['jexec', 'trd-' + self.uuid] + cmd
process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdOut, stdErr = process.communicate()
stdOutString = stdOut.decode('utf-8')
stdErrString = stdErr.decode('utf-8')
if (process.returncode != 0):
e_error("Failed to check ipfw table")
print(stdOutString)
print(stdErrString)
            print('exitcode: ' + str(process.returncode))
exit(process.returncode)
# loop over the lines looking for our value
for line in stdOutString.splitlines():
if (line.split()[0] == value):
return True
return False
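# Illustrative usage sketch (an assumption, not part of the original module):
#   hostChecks = FirewallChecks()              # run ipfw on the host
#   jailChecks = FirewallChecks(uuid='1234')   # run ipfw inside jail 'trd-1234'
#   inTable = jailChecks.checkIpfwTable(1, '10.0.0.5')
#   hasRule = jailChecks.checkIpfwRule('allow', '10.0.0.5', '10.0.0.6', '80', 'in')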
| tredly/tredly | tests/testobjects/firewallchecks.py | Python | mit | 2,018 | 0.007433 |
# orm/attributes.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation for class attributes and their interaction
with instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import operator
from .. import util, event, inspection
from . import interfaces, collections, exc as orm_exc
from .base import instance_state, instance_dict, manager_of_class
from .base import PASSIVE_NO_RESULT, ATTR_WAS_SET, ATTR_EMPTY, NO_VALUE,\
NEVER_SET, NO_CHANGE, CALLABLES_OK, SQL_OK, RELATED_OBJECT_OK,\
INIT_OK, NON_PERSISTENT_OK, LOAD_AGAINST_COMMITTED, PASSIVE_OFF,\
PASSIVE_RETURN_NEVER_SET, PASSIVE_NO_INITIALIZE, PASSIVE_NO_FETCH,\
PASSIVE_NO_FETCH_RELATED, PASSIVE_ONLY_PERSISTENT, NO_AUTOFLUSH
from .base import state_str, instance_str
@inspection._self_inspects
class QueryableAttribute(interfaces._MappedAttribute,
interfaces.InspectionAttr,
interfaces.PropComparator):
"""Base class for :term:`descriptor` objects that intercept
attribute events on behalf of a :class:`.MapperProperty`
object. The actual :class:`.MapperProperty` is accessible
via the :attr:`.QueryableAttribute.property`
attribute.
.. seealso::
:class:`.InstrumentedAttribute`
:class:`.MapperProperty`
:attr:`.Mapper.all_orm_descriptors`
:attr:`.Mapper.attrs`
"""
is_attribute = True
def __init__(self, class_, key, impl=None,
comparator=None, parententity=None,
of_type=None):
self.class_ = class_
self.key = key
self.impl = impl
self.comparator = comparator
self._parententity = parententity
self._of_type = of_type
manager = manager_of_class(class_)
# manager is None in the case of AliasedClass
if manager:
# propagate existing event listeners from
# immediate superclass
for base in manager._bases:
if key in base:
self.dispatch._update(base[key].dispatch)
@util.memoized_property
def _supports_population(self):
return self.impl.supports_population
def get_history(self, instance, passive=PASSIVE_OFF):
return self.impl.get_history(instance_state(instance),
instance_dict(instance), passive)
def __selectable__(self):
# TODO: conditionally attach this method based on clause_element ?
return self
@util.memoized_property
def info(self):
"""Return the 'info' dictionary for the underlying SQL element.
The behavior here is as follows:
* If the attribute is a column-mapped property, i.e.
:class:`.ColumnProperty`, which is mapped directly
to a schema-level :class:`.Column` object, this attribute
will return the :attr:`.SchemaItem.info` dictionary associated
with the core-level :class:`.Column` object.
* If the attribute is a :class:`.ColumnProperty` but is mapped to
any other kind of SQL expression other than a :class:`.Column`,
the attribute will refer to the :attr:`.MapperProperty.info`
dictionary associated directly with the :class:`.ColumnProperty`,
assuming the SQL expression itself does not have its own ``.info``
attribute (which should be the case, unless a user-defined SQL
construct has defined one).
* If the attribute refers to any other kind of
:class:`.MapperProperty`, including :class:`.RelationshipProperty`,
the attribute will refer to the :attr:`.MapperProperty.info`
dictionary associated with that :class:`.MapperProperty`.
* To access the :attr:`.MapperProperty.info` dictionary of the
:class:`.MapperProperty` unconditionally, including for a
:class:`.ColumnProperty` that's associated directly with a
:class:`.schema.Column`, the attribute can be referred to using
:attr:`.QueryableAttribute.property` attribute, as
``MyClass.someattribute.property.info``.
.. versionadded:: 0.8.0
.. seealso::
:attr:`.SchemaItem.info`
:attr:`.MapperProperty.info`
"""
return self.comparator.info
@util.memoized_property
def parent(self):
"""Return an inspection instance representing the parent.
This will be either an instance of :class:`.Mapper`
or :class:`.AliasedInsp`, depending upon the nature
of the parent entity which this attribute is associated
with.
"""
return inspection.inspect(self._parententity)
@property
def expression(self):
return self.comparator.__clause_element__()
def __clause_element__(self):
return self.comparator.__clause_element__()
def _query_clause_element(self):
"""like __clause_element__(), but called specifically
by :class:`.Query` to allow special behavior."""
return self.comparator._query_clause_element()
def adapt_to_entity(self, adapt_to_entity):
assert not self._of_type
return self.__class__(adapt_to_entity.entity,
self.key, impl=self.impl,
comparator=self.comparator.adapt_to_entity(
adapt_to_entity),
parententity=adapt_to_entity)
def of_type(self, cls):
return QueryableAttribute(
self.class_,
self.key,
self.impl,
self.comparator.of_type(cls),
self._parententity,
of_type=cls)
def label(self, name):
return self._query_clause_element().label(name)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def hasparent(self, state, optimistic=False):
return self.impl.hasparent(state, optimistic=optimistic) is not False
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object associated with %s '
'has an attribute %r' % (
type(self).__name__,
type(self.comparator).__name__,
self,
key)
)
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
@util.memoized_property
def property(self):
"""Return the :class:`.MapperProperty` associated with this
:class:`.QueryableAttribute`.
Return values here will commonly be instances of
:class:`.ColumnProperty` or :class:`.RelationshipProperty`.
"""
return self.comparator.property
class InstrumentedAttribute(QueryableAttribute):
"""Class bound instrumented attribute which adds basic
:term:`descriptor` methods.
See :class:`.QueryableAttribute` for a description of most features.
"""
def __set__(self, instance, value):
self.impl.set(instance_state(instance),
instance_dict(instance), value, None)
def __delete__(self, instance):
self.impl.delete(instance_state(instance), instance_dict(instance))
def __get__(self, instance, owner):
if instance is None:
return self
dict_ = instance_dict(instance)
if self._supports_population and self.key in dict_:
return dict_[self.key]
else:
return self.impl.get(instance_state(instance), dict_)
def create_proxied_attribute(descriptor):
"""Create an QueryableAttribute / user descriptor hybrid.
Returns a new QueryableAttribute type that delegates descriptor
behavior and getattr() to the given descriptor.
"""
# TODO: can move this to descriptor_props if the need for this
# function is removed from ext/hybrid.py
class Proxy(QueryableAttribute):
"""Presents the :class:`.QueryableAttribute` interface as a
proxy on top of a Python descriptor / :class:`.PropComparator`
combination.
"""
def __init__(self, class_, key, descriptor,
comparator,
adapt_to_entity=None, doc=None,
original_property=None):
self.class_ = class_
self.key = key
self.descriptor = descriptor
self.original_property = original_property
self._comparator = comparator
self._adapt_to_entity = adapt_to_entity
self.__doc__ = doc
@property
def property(self):
return self.comparator.property
@util.memoized_property
def comparator(self):
if util.callable(self._comparator):
self._comparator = self._comparator()
if self._adapt_to_entity:
self._comparator = self._comparator.adapt_to_entity(
self._adapt_to_entity)
return self._comparator
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(adapt_to_entity.entity,
self.key,
self.descriptor,
self._comparator,
adapt_to_entity)
def __get__(self, instance, owner):
if instance is None:
return self
else:
return self.descriptor.__get__(instance, owner)
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
def __getattr__(self, attribute):
"""Delegate __getattr__ to the original descriptor and/or
comparator."""
try:
return getattr(descriptor, attribute)
except AttributeError:
try:
return getattr(self.comparator, attribute)
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object associated with %s '
'has an attribute %r' % (
type(descriptor).__name__,
type(self.comparator).__name__,
self,
attribute)
)
Proxy.__name__ = type(descriptor).__name__ + 'Proxy'
util.monkeypatch_proxied_specials(Proxy, type(descriptor),
name='descriptor',
from_instance=descriptor)
return Proxy
OP_REMOVE = util.symbol("REMOVE")
OP_APPEND = util.symbol("APPEND")
OP_REPLACE = util.symbol("REPLACE")
class Event(object):
"""A token propagated throughout the course of a chain of attribute
events.
Serves as an indicator of the source of the event and also provides
a means of controlling propagation across a chain of attribute
operations.
The :class:`.Event` object is sent as the ``initiator`` argument
when dealing with the :meth:`.AttributeEvents.append`,
:meth:`.AttributeEvents.set`,
and :meth:`.AttributeEvents.remove` events.
The :class:`.Event` object is currently interpreted by the backref
event handlers, and is used to control the propagation of operations
across two mutually-dependent attributes.
.. versionadded:: 0.9.0
:var impl: The :class:`.AttributeImpl` which is the current event
initiator.
:var op: The symbol :attr:`.OP_APPEND`, :attr:`.OP_REMOVE` or
:attr:`.OP_REPLACE`, indicating the source operation.
"""
__slots__ = 'impl', 'op', 'parent_token'
def __init__(self, attribute_impl, op):
self.impl = attribute_impl
self.op = op
self.parent_token = self.impl.parent_token
def __eq__(self, other):
return isinstance(other, Event) and \
other.impl is self.impl and \
other.op == self.op
@property
def key(self):
return self.impl.key
def hasparent(self, state):
return self.impl.hasparent(state)
class AttributeImpl(object):
"""internal implementation for instrumented attributes."""
def __init__(self, class_, key,
callable_, dispatch, trackparent=False, extension=None,
compare_function=None, active_history=False,
parent_token=None, expire_missing=True,
send_modified_events=True,
**kwargs):
"""Construct an AttributeImpl.
\class_
associated class
key
string name of the attribute
\callable_
optional function which generates a callable based on a parent
instance, which produces the "default" values for a scalar or
collection attribute when it's first accessed, if not present
already.
trackparent
if True, attempt to track if an instance has a parent attached
to it via this attribute.
extension
a single or list of AttributeExtension object(s) which will
receive set/delete/append/remove/etc. events. Deprecated.
The event package is now used.
compare_function
a function that compares two values which are normally
assignable to this attribute.
active_history
indicates that get_history() should always return the "old" value,
even if it means executing a lazy callable upon attribute change.
parent_token
Usually references the MapperProperty, used as a key for
the hasparent() function to identify an "owning" attribute.
Allows multiple AttributeImpls to all match a single
owner attribute.
expire_missing
if False, don't add an "expiry" callable to this attribute
during state.expire_attributes(None), if no value is present
for this key.
send_modified_events
if False, the InstanceState._modified_event method will have no
effect; this means the attribute will never show up as changed in a
history entry.
"""
self.class_ = class_
self.key = key
self.callable_ = callable_
self.dispatch = dispatch
self.trackparent = trackparent
self.parent_token = parent_token or self
self.send_modified_events = send_modified_events
if compare_function is None:
self.is_equal = operator.eq
else:
self.is_equal = compare_function
# TODO: pass in the manager here
# instead of doing a lookup
attr = manager_of_class(class_)[key]
for ext in util.to_list(extension or []):
ext._adapt_listener(attr, ext)
if active_history:
self.dispatch._active_history = True
self.expire_missing = expire_missing
__slots__ = (
'class_', 'key', 'callable_', 'dispatch', 'trackparent',
'parent_token', 'send_modified_events', 'is_equal', 'expire_missing'
)
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
def _get_active_history(self):
"""Backwards compat for impl.active_history"""
return self.dispatch._active_history
def _set_active_history(self, value):
self.dispatch._active_history = value
active_history = property(_get_active_history, _set_active_history)
def hasparent(self, state, optimistic=False):
"""Return the boolean value of a `hasparent` flag attached to
the given state.
The `optimistic` flag determines what the default return value
should be if no `hasparent` flag can be located.
As this function is used to determine if an instance is an
*orphan*, instances that were loaded from storage should be
assumed to not be orphans, until a True/False value for this
flag is set.
An instance attribute that is loaded by a callable function
will also not have a `hasparent` flag.
"""
msg = "This AttributeImpl is not configured to track parents."
assert self.trackparent, msg
return state.parents.get(id(self.parent_token), optimistic) \
is not False
def sethasparent(self, state, parent_state, value):
"""Set a boolean flag on the given item corresponding to
whether or not it is attached to a parent object via the
attribute represented by this ``InstrumentedAttribute``.
"""
msg = "This AttributeImpl is not configured to track parents."
assert self.trackparent, msg
id_ = id(self.parent_token)
if value:
state.parents[id_] = parent_state
else:
if id_ in state.parents:
last_parent = state.parents[id_]
if last_parent is not False and \
last_parent.key != parent_state.key:
if last_parent.obj() is None:
raise orm_exc.StaleDataError(
"Removing state %s from parent "
"state %s along attribute '%s', "
"but the parent record "
"has gone stale, can't be sure this "
"is the most recent parent." %
(state_str(state),
state_str(parent_state),
self.key))
return
state.parents[id_] = False
def get_history(self, state, dict_, passive=PASSIVE_OFF):
raise NotImplementedError()
def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
"""Return a list of tuples of (state, obj)
for all objects in this attribute's current state
+ history.
Only applies to object-based attributes.
This is an inlining of existing functionality
which roughly corresponds to:
get_state_history(
state,
key,
passive=PASSIVE_NO_INITIALIZE).sum()
"""
raise NotImplementedError()
def initialize(self, state, dict_):
"""Initialize the given state's attribute with an empty value."""
# As of 1.0, we don't actually set a value in
# dict_. This is so that the state of the object does not get
# modified without emitting the appropriate events.
return None
def get(self, state, dict_, passive=PASSIVE_OFF):
"""Retrieve a value from the given object.
If a callable is assembled on this object's attribute, and
passive is False, the callable will be executed and the
resulting value will be set as the new value for this attribute.
"""
if self.key in dict_:
return dict_[self.key]
else:
# if history present, don't load
key = self.key
if key not in state.committed_state or \
state.committed_state[key] is NEVER_SET:
if not passive & CALLABLES_OK:
return PASSIVE_NO_RESULT
if key in state.expired_attributes:
value = state._load_expired(state, passive)
elif key in state.callables:
callable_ = state.callables[key]
value = callable_(state, passive)
elif self.callable_:
value = self.callable_(state, passive)
else:
value = ATTR_EMPTY
if value is PASSIVE_NO_RESULT or value is NEVER_SET:
return value
elif value is ATTR_WAS_SET:
try:
return dict_[key]
except KeyError:
# TODO: no test coverage here.
raise KeyError(
"Deferred loader for attribute "
"%r failed to populate "
"correctly" % key)
elif value is not ATTR_EMPTY:
return self.set_committed_value(state, dict_, value)
if not passive & INIT_OK:
return NEVER_SET
else:
# Return a new, empty value
return self.initialize(state, dict_)
def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(state, dict_, value, initiator, passive=passive)
def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(state, dict_, None, initiator,
passive=passive, check_old=value)
def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(state, dict_, None, initiator,
passive=passive, check_old=value, pop=True)
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
raise NotImplementedError()
def get_committed_value(self, state, dict_, passive=PASSIVE_OFF):
"""return the unchanged value of this attribute"""
if self.key in state.committed_state:
value = state.committed_state[self.key]
if value in (NO_VALUE, NEVER_SET):
return None
else:
return value
else:
return self.get(state, dict_, passive=passive)
def set_committed_value(self, state, dict_, value):
"""set an attribute value on the given instance and 'commit' it."""
dict_[self.key] = value
state._commit(dict_, [self.key])
return value
class ScalarAttributeImpl(AttributeImpl):
"""represents a scalar value-holding InstrumentedAttribute."""
accepts_scalar_loader = True
uses_objects = False
supports_population = True
collection = False
__slots__ = '_replace_token', '_append_token', '_remove_token'
def __init__(self, *arg, **kw):
super(ScalarAttributeImpl, self).__init__(*arg, **kw)
self._replace_token = self._append_token = None
self._remove_token = None
def _init_append_token(self):
self._replace_token = self._append_token = Event(self, OP_REPLACE)
return self._replace_token
_init_append_or_replace_token = _init_append_token
def _init_remove_token(self):
self._remove_token = Event(self, OP_REMOVE)
return self._remove_token
def delete(self, state, dict_):
# TODO: catch key errors, convert to attributeerror?
if self.dispatch._active_history:
old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
else:
old = dict_.get(self.key, NO_VALUE)
if self.dispatch.remove:
self.fire_remove_event(state, dict_, old, self._remove_token)
state._modified_event(dict_, self, old)
del dict_[self.key]
def get_history(self, state, dict_, passive=PASSIVE_OFF):
if self.key in dict_:
return History.from_scalar_attribute(self, state, dict_[self.key])
else:
if passive & INIT_OK:
passive ^= INIT_OK
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_scalar_attribute(self, state, current)
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
if self.dispatch._active_history:
old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
else:
old = dict_.get(self.key, NO_VALUE)
if self.dispatch.set:
value = self.fire_replace_event(state, dict_,
value, old, initiator)
state._modified_event(dict_, self, old)
dict_[self.key] = value
def fire_replace_event(self, state, dict_, value, previous, initiator):
for fn in self.dispatch.set:
value = fn(
state, value, previous,
initiator or self._replace_token or
self._init_append_or_replace_token())
return value
def fire_remove_event(self, state, dict_, value, initiator):
for fn in self.dispatch.remove:
fn(state, value,
initiator or self._remove_token or self._init_remove_token())
@property
def type(self):
        return self.property.columns[0].type
class ScalarObjectAttributeImpl(ScalarAttributeImpl):
"""represents a scalar-holding InstrumentedAttribute,
where the target object is also instrumented.
Adds events to delete/set operations.
"""
accepts_scalar_loader = False
uses_objects = True
supports_population = True
collection = False
__slots__ = ()
def delete(self, state, dict_):
old = self.get(state, dict_)
self.fire_remove_event(
state, dict_, old,
self._remove_token or self._init_remove_token())
del dict_[self.key]
def get_history(self, state, dict_, passive=PASSIVE_OFF):
if self.key in dict_:
return History.from_object_attribute(self, state, dict_[self.key])
else:
if passive & INIT_OK:
passive ^= INIT_OK
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_object_attribute(self, state, current)
def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
if self.key in dict_:
current = dict_[self.key]
elif passive & CALLABLES_OK:
current = self.get(state, dict_, passive=passive)
else:
return []
# can't use __hash__(), can't use __eq__() here
if current is not None and \
current is not PASSIVE_NO_RESULT and \
current is not NEVER_SET:
ret = [(instance_state(current), current)]
else:
ret = [(None, None)]
if self.key in state.committed_state:
original = state.committed_state[self.key]
if original is not None and \
original is not PASSIVE_NO_RESULT and \
original is not NEVER_SET and \
original is not current:
ret.append((instance_state(original), original))
return ret
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
"""Set a value on the given InstanceState.
"""
if self.dispatch._active_history:
old = self.get(
state, dict_, passive=PASSIVE_ONLY_PERSISTENT | NO_AUTOFLUSH)
else:
old = self.get(state, dict_, passive=PASSIVE_NO_FETCH ^ INIT_OK)
if check_old is not None and \
old is not PASSIVE_NO_RESULT and \
check_old is not old:
if pop:
return
else:
raise ValueError(
"Object %s not associated with %s on attribute '%s'" % (
instance_str(check_old),
state_str(state),
self.key
))
value = self.fire_replace_event(state, dict_, value, old, initiator)
dict_[self.key] = value
def fire_remove_event(self, state, dict_, value, initiator):
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value, initiator or
self._remove_token or self._init_remove_token())
state._modified_event(dict_, self, value)
def fire_replace_event(self, state, dict_, value, previous, initiator):
if self.trackparent:
if (previous is not value and
previous not in (None, PASSIVE_NO_RESULT, NEVER_SET)):
self.sethasparent(instance_state(previous), state, False)
for fn in self.dispatch.set:
value = fn(
state, value, previous, initiator or
self._replace_token or self._init_append_or_replace_token())
state._modified_event(dict_, self, previous)
if self.trackparent:
if value is not None:
self.sethasparent(instance_state(value), state, True)
return value
class CollectionAttributeImpl(AttributeImpl):
"""A collection-holding attribute that instruments changes in membership.
Only handles collections of instrumented objects.
InstrumentedCollectionAttribute holds an arbitrary, user-specified
container object (defaulting to a list) and brokers access to the
CollectionAdapter, a "view" onto that object that presents consistent bag
semantics to the orm layer independent of the user data implementation.
"""
accepts_scalar_loader = False
uses_objects = True
supports_population = True
collection = True
__slots__ = (
'copy', 'collection_factory', '_append_token', '_remove_token',
'_duck_typed_as'
)
def __init__(self, class_, key, callable_, dispatch,
typecallable=None, trackparent=False, extension=None,
copy_function=None, compare_function=None, **kwargs):
super(CollectionAttributeImpl, self).__init__(
class_,
key,
callable_, dispatch,
trackparent=trackparent,
extension=extension,
compare_function=compare_function,
**kwargs)
if copy_function is None:
copy_function = self.__copy
self.copy = copy_function
self.collection_factory = typecallable
self._append_token = None
self._remove_token = None
self._duck_typed_as = util.duck_type_collection(
self.collection_factory())
if getattr(self.collection_factory, "_sa_linker", None):
@event.listens_for(self, "init_collection")
def link(target, collection, collection_adapter):
collection._sa_linker(collection_adapter)
@event.listens_for(self, "dispose_collection")
def unlink(target, collection, collection_adapter):
collection._sa_linker(None)
def _init_append_token(self):
self._append_token = Event(self, OP_APPEND)
return self._append_token
def _init_remove_token(self):
self._remove_token = Event(self, OP_REMOVE)
return self._remove_token
def __copy(self, item):
return [y for y in collections.collection_adapter(item)]
def get_history(self, state, dict_, passive=PASSIVE_OFF):
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_collection(self, state, current)
def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
# NOTE: passive is ignored here at the moment
if self.key not in dict_:
return []
current = dict_[self.key]
current = getattr(current, '_sa_adapter')
if self.key in state.committed_state:
original = state.committed_state[self.key]
if original not in (NO_VALUE, NEVER_SET):
current_states = [((c is not None) and
instance_state(c) or None, c)
for c in current]
original_states = [((c is not None) and
instance_state(c) or None, c)
for c in original]
current_set = dict(current_states)
original_set = dict(original_states)
return \
[(s, o) for s, o in current_states
if s not in original_set] + \
[(s, o) for s, o in current_states
if s in original_set] + \
[(s, o) for s, o in original_states
if s not in current_set]
return [(instance_state(o), o) for o in current]
def fire_append_event(self, state, dict_, value, initiator):
for fn in self.dispatch.append:
value = fn(
state, value,
initiator or self._append_token or self._init_append_token())
state._modified_event(dict_, self, NEVER_SET, True)
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, True)
return value
def fire_pre_remove_event(self, state, dict_, initiator):
state._modified_event(dict_, self, NEVER_SET, True)
def fire_remove_event(self, state, dict_, value, initiator):
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value,
initiator or self._remove_token or self._init_remove_token())
state._modified_event(dict_, self, NEVER_SET, True)
def delete(self, state, dict_):
if self.key not in dict_:
return
state._modified_event(dict_, self, NEVER_SET, True)
collection = self.get_collection(state, state.dict)
collection.clear_with_event()
# TODO: catch key errors, convert to attributeerror?
del dict_[self.key]
def initialize(self, state, dict_):
"""Initialize this attribute with an empty collection."""
_, user_data = self._initialize_collection(state)
dict_[self.key] = user_data
return user_data
def _initialize_collection(self, state):
adapter, collection = state.manager.initialize_collection(
self.key, state, self.collection_factory)
self.dispatch.init_collection(state, collection, adapter)
return adapter, collection
def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
collection = self.get_collection(state, dict_, passive=passive)
if collection is PASSIVE_NO_RESULT:
value = self.fire_append_event(state, dict_, value, initiator)
assert self.key not in dict_, \
"Collection was loaded during event handling."
state._get_pending_mutation(self.key).append(value)
else:
collection.append_with_event(value, initiator)
def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
collection = self.get_collection(state, state.dict, passive=passive)
if collection is PASSIVE_NO_RESULT:
self.fire_remove_event(state, dict_, value, initiator)
assert self.key not in dict_, \
"Collection was loaded during event handling."
state._get_pending_mutation(self.key).remove(value)
else:
collection.remove_with_event(value, initiator)
def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
try:
# TODO: better solution here would be to add
# a "popper" role to collections.py to complement
# "remover".
self.remove(state, dict_, value, initiator, passive=passive)
except (ValueError, KeyError, IndexError):
pass
def set(self, state, dict_, value, initiator=None,
passive=PASSIVE_OFF, pop=False, _adapt=True):
iterable = orig_iterable = value
# pulling a new collection first so that an adaptation exception does
# not trigger a lazy load of the old collection.
new_collection, user_data = self._initialize_collection(state)
if _adapt:
if new_collection._converter is not None:
iterable = new_collection._converter(iterable)
else:
setting_type = util.duck_type_collection(iterable)
receiving_type = self._duck_typed_as
if setting_type is not receiving_type:
given = iterable is None and 'None' or \
iterable.__class__.__name__
wanted = self._duck_typed_as.__name__
raise TypeError(
"Incompatible collection type: %s is not %s-like" % (
given, wanted))
# If the object is an adapted collection, return the (iterable)
# adapter.
if hasattr(iterable, '_sa_iterator'):
iterable = iterable._sa_iterator()
elif setting_type is dict:
if util.py3k:
iterable = iterable.values()
else:
iterable = getattr(
iterable, 'itervalues', iterable.values)()
else:
iterable = iter(iterable)
new_values = list(iterable)
old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT)
if old is PASSIVE_NO_RESULT:
old = self.initialize(state, dict_)
elif old is orig_iterable:
# ignore re-assignment of the current collection, as happens
# implicitly with in-place operators (foo.collection |= other)
return
# place a copy of "old" in state.committed_state
state._modified_event(dict_, self, old, True)
old_collection = old._sa_adapter
dict_[self.key] = user_data
collections.bulk_replace(
new_values, old_collection, new_collection)
del old._sa_adapter
self.dispatch.dispose_collection(state, old, old_collection)
def _invalidate_collection(self, collection):
adapter = getattr(collection, '_sa_adapter')
adapter.invalidated = True
def set_committed_value(self, state, dict_, value):
"""Set an attribute value on the given instance and 'commit' it."""
collection, user_data = self._initialize_collection(state)
if value:
collection.append_multiple_without_event(value)
state.dict[self.key] = user_data
state._commit(dict_, [self.key])
if self.key in state._pending_mutations:
# pending items exist. issue a modified event,
# add/remove new items.
state._modified_event(dict_, self, user_data, True)
pending = state._pending_mutations.pop(self.key)
added = pending.added_items
removed = pending.deleted_items
for item in added:
collection.append_without_event(item)
for item in removed:
collection.remove_without_event(item)
return user_data
def get_collection(self, state, dict_,
user_data=None, passive=PASSIVE_OFF):
"""Retrieve the CollectionAdapter associated with the given state.
Creates a new CollectionAdapter if one does not exist.
"""
if user_data is None:
user_data = self.get(state, dict_, passive=passive)
if user_data is PASSIVE_NO_RESULT:
return user_data
return getattr(user_data, '_sa_adapter')
def backref_listeners(attribute, key, uselist):
"""Apply listeners to synchronize a two-way relationship."""
# use easily recognizable names for stack traces
parent_token = attribute.impl.parent_token
parent_impl = attribute.impl
def _acceptable_key_err(child_state, initiator, child_impl):
raise ValueError(
"Bidirectional attribute conflict detected: "
'Passing object %s to attribute "%s" '
'triggers a modify event on attribute "%s" '
'via the backref "%s".' % (
state_str(child_state),
initiator.parent_token,
child_impl.parent_token,
attribute.impl.parent_token
)
)
def emit_backref_from_scalar_set_event(state, child, oldchild, initiator):
if oldchild is child:
return child
if oldchild is not None and \
oldchild is not PASSIVE_NO_RESULT and \
oldchild is not NEVER_SET:
# With lazy=None, there's no guarantee that the full collection is
# present when updating via a backref.
old_state, old_dict = instance_state(oldchild),\
instance_dict(oldchild)
impl = old_state.manager[key].impl
if initiator.impl is not impl or \
initiator.op not in (OP_REPLACE, OP_REMOVE):
impl.pop(old_state,
old_dict,
state.obj(),
parent_impl._append_token or
parent_impl._init_append_token(),
passive=PASSIVE_NO_FETCH)
if child is not None:
child_state, child_dict = instance_state(child),\
instance_dict(child)
child_impl = child_state.manager[key].impl
if initiator.parent_token is not parent_token and \
initiator.parent_token is not child_impl.parent_token:
_acceptable_key_err(state, initiator, child_impl)
elif initiator.impl is not child_impl or \
initiator.op not in (OP_APPEND, OP_REPLACE):
child_impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
return child
def emit_backref_from_collection_append_event(state, child, initiator):
if child is None:
return
child_state, child_dict = instance_state(child), \
instance_dict(child)
child_impl = child_state.manager[key].impl
if initiator.parent_token is not parent_token and \
initiator.parent_token is not child_impl.parent_token:
_acceptable_key_err(state, initiator, child_impl)
elif initiator.impl is not child_impl or \
initiator.op not in (OP_APPEND, OP_REPLACE):
child_impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
return child
def emit_backref_from_collection_remove_event(state, child, initiator):
if child is not None:
child_state, child_dict = instance_state(child),\
instance_dict(child)
child_impl = child_state.manager[key].impl
if initiator.impl is not child_impl or \
initiator.op not in (OP_REMOVE, OP_REPLACE):
child_impl.pop(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
if uselist:
event.listen(attribute, "append",
emit_backref_from_collection_append_event,
retval=True, raw=True)
else:
event.listen(attribute, "set",
emit_backref_from_scalar_set_event,
retval=True, raw=True)
# TODO: need coverage in test/orm/ of remove event
event.listen(attribute, "remove",
emit_backref_from_collection_remove_event,
retval=True, raw=True)
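# Illustrative note (an assumption, not part of the original module): these
# listeners are attached when a mapped relationship declares a backref, e.g.
#   addresses = relationship("Address", backref="user")
# so that user.addresses.append(a) also sets a.user, and assigning a.user to
# a different User pops the address from the previous user's collection.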
_NO_HISTORY = util.symbol('NO_HISTORY')
_NO_STATE_SYMBOLS = frozenset([
id(PASSIVE_NO_RESULT),
id(NO_VALUE),
id(NEVER_SET)])
History = util.namedtuple("History", [
"added", "unchanged", "deleted"
])
class History(History):
"""A 3-tuple of added, unchanged and deleted values,
representing the changes which have occurred on an instrumented
attribute.
The easiest way to get a :class:`.History` object for a particular
attribute on an object is to use the :func:`.inspect` function::
from sqlalchemy import inspect
hist = inspect(myobject).attrs.myattribute.history
Each tuple member is an iterable sequence:
* ``added`` - the collection of items added to the attribute (the first
tuple element).
* ``unchanged`` - the collection of items that have not changed on the
attribute (the second tuple element).
* ``deleted`` - the collection of items that have been removed from the
attribute (the third tuple element).
"""
def __bool__(self):
return self != HISTORY_BLANK
__nonzero__ = __bool__
def empty(self):
"""Return True if this :class:`.History` has no changes
and no existing, unchanged state.
"""
return not bool(
(self.added or self.deleted)
or self.unchanged
)
def sum(self):
"""Return a collection of added + unchanged + deleted."""
return (self.added or []) +\
(self.unchanged or []) +\
(self.deleted or [])
def non_deleted(self):
"""Return a collection of added + unchanged."""
return (self.added or []) +\
(self.unchanged or [])
def non_added(self):
"""Return a collection of unchanged + deleted."""
return (self.unchanged or []) +\
(self.deleted or [])
def has_changes(self):
"""Return True if this :class:`.History` has changes."""
return bool(self.added or self.deleted)
def as_state(self):
return History(
[(c is not None)
and instance_state(c) or None
for c in self.added],
[(c is not None)
and instance_state(c) or None
for c in self.unchanged],
[(c is not None)
and instance_state(c) or None
for c in self.deleted],
)
@classmethod
def from_scalar_attribute(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if original is _NO_HISTORY:
if current is NEVER_SET:
return cls((), (), ())
else:
return cls((), [current], ())
# don't let ClauseElement expressions here trip things up
elif attribute.is_equal(current, original) is True:
return cls((), [current], ())
else:
# current convention on native scalars is to not
# include information
# about missing previous value in "deleted", but
# we do include None, which helps in some primary
# key situations
if id(original) in _NO_STATE_SYMBOLS:
deleted = ()
else:
deleted = [original]
if current is NEVER_SET:
return cls((), (), deleted)
else:
return cls([current], (), deleted)
@classmethod
def from_object_attribute(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if original is _NO_HISTORY:
if current is NO_VALUE or current is NEVER_SET:
return cls((), (), ())
else:
return cls((), [current], ())
elif current is original:
return cls((), [current], ())
else:
# current convention on related objects is to not
# include information
# about missing previous value in "deleted", and
# to also not include None - the dependency.py rules
# ignore the None in any case.
if id(original) in _NO_STATE_SYMBOLS or original is None:
deleted = ()
else:
deleted = [original]
if current is NO_VALUE or current is NEVER_SET:
return cls((), (), deleted)
else:
return cls([current], (), deleted)
@classmethod
def from_collection(cls, attribute, state, current):
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if current is NO_VALUE or current is NEVER_SET:
return cls((), (), ())
current = getattr(current, '_sa_adapter')
if original in (NO_VALUE, NEVER_SET):
return cls(list(current), (), ())
elif original is _NO_HISTORY:
return cls((), list(current), ())
else:
current_states = [((c is not None) and instance_state(c)
or None, c)
for c in current
]
original_states = [((c is not None) and instance_state(c)
or None, c)
for c in original
]
current_set = dict(current_states)
original_set = dict(original_states)
return cls(
[o for s, o in current_states if s not in original_set],
[o for s, o in current_states if s in original_set],
[o for s, o in original_states if s not in current_set]
)
HISTORY_BLANK = History(None, None, None)
def get_history(obj, key, passive=PASSIVE_OFF):
"""Return a :class:`.History` record for the given object
and attribute key.
:param obj: an object whose class is instrumented by the
attributes package.
:param key: string attribute name.
:param passive: indicates loading behavior for the attribute
if the value is not already present. This is a
bitflag attribute, which defaults to the symbol
:attr:`.PASSIVE_OFF` indicating all necessary SQL
should be emitted.
"""
if passive is True:
util.warn_deprecated("Passing True for 'passive' is deprecated. "
"Use attributes.PASSIVE_NO_INITIALIZE")
passive = PASSIVE_NO_INITIALIZE
elif passive is False:
util.warn_deprecated("Passing False for 'passive' is "
"deprecated. Use attributes.PASSIVE_OFF")
passive = PASSIVE_OFF
return get_state_history(instance_state(obj), key, passive)
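# Illustrative sketch (an assumption, not part of the original module): for a
# persistent object whose scalar attribute was reassigned but not yet flushed,
#   user.name = 'ed'            # previously committed as 'jack'
#   get_history(user, 'name')
# would yield History(added=['ed'], unchanged=(), deleted=['jack']).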
def get_state_history(state, key, passive=PASSIVE_OFF):
return state.get_history(key, passive)
def has_parent(cls, obj, key, optimistic=False):
"""TODO"""
manager = manager_of_class(cls)
state = instance_state(obj)
return manager.has_parent(state, key, optimistic)
def register_attribute(class_, key, **kw):
comparator = kw.pop('comparator', None)
parententity = kw.pop('parententity', None)
doc = kw.pop('doc', None)
desc = register_descriptor(class_, key,
comparator, parententity, doc=doc)
register_attribute_impl(class_, key, **kw)
return desc
def register_attribute_impl(class_, key,
uselist=False, callable_=None,
useobject=False,
impl_class=None, backref=None, **kw):
manager = manager_of_class(class_)
if uselist:
factory = kw.pop('typecallable', None)
typecallable = manager.instrument_collection_class(
key, factory or list)
else:
typecallable = kw.pop('typecallable', None)
dispatch = manager[key].dispatch
if impl_class:
impl = impl_class(class_, key, typecallable, dispatch, **kw)
elif uselist:
impl = CollectionAttributeImpl(class_, key, callable_, dispatch,
typecallable=typecallable, **kw)
elif useobject:
impl = ScalarObjectAttributeImpl(class_, key, callable_,
dispatch, **kw)
else:
impl = ScalarAttributeImpl(class_, key, callable_, dispatch, **kw)
manager[key].impl = impl
if backref:
backref_listeners(manager[key], backref, uselist)
manager.post_configure_attribute(key)
return manager[key]
def register_descriptor(class_, key, comparator=None,
parententity=None, doc=None):
manager = manager_of_class(class_)
descriptor = InstrumentedAttribute(class_, key, comparator=comparator,
parententity=parententity)
descriptor.__doc__ = doc
manager.instrument_attribute(key, descriptor)
return descriptor
def unregister_attribute(class_, key):
manager_of_class(class_).uninstrument_attribute(key)
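# Illustrative sketch of ad-hoc instrumentation with the helpers above
# (``MyClass`` is hypothetical and is assumed to have been instrumented
# already, e.g. via ``sqlalchemy.orm.instrumentation.register_class``):
#
#     register_attribute(MyClass, 'name', uselist=False, useobject=False)
#     obj = MyClass()
#     set_attribute(obj, 'name', 'ed')     # fires history events
#     get_history(obj, 'name')             # History(added=['ed'], ...)
#     unregister_attribute(MyClass, 'name')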
def init_collection(obj, key):
"""Initialize a collection attribute and return the collection adapter.
This function is used to provide direct access to collection internals
for a previously unloaded attribute. e.g.::
collection_adapter = init_collection(someobject, 'elements')
for elem in values:
collection_adapter.append_without_event(elem)
For an easier way to do the above, see
:func:`~sqlalchemy.orm.attributes.set_committed_value`.
obj is an instrumented object instance. An InstanceState
is accepted directly for backwards compatibility but
this usage is deprecated.
"""
state = instance_state(obj)
dict_ = state.dict
return init_state_collection(state, dict_, key)
def init_state_collection(state, dict_, key):
"""Initialize a collection attribute and return the collection adapter."""
attr = state.manager[key].impl
user_data = attr.initialize(state, dict_)
return attr.get_collection(state, dict_, user_data)
def set_committed_value(instance, key, value):
"""Set the value of an attribute with no history events.
Cancels any previous history present. The value should be
a scalar value for scalar-holding attributes, or
an iterable for any collection-holding attribute.
This is the same underlying method used when a lazy loader
fires off and loads additional data from the database.
In particular, this method can be used by application code
which has loaded additional attributes or collections through
separate queries, which can then be attached to an instance
as though it were part of its original loaded state.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.set_committed_value(state, dict_, value)
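# Illustrative usage sketch (``User``/``Address`` and ``session`` are
# assumptions, not part of this module): attach separately queried rows to an
# instance as though they were loaded with it, without emitting history events:
#
#     addresses = session.query(Address).filter_by(user_id=user.id).all()
#     set_committed_value(user, 'addresses', addresses)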
def set_attribute(instance, key, value):
"""Set the value of an attribute, firing history events.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
    Custom attribute management schemes will need to make use
of this method to establish attribute state as understood
by SQLAlchemy.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.set(state, dict_, value, None)
def get_attribute(instance, key):
"""Get the value of an attribute, firing any callables required.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
    Custom attribute management schemes will need to make use
    of this method to access attribute state as understood
by SQLAlchemy.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
return state.manager[key].impl.get(state, dict_)
def del_attribute(instance, key):
"""Delete the value of an attribute, firing history events.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
    Custom attribute management schemes will need to make use
of this method to establish attribute state as understood
by SQLAlchemy.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
state.manager[key].impl.delete(state, dict_)
def flag_modified(instance, key):
"""Mark an attribute on an instance as 'modified'.
This sets the 'modified' flag on the instance and
establishes an unconditional change event for the given attribute.
"""
state, dict_ = instance_state(instance), instance_dict(instance)
impl = state.manager[key].impl
state._modified_event(dict_, impl, NO_VALUE, force=True)
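# Illustrative usage sketch (assumes a mapped class with a mutable ``data``
# column, e.g. a PickleType value; names are hypothetical): in-place mutations
# emit no attribute events, so the change is flagged by hand:
#
#     document.data['status'] = 'archived'   # in-place change, no event fired
#     flag_modified(document, 'data')         # attribute included in next flush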
| ThiefMaster/sqlalchemy | lib/sqlalchemy/orm/attributes.py | Python | mit | 57,173 | 0.000122 |
from django.test import TestCase
from manager.models import Page
from datetime import datetime, timedelta
from django.utils import timezone
class PageTestCase(TestCase):
def setUp(self):
now = timezone.now()
Page.objects.create(url="testurl", description="test description")
def test_regular_page_active(self):
"""Page with no pause or time/date range is active."""
page = Page.objects.get(url="/testurl")
self.assertFalse(page.is_paused())
self.assertTrue(page.is_active())
def test_paused_page_not_active(self):
"""Page that has been paused is not active."""
page = Page.objects.get(url="/testurl")
        page.paused_at = timezone.now().replace(hour=12)
current_time = timezone.now().replace(hour=13)
self.assertTrue(page.is_paused(current_time))
self.assertFalse(page.is_active(current_time))
def test_previously_paused_page_active(self):
"""Page that has is not paused but has been in the past is active."""
page = Page.objects.get(url="/testurl")
page.paused_at = timezone.now() - timedelta(hours=48)
self.assertFalse(page.is_paused())
self.assertTrue(page.is_active())
page.paused_at = timezone.now()
morning = timezone.now().replace(hour=6)
self.assertFalse(page.is_paused(morning))
self.assertTrue(page.is_active(morning))
def test_page_active_time_of_day(self):
"""Page has certain times of day it should be visible."""
page = Page.objects.get(url="/testurl")
now = timezone.now().replace(hour=12)
# Default page has no times -> active
self.assertTrue(page.is_active(now))
# Set start time in the future
page.active_time_start = now.replace(hour=13).time()
self.assertFalse(page.is_active(now))
# Set time to be past start time
now = now.replace(hour=14)
self.assertTrue(page.is_active(now))
# Set end time in the future, still active
page.active_time_end = now.replace(hour=15).time()
self.assertTrue(page.is_active(now))
# Set time to be past end-time -> inactive
now = now.replace(hour=16)
self.assertFalse(page.is_active(now))
# Set start time in the future but bigger than end-time
page.active_time_start = now.replace(hour=17).time()
self.assertFalse(page.is_active(now))
# Time bigger than start time in the evening
now = now.replace(hour=19)
self.assertTrue(page.is_active(now))
def test_page_date_range(self):
"""Page has certains dates it should be visible."""
page = Page.objects.get(url="/testurl")
now = timezone.now()
today = now.date()
page.active_date_start = today
self.assertTrue(page.is_active(now))
page.active_date_start = today + timedelta(days=1)
self.assertFalse(page.is_active(now))
page.active_date_start = today - timedelta(days=7)
page.active_date_end = today - timedelta(days=3)
self.assertFalse(page.is_active(now))
def test_page_weekdays(self):
"""Page is active on certain weekdays"""
page = Page.objects.get(url="/testurl")
now = datetime(2014, 4, 28, 16, 53) # Monday
page.active_date_start = now.date()
self.assertTrue(page.is_active(now))
page.monday = False
self.assertFalse(page.is_active(now))
now = now + timedelta(days=1)
self.assertTrue(page.is_active(now))
| olkku/tf-info | manager/tests.py | Python | bsd-3-clause | 3,567 | 0.001402 |
# -*- coding: utf-8 -*-
"""
RED Plugin
Copyright (C) 2014 Ishraq Ibne Ashraf <ishraq@tinkerforge.com>
Copyright (C) 2014 Olaf Lüke <olaf@tinkerforge.com>
Copyright (C) 2014-2015 Matthias Bolte <matthias@tinkerforge.com>
red_tab_settings_brickd.py: RED settings brickd tab implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt5.QtWidgets import QMessageBox, QWidget
from brickv.plugin_system.plugins.red.ui_red_tab_settings_brickd import Ui_REDTabSettingsBrickd
from brickv.plugin_system.plugins.red.api import *
from brickv.plugin_system.plugins.red import config_parser
from brickv.async_call import async_call
from brickv.utils import get_main_window
# Constants
BRICKD_CONF_PATH = '/etc/brickd.conf'
CBOX_BRICKD_LOG_LEVEL_ERROR = 0
CBOX_BRICKD_LOG_LEVEL_WARN = 1
CBOX_BRICKD_LOG_LEVEL_INFO = 2
CBOX_BRICKD_LOG_LEVEL_DEBUG = 3
CBOX_BRICKD_LED_TRIGGER_CPU = 0
CBOX_BRICKD_LED_TRIGGER_GPIO = 1
CBOX_BRICKD_LED_TRIGGER_HEARTBEAT = 2
CBOX_BRICKD_LED_TRIGGER_MMC = 3
CBOX_BRICKD_LED_TRIGGER_OFF = 4
CBOX_BRICKD_LED_TRIGGER_ON = 5
class REDTabSettingsBrickd(QWidget, Ui_REDTabSettingsBrickd):
def __init__(self):
QWidget.__init__(self)
self.setupUi(self)
self.session = None # Set from REDTabSettings
self.script_manager = None # Set from REDTabSettings
self.image_version = None # Set from REDTabSettings
self.service_state = None # Set from REDTabSettings
self.brickd_conf = {}
self.cbox_brickd_ll.addItem('Error')
self.cbox_brickd_ll.addItem('Warn')
self.cbox_brickd_ll.addItem('Info')
self.cbox_brickd_ll.addItem('Debug')
self.cbox_brickd_rt.addItem('cpu')
self.cbox_brickd_rt.addItem('gpio')
self.cbox_brickd_rt.addItem('heartbeat')
self.cbox_brickd_rt.addItem('mmc')
self.cbox_brickd_rt.addItem('off')
self.cbox_brickd_rt.addItem('on')
self.cbox_brickd_gt.addItem('cpu')
self.cbox_brickd_gt.addItem('gpio')
self.cbox_brickd_gt.addItem('heartbeat')
self.cbox_brickd_gt.addItem('mmc')
self.cbox_brickd_gt.addItem('off')
self.cbox_brickd_gt.addItem('on')
# Signals/slots
self.pbutton_brickd_save.clicked.connect(self.slot_brickd_save_clicked)
self.pbutton_brickd_refresh.clicked.connect(self.slot_brickd_refresh_clicked)
self.sbox_brickd_la_ip1.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_la_ip2.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_la_ip3.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_la_ip4.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_lp.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_lwsp.valueChanged.connect(self.brickd_settings_changed)
self.ledit_brickd_secret.textEdited.connect(self.brickd_settings_changed)
self.cbox_brickd_ll.currentIndexChanged.connect(self.brickd_settings_changed)
self.cbox_brickd_rt.currentIndexChanged.connect(self.brickd_settings_changed)
self.cbox_brickd_gt.currentIndexChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_spi_dly.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_rs485_dly.valueChanged.connect(self.brickd_settings_changed)
def tab_on_focus(self):
self.brickd_conf_rfile = REDFile(self.session)
self.slot_brickd_refresh_clicked()
def tab_off_focus(self):
pass
def tab_destroy(self):
pass
def brickd_button_refresh_enabled(self, state):
self.pbutton_brickd_refresh.setEnabled(state)
if state:
self.pbutton_brickd_refresh.setText('Refresh')
else:
self.pbutton_brickd_refresh.setText('Refreshing...')
def brickd_button_save_enabled(self, state):
self.pbutton_brickd_save.setEnabled(state)
def update_brickd_widget_data(self):
        if self.brickd_conf is None:
return
# Fill keys with default values if not available
        if 'listen.address' not in self.brickd_conf:
            self.brickd_conf['listen.address'] = '0.0.0.0'
        if 'listen.plain_port' not in self.brickd_conf:
            self.brickd_conf['listen.plain_port'] = '4223'
        if 'listen.websocket_port' not in self.brickd_conf:
            self.brickd_conf['listen.websocket_port'] = '0'
        if 'authentication.secret' not in self.brickd_conf:
            self.brickd_conf['authentication.secret'] = ''
        if 'log.level' not in self.brickd_conf:
            self.brickd_conf['log.level'] = 'info'
        if 'led_trigger.green' not in self.brickd_conf:
            self.brickd_conf['led_trigger.green'] = 'heartbeat'
        if 'led_trigger.red' not in self.brickd_conf:
            self.brickd_conf['led_trigger.red'] = 'off'
        if 'poll_delay.spi' not in self.brickd_conf:
            self.brickd_conf['poll_delay.spi'] = '50'
        if 'poll_delay.rs485' not in self.brickd_conf:
            self.brickd_conf['poll_delay.rs485'] = '4000'
l_addr = self.brickd_conf['listen.address'].split('.')
self.sbox_brickd_la_ip1.setValue(int(l_addr[0]))
self.sbox_brickd_la_ip2.setValue(int(l_addr[1]))
self.sbox_brickd_la_ip3.setValue(int(l_addr[2]))
self.sbox_brickd_la_ip4.setValue(int(l_addr[3]))
self.sbox_brickd_lp.setValue(int(self.brickd_conf['listen.plain_port']))
self.sbox_brickd_lwsp.setValue(int(self.brickd_conf['listen.websocket_port']))
self.ledit_brickd_secret.setText(self.brickd_conf['authentication.secret'])
log_level = self.brickd_conf['log.level']
if log_level == 'debug':
self.cbox_brickd_ll.setCurrentIndex(CBOX_BRICKD_LOG_LEVEL_DEBUG)
elif log_level == 'info':
self.cbox_brickd_ll.setCurrentIndex(CBOX_BRICKD_LOG_LEVEL_INFO)
elif log_level == 'warn':
self.cbox_brickd_ll.setCurrentIndex(CBOX_BRICKD_LOG_LEVEL_WARN)
elif log_level == 'error':
self.cbox_brickd_ll.setCurrentIndex(CBOX_BRICKD_LOG_LEVEL_ERROR)
trigger_green = self.brickd_conf['led_trigger.green']
if trigger_green == 'cpu':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_CPU)
elif trigger_green == 'gpio':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_GPIO)
elif trigger_green == 'heartbeat':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_HEARTBEAT)
elif trigger_green == 'mmc':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_MMC)
elif trigger_green == 'off':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_OFF)
elif trigger_green == 'on':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_ON)
trigger_red = self.brickd_conf['led_trigger.red']
if trigger_red == 'cpu':
self.cbox_brickd_rt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_CPU)
elif trigger_red == 'gpio':
self.cbox_brickd_rt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_GPIO)
elif trigger_red == 'heartbeat':
self.cbox_brickd_rt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_HEARTBEAT)
elif trigger_red == 'mmc':
self.cbox_brickd_rt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_MMC)
elif trigger_red == 'off':
self.cbox_brickd_rt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_OFF)
elif trigger_red == 'on':
self.cbox_brickd_rt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_ON)
self.sbox_brickd_spi_dly.setValue(int(self.brickd_conf['poll_delay.spi']))
self.sbox_brickd_rs485_dly.setValue(int(self.brickd_conf['poll_delay.rs485']))
# The slots
def brickd_settings_changed(self, value):
self.brickd_button_save_enabled(True)
def slot_brickd_refresh_clicked(self):
self.brickd_button_refresh_enabled(False)
def cb_open(red_file):
def cb_read(red_file, result):
red_file.release()
if result and result.data is not None:
self.brickd_conf = config_parser.parse(result.data.decode('utf-8'))
self.update_brickd_widget_data()
else:
QMessageBox.critical(get_main_window(),
'Settings | Brick Daemon',
'Error reading brickd config file.')
self.brickd_button_refresh_enabled(True)
self.brickd_button_save_enabled(False)
red_file.read_async(4096, lambda x: cb_read(red_file, x))
def cb_open_error():
self.brickd_button_refresh_enabled(True)
QMessageBox.critical(get_main_window(),
'Settings | Brick Daemon',
'Error opening brickd config file.')
async_call(self.brickd_conf_rfile.open,
(BRICKD_CONF_PATH, REDFile.FLAG_READ_ONLY | REDFile.FLAG_NON_BLOCKING, 0, 0, 0),
cb_open,
cb_open_error)
def slot_brickd_save_clicked(self):
adr = '.'.join((str(self.sbox_brickd_la_ip1.value()),
str(self.sbox_brickd_la_ip2.value()),
str(self.sbox_brickd_la_ip3.value()),
str(self.sbox_brickd_la_ip4.value())))
self.brickd_conf['listen.address'] = adr
self.brickd_conf['listen.plain_port'] = str(self.sbox_brickd_lp.value())
self.brickd_conf['listen.websocket_port'] = str(self.sbox_brickd_lwsp.value())
self.brickd_conf['authentication.secret'] = self.ledit_brickd_secret.text()
index = self.cbox_brickd_ll.currentIndex()
if index == CBOX_BRICKD_LOG_LEVEL_ERROR:
self.brickd_conf['log.level'] = 'error'
elif index == CBOX_BRICKD_LOG_LEVEL_WARN:
self.brickd_conf['log.level'] = 'warn'
elif index == CBOX_BRICKD_LOG_LEVEL_INFO:
self.brickd_conf['log.level'] = 'info'
elif index == CBOX_BRICKD_LOG_LEVEL_DEBUG:
self.brickd_conf['log.level'] = 'debug'
index = self.cbox_brickd_gt.currentIndex()
if index == CBOX_BRICKD_LED_TRIGGER_CPU:
self.brickd_conf['led_trigger.green'] = 'cpu'
elif index == CBOX_BRICKD_LED_TRIGGER_GPIO:
self.brickd_conf['led_trigger.green'] = 'gpio'
elif index == CBOX_BRICKD_LED_TRIGGER_HEARTBEAT:
self.brickd_conf['led_trigger.green'] = 'heartbeat'
elif index == CBOX_BRICKD_LED_TRIGGER_MMC:
self.brickd_conf['led_trigger.green'] = 'mmc'
elif index == CBOX_BRICKD_LED_TRIGGER_OFF:
self.brickd_conf['led_trigger.green'] = 'off'
elif index == CBOX_BRICKD_LED_TRIGGER_ON:
self.brickd_conf['led_trigger.green'] = 'on'
index = self.cbox_brickd_rt.currentIndex()
if index == CBOX_BRICKD_LED_TRIGGER_CPU:
self.brickd_conf['led_trigger.red'] = 'cpu'
elif index == CBOX_BRICKD_LED_TRIGGER_GPIO:
self.brickd_conf['led_trigger.red'] = 'gpio'
elif index == CBOX_BRICKD_LED_TRIGGER_HEARTBEAT:
self.brickd_conf['led_trigger.red'] = 'heartbeat'
elif index == CBOX_BRICKD_LED_TRIGGER_MMC:
self.brickd_conf['led_trigger.red'] = 'mmc'
elif index == CBOX_BRICKD_LED_TRIGGER_OFF:
self.brickd_conf['led_trigger.red'] = 'off'
elif index == CBOX_BRICKD_LED_TRIGGER_ON:
self.brickd_conf['led_trigger.red'] = 'on'
self.brickd_conf['poll_delay.spi'] = str(self.sbox_brickd_spi_dly.value())
self.brickd_conf['poll_delay.rs485'] = str(self.sbox_brickd_rs485_dly.value())
config = config_parser.to_string(self.brickd_conf)
def cb_open(config, red_file):
def cb_write(red_file, result):
red_file.release()
get_main_window().setEnabled(True)
if result is not None:
QMessageBox.critical(get_main_window(),
'Settings | Brick Daemon',
'Error writing brickd config file.')
return
QMessageBox.information(get_main_window(),
'Settings | Brick Daemon',
'Saved configuration successfully, will now restart Brick Daemon.')
self.script_manager.execute_script('restart_brickd', None)
red_file.write_async(config, lambda x: cb_write(red_file, x), None)
def cb_open_error():
get_main_window().setEnabled(True)
QMessageBox.critical(get_main_window(),
'Settings | Brick Daemon',
'Error opening brickd config file.')
get_main_window().setEnabled(False)
async_call(self.brickd_conf_rfile.open,
(BRICKD_CONF_PATH,
REDFile.FLAG_WRITE_ONLY |
REDFile.FLAG_CREATE |
REDFile.FLAG_NON_BLOCKING |
REDFile.FLAG_TRUNCATE, 0o500, 0, 0),
lambda x: cb_open(config, x),
cb_open_error)
| Tinkerforge/brickv | src/brickv/plugin_system/plugins/red/red_tab_settings_brickd.py | Python | gpl-2.0 | 14,059 | 0.003272 |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 17 05:52:09 2016
@author: hclqaVirtualBox1
"""
from object_test import session
import random
import string
import model
test_page = model.Page()
N = 5
test_page.title = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(N))
test_page.content = u'Test content'
print(test_page.title)
session.add(test_page)
print("1 ----- TestPage ID")
print(test_page.id)
"""
At this point the test_page object is known to SQLAlchemy,
but not to the database. To send it to the database,
a flush operation can be forced:
"""
session.flush()
print("2 ----- TestPage ID")
print (test_page.id)
"""
Commit - commits the pending changes to the database
"""
session.commit()
"""
Delete - To delete the test_page object from the database you would use:
"""
session.delete(test_page)
session.flush()
print("3 ----- TestPage ID")
print(test_page.id)
"""
rollback - At this point you can either commit
the transaction or do a rollback.
Let’s do a rollback this time:
"""
session.rollback()
print("4 ----- TestPage ID")
print(test_page.id)
"""
Query - Queries are performed with query objects that are created from the
session. The simplest way to create and use a query object is like this:
"""
page_q = session.query(model.Page)
for page in page_q:
print(page.title)
print("---- page_q.all()")
print(page_q.all())
page = page_q.first()
print(page.title)
print(page_q[2:5])
print(page_q.get(1).title)
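"""
Extra filtering example (a sketch; it assumes the 'content' column defined on
model.Page and used above). filter() takes SQL expression criteria, while
filter_by() takes keyword equality criteria.
"""
print("---- filtered queries")
print(page_q.filter(model.Page.content == u'Test content').count())
print(page_q.filter_by(content=u'Test content').first())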
#
#
#"""
#Working with Objects
#-------------------
#Now let’s think about how you could add a comment to a page.
# One approach would be to insert a new row in the comment table using the
# SQL Expression API, ensuring that the pageid field contained the value 1
# so that the comment was associated with the correct page via a foreign key.
#
# The Object-Relational API provides a much better approach:
#"""
#
#comment1 = model.Comment()
#comment1.name= u'James'
#comment1.email = u'james@example.com'
#comment1.content = u'This page needs a bit more detail ;-)'
#comment2 = model.Comment()
#comment2.name = u'Mike'
#comment2.email = u'mike@example.com'
#page.comments.append(comment1)
#page.comments.append(comment2)
#session.commit()
| mayankjohri/LetsExplorePython | Section 2 - Advance Python/Chapter S2.04 - Database/code/sqlalchemy/runbook.py | Python | gpl-3.0 | 2,255 | 0.01155 |
#!/usr/bin/env python
##################################################################################################
## mtrecv.py
##
## Receive message via RockBLOCK over serial
##################################################################################################
import sys
import os
from rbControl import RockBlockControl
if __name__ == '__main__':
if len(sys.argv) == 1:
# TODO: configurable serial device
RockBlockControl("/dev/ttyUSB0").mt_recv()
else:
print "usage: %s" % os.path.basename(sys.argv[0])
exit(1)
| shimniok/rockblock | mtrecv.py | Python | mit | 576 | 0.006944 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.Model import Model
import logging
logger = logging.getLogger(__name__)
class EndorsementLineCodeType(Model):
MODEL_MAP = {
'tag_name': 'EndorsementLineCode',
'attributes': {
'Type': {},
'Code': {}, # from grPostal
'*': {},
}
}
| cjaymes/pyscap | src/scap/model/xal_2_0/EndorsementLineCodeType.py | Python | gpl-3.0 | 987 | 0.002026 |
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
gi.require_version('GObject', '2.0')
from gi.repository import Gtk, Gdk, GObject
from gcustom.audioWidget import cAudioWidget
from gcustom.progressBar import cProgressBar
class TimedStatusBar(Gtk.Statusbar):
def __init__(self, timeout):
super(TimedStatusBar, self).__init__()
self.timeout = timeout
self.last_tag = None
def clear(self):
self.pop(0)
self.last_tag = None
def del_timer(self):
if self.last_tag:
GObject.source_remove(self.last_tag)
def set_timer(self):
self.del_timer()
self.last_tag = GObject.timeout_add(self.timeout, self.clear)
def output(self, msg):
self.del_timer()
self.clear()
self.push(0, msg)
self.set_timer()
class EventDrawingArea(Gtk.EventBox):
def __init__(self):
super(EventDrawingArea, self).__init__()
self.DrawingArea = Gtk.DrawingArea()
self.add(self.DrawingArea)
self.set_events(Gdk.EventMask.SCROLL_MASK | Gdk.EventMask.POINTER_MOTION_MASK)
self.show_all()
class View(Gtk.Window):
width = 1024
height = 768
audioViewSize = 0.8
subtitlesViewSize = 0.7
widgets = {}
def __init__(self, prog_title):
super(View, self).__init__(title = prog_title)
self.prog_title = prog_title
self.set_default_size(self.width, self.height)
black = Gdk.RGBA(0,0,0,1)
# Widgets
self.widgets['video'] = EventDrawingArea()
self.widgets['video'].override_background_color(0, black)
self.widgets['audio'] = cAudioWidget()
self.widgets['audio'].override_background_color(0, black)
self.widgets['subtitles'] = Gtk.TreeView()
self.widgets['video-eventbox'] = Gtk.EventBox()
self.widgets['scale'] = Gtk.HScale.new_with_range(0,100,1)
self.widgets['scale'].set_property('draw-value', False)
self.widgets['scale'].set_property('has-origin', False)
self.widgets['progress-bar'] = cProgressBar(height = 5)
#self.widgets['statusbar'] = TimedStatusBar(4000)
# Toolbar
self.widgets['toolbar'] = Gtk.Toolbar()
self.widgets['saveFileTB'] = Gtk.ToolButton()
self.widgets['saveFileTB'].set_tooltip_text('Save Project')
self.widgets['saveFileTB'].set_stock_id(Gtk.STOCK_SAVE)
self.widgets['newFileTB'] = Gtk.ToolButton()
self.widgets['newFileTB'].set_tooltip_text('Create Project')
self.widgets['newFileTB'].set_stock_id(Gtk.STOCK_NEW)
self.widgets['openFileTB'] = Gtk.ToolButton()
self.widgets['openFileTB'].set_tooltip_text('Open Project')
self.widgets['openFileTB'].set_stock_id(Gtk.STOCK_OPEN)
self.widgets['separator1TB'] = Gtk.SeparatorToolItem()
self.widgets['undoTB'] = Gtk.ToolButton()
self.widgets['undoTB'].set_tooltip_text('Undo')
self.widgets['undoTB'].set_stock_id(Gtk.STOCK_UNDO)
self.widgets['redoTB'] = Gtk.ToolButton()
self.widgets['redoTB'].set_tooltip_text('Redo')
self.widgets['redoTB'].set_stock_id(Gtk.STOCK_REDO)
self.widgets['preferencesTB'] = Gtk.ToolButton()
self.widgets['preferencesTB'].set_tooltip_text('Preferences')
self.widgets['preferencesTB'].set_stock_id(Gtk.STOCK_PROPERTIES)
self.widgets['importSRTTB'] = Gtk.ToolButton()
self.widgets['importSRTTB'].set_tooltip_text('Import/Merge Subtitles')
self.widgets['importSRTTB'].set_stock_id(Gtk.STOCK_ADD)
self.widgets['splitSubsTB'] = Gtk.ToolButton()
self.widgets['splitSubsTB'].set_tooltip_text('Split Subtitle')
self.widgets['splitSubsTB'].set_stock_id(Gtk.STOCK_CUT)
self.widgets['visualSyncTB'] = Gtk.ToolButton()
self.widgets['visualSyncTB'].set_tooltip_text('Visual Sync')
self.widgets['visualSyncTB'].set_stock_id(Gtk.STOCK_REFRESH)
self.widgets['autoSyncOtherVersionTB'] = Gtk.ToolButton()
self.widgets['autoSyncOtherVersionTB'].set_tooltip_text('Try to automatically sync another version.')
self.widgets['autoSyncOtherVersionTB'].set_stock_id(Gtk.STOCK_UNINDENT)
self.widgets['checkTB'] = Gtk.ToolButton()
self.widgets['checkTB'].set_tooltip_text('Check Subtitles')
self.widgets['checkTB'].set_stock_id(Gtk.STOCK_SPELL_CHECK)
self.widgets['separator2TB'] = Gtk.SeparatorToolItem()
self.widgets['separator3TB'] = Gtk.SeparatorToolItem()
self.widgets['separator4TB'] = Gtk.SeparatorToolItem()
self.widgets['position-label'] = Gtk.Label('Position: 00:00:00,000 ')
self.widgets['duration-label'] = Gtk.Label('Duration: 00:00:00,000\t\t')
self.widgets['MergeSplitTB'] = Gtk.ToolButton()
self.widgets['MergeSplitTB'].set_tooltip_text('Merge/Split (non project subs)')
self.widgets['MergeSplitTB'].set_stock_id(Gtk.STOCK_PAGE_SETUP)
self.widgets['toolbar'].add(self.widgets['newFileTB'])
self.widgets['toolbar'].add(self.widgets['openFileTB'])
self.widgets['toolbar'].add(self.widgets['saveFileTB'])
self.widgets['toolbar'].add(self.widgets['separator1TB'])
self.widgets['toolbar'].add(self.widgets['preferencesTB'])
self.widgets['toolbar'].add(self.widgets['separator2TB'])
self.widgets['toolbar'].add(self.widgets['undoTB'])
self.widgets['toolbar'].add(self.widgets['redoTB'])
self.widgets['toolbar'].add(self.widgets['separator3TB'])
self.widgets['toolbar'].add(self.widgets['importSRTTB'])
self.widgets['toolbar'].add(self.widgets['splitSubsTB'])
#self.widgets['toolbar'].add(self.widgets['autoSyncOtherVersionTB'])
self.widgets['toolbar'].add(self.widgets['checkTB'])
self.widgets['toolbar'].add(self.widgets['visualSyncTB'])
self.widgets['toolbar'].add(self.widgets['separator4TB'])
self.widgets['toolbar'].add(self.widgets['MergeSplitTB'])
# AudioView Context Menu
self.widgets['AudioContextMenu'] = Gtk.Menu()
self.widgets['ACM-SplitHere'] = Gtk.MenuItem('Split Subtitle')
self.widgets['ACM-CreateHere'] = Gtk.MenuItem('New Subtitle')
self.widgets['ACM-DeleteSub'] = Gtk.MenuItem('Delete Subtitle')
self.widgets['ACM-ResetAudioScale'] = Gtk.MenuItem('Reset Vertical Zoom')
self.widgets['ACM-StickZoom'] = Gtk.CheckMenuItem('Stick Zoom')
self.widgets['ACM-StickZoom'].set_tooltip_text('Keep current horizontal zoom size')
self.widgets['AudioContextMenu'].add(self.widgets['ACM-CreateHere'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-SplitHere'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-DeleteSub'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-ResetAudioScale'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-StickZoom'])
self.widgets['ACM-StickZoom'].show()
self.widgets['ACM-SplitHere'].show()
self.widgets['ACM-CreateHere'].show()
self.widgets['ACM-DeleteSub'].show()
# Header Context Menu
self.widgets['HeaderContextMenu'] = Gtk.Menu()
self.widgets['HCM-N'] = Gtk.CheckMenuItem('N')
self.widgets['HCM-StartTime'] = Gtk.CheckMenuItem('StartTime')
self.widgets['HCM-StopTime'] = Gtk.CheckMenuItem('StopTime')
self.widgets['HCM-Duration'] = Gtk.CheckMenuItem('Duration')
self.widgets['HCM-Reference'] = Gtk.CheckMenuItem('Reference')
self.widgets['HCM-RS'] = Gtk.CheckMenuItem('RS')
self.widgets['HCM-Count'] = Gtk.CheckMenuItem('Count')
self.widgets['HCM-Info'] = Gtk.CheckMenuItem('Info')
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-N'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-StartTime'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-StopTime'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-Duration'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-Reference'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-RS'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-Count'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-Info'])
self.widgets['HCM-N'].set_active(True)
self.widgets['HCM-StartTime'].set_active(True)
self.widgets['HCM-StopTime'].set_active(True)
self.widgets['HCM-Duration'].set_active(True)
self.widgets['HCM-Reference'].set_active(True)
self.widgets['HCM-RS'].set_active(True)
self.widgets['HCM-Count'].set_active(True)
self.widgets['HCM-Info'].set_active(True)
self.widgets['HCM-N'].show()
self.widgets['HCM-StartTime'].show()
self.widgets['HCM-StopTime'].show()
self.widgets['HCM-Duration'].show()
self.widgets['HCM-Reference'].show()
self.widgets['HCM-RS'].show()
self.widgets['HCM-Count'].show()
self.widgets['HCM-Info'].show()
# TreeView Context Menu
self.widgets['TVContextMenu'] = Gtk.Menu()
self.widgets['TVCM-Delete'] = Gtk.MenuItem('Delete Subtitle(s)')
self.widgets['TVCM-Merge'] = Gtk.MenuItem('Merge Subtitles')
self.widgets['TVCM-Merge-To-Dialog'] = Gtk.MenuItem('Merge to Dialog')
self.widgets['TVCM-DurationEdit'] = Gtk.MenuItem('Edit Duration')
self.widgets['TVCM-TimeEditDialog'] = Gtk.MenuItem('Edit Time')
self.widgets['TVCM-SyncDialog'] = Gtk.MenuItem('Sync')
self.widgets['TVContextMenu'].add(self.widgets['TVCM-Merge'])
self.widgets['TVContextMenu'].add(self.widgets['TVCM-Merge-To-Dialog'])
self.widgets['TVContextMenu'].add(self.widgets['TVCM-Delete'])
self.widgets['TVContextMenu'].add(self.widgets['TVCM-DurationEdit'])
self.widgets['TVContextMenu'].add(self.widgets['TVCM-TimeEditDialog'])
self.widgets['TVContextMenu'].add(self.widgets['TVCM-SyncDialog'])
self.widgets['TVCM-Delete'].show()
self.widgets['TVCM-Merge'].show()
self.widgets['TVCM-Merge-To-Dialog'].show()
# Video Context Menu
self.widgets['VideoContextMenu'] = Gtk.Menu()
self.widgets['VCM-SceneDetect'] = Gtk.MenuItem('Detect Scene Changes')
self.widgets['VCM-StopDetection'] = Gtk.MenuItem('Stop Detection')
self.widgets['VCM-TwoPassSD'] = Gtk.CheckMenuItem('Two-pass Detection (faster)')
self.widgets['VCM-TwoPassSD'].set_active(True)
self.widgets['VCM-Detach'] = Gtk.MenuItem('Detach Video')
self.widgets['VCM-ChangeSubFont'] = Gtk.MenuItem('Change Subtitle Font')
self.widgets['VCM-Separator1'] = Gtk.SeparatorMenuItem()
self.widgets['VCM-Separator2'] = Gtk.SeparatorMenuItem()
self.widgets['VCM-Lock'] = Gtk.CheckMenuItem('Lock')
self.widgets['VCM-Close'] = Gtk.MenuItem('Close')
self.widgets['VideoContextMenu'].add(self.widgets['VCM-SceneDetect'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-StopDetection'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-TwoPassSD'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-Separator1'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-ChangeSubFont'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-Separator2'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-Detach'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-Lock'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-Close'])
self.widgets['VCM-SceneDetect'].show()
self.widgets['VCM-StopDetection'].hide()
self.widgets['VCM-TwoPassSD'].show()
self.widgets['VCM-Separator1'].show()
self.widgets['VCM-ChangeSubFont'].show()
self.widgets['VCM-Separator2'].show()
self.widgets['VCM-Detach'].show()
self.widgets['VCM-Lock'].hide()
self.widgets['VCM-Close'].hide()
# Containers
self.widgets['root-paned-container'] = Gtk.Paned(orientation = Gtk.Orientation.VERTICAL)
self.widgets['audio-video-container'] = Gtk.Paned(orientation = Gtk.Orientation.HORIZONTAL)
self.widgets['audio-video-container'].override_background_color(Gtk.StateType.NORMAL, black)
self.widgets['toolbar-subtitles-container'] = Gtk.VBox()
self.widgets['audio-scale-container'] = Gtk.VBox()
self.widgets['toolbar-reports-container'] = Gtk.HBox()
self.widgets['vertical-sub-scrollable'] = Gtk.ScrolledWindow()
self.widgets['vertical-sub-scrollable'].set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
# Layout
self.widgets['root-paned-container'].set_position((1 - self.subtitlesViewSize) * self.height)
self.widgets['audio-video-container'].set_position(self.audioViewSize * self.width)
self.widgets['video-eventbox'].add(self.widgets['video'])
self.widgets['audio-scale-container'].pack_start(self.widgets['audio'], True, True, 0)
self.widgets['audio-scale-container'].pack_start(self.widgets['progress-bar'], False, False, 0)
self.widgets['audio-scale-container'].pack_end(self.widgets['scale'], False, False, 0)
self.widgets['audio-video-container'].add(self.widgets['audio-scale-container'])
self.widgets['audio-video-container'].add(self.widgets['video-eventbox'])
self.widgets['toolbar-reports-container'].pack_start(self.widgets['toolbar'],True, True, 0)
self.widgets['toolbar-reports-container'].pack_start(self.widgets['duration-label'], False, False, 0)
self.widgets['toolbar-reports-container'].pack_end(self.widgets['position-label'], False, False, 50)
self.widgets['toolbar-subtitles-container'].pack_start(self.widgets['toolbar-reports-container'], False, False, 0)
self.widgets['vertical-sub-scrollable'].add(self.widgets['subtitles'])
self.widgets['toolbar-subtitles-container'].pack_start(self.widgets['vertical-sub-scrollable'], True, True, 0)
#self.widgets['toolbar-subtitles-container'].pack_end(self.widgets['statusbar'], False, False, 0)
self.widgets['root-paned-container'].add(self.widgets['audio-video-container'])
self.widgets['root-paned-container'].add(self.widgets['toolbar-subtitles-container'])
self.add(self.widgets['root-paned-container'])
def __getitem__(self, key):
return self.widgets[key]
| jtk1rk/xsubedit | view.py | Python | gpl-3.0 | 14,528 | 0.00351 |
"""
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'le0az@o@j&x@5gl01_fp6&rj445lmxj15ngt2x^x#$ng71)^yd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.myapp',
'apps.outsideapp',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| thinkAmi-sandbox/Django_separate_model_file-sample | myproject/settings.py | Python | unlicense | 3,216 | 0.001244 |
import asyncio
import unittest
import random
from gremlinpy import Gremlin
from . import ConnectionTestCases, EntityTestCases, MapperTestCases
from gizmo import Mapper, Request, Collection, Vertex, Edge
from gizmo.mapper import EntityMapper
class BaseTests(unittest.TestCase):
def setUp(self):
self.request = Request('localhost', port=8182)
self.gremlin = Gremlin('gizmo_testing')
self.mapper = Mapper(self.request, self.gremlin)
self.ioloop = asyncio.get_event_loop()
super(BaseTests, self).setUp()
def tearDown(self):
super(BaseTests, self).tearDown()
async def purge(self):
script = "%s.V().map{it.get().remove()}" % self.gremlin.gv
res = await self.mapper.query(script=script)
return res
class ConnectionTests(BaseTests, ConnectionTestCases):
pass
class EntityTests(EntityTestCases, BaseTests):
pass
class MapperTests(MapperTestCases, BaseTests):
pass
class CollectionTests(BaseTests):
pass
class TraversalTests(BaseTests):
pass
if __name__ == '__main__':
unittest.main()
| emehrkay/Gizmo | gizmo/test/integration/tinkerpop.py | Python | mit | 1,104 | 0.001812 |
from yajuu.extractors.extractor import Extractor
from yajuu.media.sources.source_list import SourceList
class SeasonExtractor(Extractor):
def __init__(self, media, season, range_):
super().__init__(media)
self.seasons = {}
self.season = season
self.start, self.end = range_
# Overwrite
self.sources = {}
def _should_process(self, episode_identifier):
try:
episode_number = int(episode_identifier)
except ValueError:
return False
return self.start <= episode_number <= self.end
def _add_source(self, identifier, source):
if identifier not in self.sources:
self.sources[identifier] = SourceList()
self.sources[identifier].add_source(source)
return True
def _add_sources(self, identifier, sources):
returned = []
if sources is None:
return
for source in sources:
returned.append(self._add_source(identifier, source))
return returned
| vivescere/yajuu | yajuu/extractors/season_extractor.py | Python | gpl-3.0 | 1,048 | 0.000954 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License version 3 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Unit tests for the dispatcher module."""
from __future__ import print_function
import types
import pytest
from nav.smsd import dispatcher
class TestDispatcherHandler(object):
"""Tests for the DispatcherHandler class.
Uses a subclass of the DispatcherHandler to provide a fake
dispatcher loader function. This loads a faked dispatcher
module/class that will cooperate with this unit test.
"""
config = {
'main': {'exit_on_permanent_error': 'yes'},
'dispatcher': {'dispatcherretry': '30',
'dispatcher1': 'FakeDispatcher'},
'FakeDispatcher': {}
}
def test_init_with_simple_config(self):
assert FakeDispatcherHandler(self.config)
def test_empty_message_list(self):
handler = FakeDispatcherHandler(self.config)
assert handler.sendsms('fakenumber', [])
def test_dispatcher_exception(self):
handler = FakeDispatcherHandler(self.config)
with pytest.raises(dispatcher.DispatcherError):
handler.sendsms('failure', [])
def test_dispatcher_unhandled_exception(self):
handler = FakeDispatcherHandler(self.config)
with pytest.raises(dispatcher.DispatcherError):
handler.sendsms('unhandled', [])
class FakeDispatcherHandler(dispatcher.DispatcherHandler):
def importbyname(self, name):
print("import by name: %r" % name)
fakemodule = types.ModuleType('fakedispatcher')
fakemodule.FakeDispatcher = FakeDispatcher
return fakemodule
class FakeDispatcher(object):
def __init__(self, *args, **kwargs):
self.lastfailed = None
def sendsms(self, phone, msgs):
print("got phone %r and msgs %r" % (phone, msgs))
if phone == 'failure':
raise dispatcher.DispatcherError('FakeDispatcher failed')
elif phone == 'unhandled':
raise Exception('This exception should be unknown')
return (None, 1, 0, 1, 1)
| UNINETT/nav | tests/unittests/smsd/dispatcher_test.py | Python | gpl-2.0 | 2,660 | 0 |
# -*- coding: utf-8 -*-
import unittest
import os
import numpy as npy
from skrf.media import DefinedGammaZ0, Media
from skrf.network import Network
from skrf.frequency import Frequency
import skrf
class DefinedGammaZ0TestCase(unittest.TestCase):
def setUp(self):
self.files_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'qucs_prj'
)
self.dummy_media = DefinedGammaZ0(
frequency = Frequency(1,100,21,'ghz'),
gamma=1j,
z0 = 50 ,
)
def test_impedance_mismatch(self):
"""
"""
fname = os.path.join(self.files_dir,\
'impedanceMismatch,50to25.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.thru(z0=50)**\
self.dummy_media.thru(z0=25)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_resistor(self):
"""
"""
fname = os.path.join(self.files_dir,\
'resistor,1ohm.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.resistor(1)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_capacitor(self):
"""
"""
fname = os.path.join(self.files_dir,\
'capacitor,p01pF.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.capacitor(.01e-12)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_inductor(self):
"""
"""
fname = os.path.join(self.files_dir,\
'inductor,p1nH.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.inductor(.1e-9)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_scalar_gamma_z0_media(self):
"""
test ability to create a Media from scalar quantities for gamma/z0
and change frequency resolution
"""
a = DefinedGammaZ0 (Frequency(1,10,101),gamma=1j,z0 = 50)
self.assertEqual(a.line(1),a.line(1))
# we should be able to re-sample the media
a.npoints = 21
self.assertEqual(len(a.gamma), len(a))
self.assertEqual(len(a.z0), len(a))
self.assertEqual(len(a.z0), len(a))
def test_vector_gamma_z0_media(self):
"""
test ability to create a Media from vector quantities for gamma/z0
"""
freq = Frequency(1,10,101)
a = DefinedGammaZ0(freq,
gamma = 1j*npy.ones(len(freq)) ,
z0 = 50*npy.ones(len(freq)),
)
self.assertEqual(a.line(1),a.line(1))
with self.assertRaises(NotImplementedError):
a.npoints=4
def test_write_csv(self):
fname = os.path.join(self.files_dir,\
'out.csv')
self.dummy_media.write_csv(fname)
os.remove(fname)
def test_from_csv(self):
fname = os.path.join(self.files_dir,\
'out.csv')
self.dummy_media.write_csv(fname)
a_media = DefinedGammaZ0.from_csv(fname)
self.assertEqual(a_media,self.dummy_media)
os.remove(fname)
class STwoPortsNetworkTestCase(unittest.TestCase):
"""
Check that S parameters of media base elements versus theoretical results.
"""
def setUp(self):
self.dummy_media = DefinedGammaZ0(
frequency=Frequency(1, 100, 21, 'GHz'),
gamma=1j,
z0=50,
)
def test_s_series_element(self):
"""
Series elements of impedance Z:
○---[Z]---○
○---------○
have S matrix of the form:
[ Z/Z0 / (Z/Z0 + 2) 2/(Z/Z0 + 2) ]
[ 2/(Z/Z0 + 2) Z/Z0 / (Z/Z0 + 2) ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.resistor(R)
Z0 = self.dummy_media.z0
S11 = (R/Z0) / (R/Z0 + 2)
S21 = 2 / (R/Z0 + 2)
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
def test_s_shunt_element(self):
"""
Shunt elements of admittance Y:
○---------○
|
[Y]
|
○---------○
have S matrix of the form:
[ -Y Z0 / (Y Z0 + 2) 2/(Y Z0 + 2) ]
        [ 2/(Y Z0 + 2)        -Y Z0 / (Y Z0 + 2) ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.shunt(self.dummy_media.resistor(R)**self.dummy_media.short())
Z0 = self.dummy_media.z0
S11 = -(1/R*Z0) / (1/R*Z0 + 2)
S21 = 2 / (1/R*Z0 + 2)
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
def test_s_lossless_line(self):
"""
Lossless transmission line of characteristic impedance z1, length l
and wavenumber beta
_______
○----- -----○
z0 z1 z0
○-----_______-----○
"""
l = 5.0
z1 = 30.0
z0 = self.dummy_media.z0
ntw = self.dummy_media.line(d=0, unit='m', z0=z0) \
** self.dummy_media.line(d=l, unit='m', z0=z1) \
** self.dummy_media.line(d=0, unit='m', z0=z0)
beta = self.dummy_media.beta
_z1 = z1/z0
S11 = 1j*(_z1**2 - 1)*npy.sin(beta*l) / \
(2*_z1*npy.cos(beta*l) + 1j*(_z1**2 + 1)*npy.sin(beta*l))
S21 = 2*_z1 / \
(2*_z1*npy.cos(beta*l) + 1j*(_z1**2 + 1)*npy.sin(beta*l))
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
def test_s_lossy_line(self):
"""
Lossy transmission line of characteristic impedance Z0, length l
and propagation constant gamma = alpha + j beta
○---------○
○---------○
has ABCD matrix of the form:
[ cosh(gamma l) Z0 sinh(gamma l) ]
[ 1/Z0 sinh(gamma l) cosh(gamma l) ]
"""
class ABCDTwoPortsNetworkTestCase(unittest.TestCase):
"""
Check that ABCD parameters of media base elements (such as lumped elements)
versus theoretical results.
"""
def setUp(self):
self.dummy_media = DefinedGammaZ0(
frequency=Frequency(1, 100, 21,'GHz'),
gamma=1j,
z0=50 ,
)
def test_abcd_series_element(self):
"""
Series elements of impedance Z:
○---[Z]---○
○---------○
have ABCD matrix of the form:
[ 1 Z ]
[ 0 1 ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.resistor(R)
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], R)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_shunt_element(self):
"""
Shunt elements of admittance Y:
○---------○
|
[Y]
|
○---------○
have ABCD matrix of the form:
[ 1 0 ]
[ Y 1 ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.shunt(self.dummy_media.resistor(R)**self.dummy_media.short())
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1.0/R)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_series_shunt_elements(self):
"""
Series and Shunt elements of impedance Zs and Zp:
○---[Zs]--------○
|
[Zp]
|
○--------------○
have ABCD matrix of the form:
[ 1 + Zs/Zp Zs ]
[ 1/Zp 1 ]
"""
Rs = 2.0
Rp = 3.0
serie_resistor = self.dummy_media.resistor(Rs)
shunt_resistor = self.dummy_media.shunt(self.dummy_media.resistor(Rp) ** self.dummy_media.short())
ntw = serie_resistor ** shunt_resistor
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0+Rs/Rp)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], Rs)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1.0/Rp)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_thru(self):
"""
Thru has ABCD matrix of the form:
[ 1 0 ]
[ 0 1 ]
"""
ntw = self.dummy_media.thru()
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_lossless_line(self):
"""
Lossless transmission line of characteristic impedance Z0, length l
and wavenumber beta
○---------○
○---------○
has ABCD matrix of the form:
[ cos(beta l) j Z0 sin(beta l) ]
[ j/Z0 sin(beta l) cos(beta l) ]
"""
l = 5
z0 = 80
ntw = self.dummy_media.line(d=l, unit='m', z0=z0)
beta = self.dummy_media.beta
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], npy.cos(beta*l))
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], 1j*z0*npy.sin(beta*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1j/z0*npy.sin(beta*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], npy.cos(beta*l))
def test_abcd_lossy_line(self):
"""
Lossy transmission line of characteristic impedance Z0, length l
and propagation constant gamma = alpha + j beta
○---------○
○---------○
has ABCD matrix of the form:
[ cosh(gamma l) Z0 sinh(gamma l) ]
[ 1/Z0 sinh(gamma l) cosh(gamma l) ]
"""
l = 5.0
z0 = 30.0
alpha = 0.5
beta = 2.0
lossy_media = DefinedGammaZ0(
frequency=Frequency(1, 100, 21, 'GHz'),
gamma=alpha + 1j*beta,
z0=z0
)
ntw = lossy_media.line(d=l, unit='m', z0=z0)
gamma = lossy_media.gamma
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], npy.cosh(gamma*l))
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], z0*npy.sinh(gamma*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1.0/z0*npy.sinh(gamma*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], npy.cosh(gamma*l))
| scikit-rf/scikit-rf | skrf/media/tests/test_media.py | Python | bsd-3-clause | 11,278 | 0.011601 |
#!/usr/bin/env python
"""Tests for API call routers."""
from absl import app
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import compatibility
from grr_response_proto import tests_pb2
from grr_response_server import access_control
from grr_response_server.gui import api_call_router
from grr.test_lib import test_lib
class SingleMethodDummyApiCallRouter(api_call_router.ApiCallRouter):
"""Dummy ApiCallRouter implementation overriding just a single method."""
@api_call_router.Http("GET", "/api/foo/bar")
def SomeRandomMethod(self, args, context=None):
pass
def CreateFlow(self, args, context=None):
pass
class SingleMethodDummyApiCallRouterChild(SingleMethodDummyApiCallRouter):
pass
class EmptyRouter(api_call_router.ApiCallRouterStub):
pass
class ApiCallRouterTest(test_lib.GRRBaseTest):
"""Tests for ApiCallRouter."""
def testAllAnnotatedMethodsAreNotImplemented(self):
# We can't initialize ApiCallRouter directly because it's abstract.
router = EmptyRouter()
for name in api_call_router.ApiCallRouter.GetAnnotatedMethods():
with self.assertRaises(NotImplementedError):
getattr(router, name)(None, context=None)
def testGetAnnotatedMethodsReturnsNonEmptyDict(self):
methods = api_call_router.ApiCallRouterStub.GetAnnotatedMethods()
self.assertTrue(methods)
def testGetAnnotatedMethodsReturnsMethodsFromAllClassesInMroChain(self):
self.assertIn("SomeRandomMethod",
SingleMethodDummyApiCallRouter.GetAnnotatedMethods())
self.assertIn("SomeRandomMethod",
SingleMethodDummyApiCallRouterChild.GetAnnotatedMethods())
def testHttpUrlParametersMatchArgs(self):
"""Tests that URL params are actual fields of ArgsType in HTTP routes."""
# Example:
# @ArgsType(api_client.ApiGetClientArgs)
# @Http("GET", "/api/clients/<client_id>")
methods = api_call_router.ApiCallRouterStub.GetAnnotatedMethods()
for method in methods.values():
if method.args_type is None:
continue # Skip methods like ListOutputPluginDescriptors.
valid_parameters = method.args_type.type_infos.descriptor_names
for name in method.GetQueryParamsNames():
self.assertIn(
name, valid_parameters,
"Parameter {} in route {} is not found in {}. "
"Valid parameters are {}.".format(
name, method.name, compatibility.GetName(method.args_type),
valid_parameters))
def testRouterMethodNamesAreInLengthLimit(self):
for name in api_call_router.ApiCallRouterStub.GetAnnotatedMethods():
self.assertLessEqual(
len(name), 128,
"Router method name {} exceeds MySQL length limit of 128.".format(
name))
class DisabledApiCallRouterTest(test_lib.GRRBaseTest):
"""Tests for ApiCallRouter."""
def testRaisesUnauthorizedAccess(self):
router = api_call_router.DisabledApiCallRouter()
with self.assertRaises(access_control.UnauthorizedAccess):
router.SearchClients(None)
class ApiSingleStringArgument(rdf_structs.RDFProtoStruct):
protobuf = tests_pb2.ApiSingleStringArgument
class RouterMethodMetadataTest(test_lib.GRRBaseTest):
"""Tests for RouterMethodMetadata."""
def testGetQueryParamsNamesReturnsEmptyListsOnEmptyMetadata(self):
m = api_call_router.RouterMethodMetadata("SomeMethod")
self.assertEqual(m.GetQueryParamsNames(), [])
def testGetQueryParamsNamesReturnsMandaotryParamsCorrectly(self):
m = api_call_router.RouterMethodMetadata(
"SomeMethod", http_methods=[("GET", "/a/<arg>/<bar:zoo>", {})])
self.assertEqual(m.GetQueryParamsNames(), ["arg", "zoo"])
def testGetQueryParamsNamesReturnsOptionalParamsForGET(self):
m = api_call_router.RouterMethodMetadata(
"SomeMethod",
args_type=ApiSingleStringArgument,
http_methods=[("GET", "/a/<foo>/<bar:zoo>", {})])
self.assertEqual(m.GetQueryParamsNames(), ["foo", "zoo", "arg"])
def testGetQueryParamsNamesReturnsNoOptionalParamsForPOST(self):
m = api_call_router.RouterMethodMetadata(
"SomeMethod",
args_type=ApiSingleStringArgument,
http_methods=[("POST", "/a/<foo>/<bar:zoo>", {})])
self.assertEqual(m.GetQueryParamsNames(), ["foo", "zoo"])
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| google/grr | grr/server/grr_response_server/gui/api_call_router_test.py | Python | apache-2.0 | 4,408 | 0.006352 |
"""
Tests for L{txgithub.scripts.gist}
"""
import io
from collections import namedtuple
from twisted.python import usage
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.defer import Deferred, succeed
from txgithub.scripts import gist
from . _options import (_OptionsTestCaseMixin,
_FakeOptionsTestCaseMixin,
_FakePrintTestCaseMixin,
_FakeSystemExitTestCaseMixin,
_SystemExit)
class OptionsTestCase(_OptionsTestCaseMixin):
"""
Tests for L{gist.Options}
"""
files = ('files',)
required_args = files
options_factory = gist.Options
def test_single_file_ok(self):
"""
Files is an argument.
"""
self.config.parseOptions(self.files)
self.assertEqual(self.config['files'], self.files)
def test_files_ok(self):
"""
Multiple files are collected.
"""
self.config.parseOptions(["file1", "file2"])
self.assertEqual(self.config['files'], ("file1", "file2"))
def test_token_ok(self):
"""
--token is an option.
"""
token = 'some token'
self.assert_option(['--token=' + token], 'token', token)
def test_t_ok(self):
"""
-t is short for --token
"""
token = 'some token'
self.assert_option(['-t', token], 'token', token)
class RecordsFakeGistsEndpoint(object):
"""
Records and orchestrates L{FakeGistsEndpoint}.
"""
def __init__(self):
self.create_calls = []
self.create_returns = Deferred()
class FakeGistsEndpoint(object):
"""
    A fake of the gists endpoint exposed by L{txgithub.api.GithubApi} that
    records calls.
"""
def __init__(self, recorder):
self._recorder = recorder
def create(self, files):
self._recorder.create_calls.append(files)
return self._recorder.create_returns
class RecordsFakeGithubAPI(object):
"""
Records and orchestrates L{FakeGithubAPI}.
"""
def __init__(self):
self.init_calls = []
class FakeGithubAPI(object):
"""
A fake implementation of L{txgithub.api.GithubApi} that records
calls.
"""
def __init__(self, recorder, gists):
self._recorder = recorder
self.gists = gists
def _init(self, token):
self._recorder.init_calls.append(token)
return self
class PostGistTests(SynchronousTestCase):
"""
Tests for L{gist.postGist}.
"""
def setUp(self):
self.token = "token"
self.getToken_call_count = 0
self.getToken_returns = succeed(self.token)
self.gists_recorder = RecordsFakeGistsEndpoint()
self.gists = FakeGistsEndpoint(self.gists_recorder)
self.api_recorder = RecordsFakeGithubAPI()
self.fake_api = FakeGithubAPI(self.api_recorder, self.gists)
self.content = u"content"
self.stdin = io.StringIO(self.content)
self.open_calls = []
self.open_returns = io.StringIO(self.content)
self.print_calls = []
self.patch(gist, "getToken", self.fake_getToken)
self.patch(gist, "GithubApi", self.fake_api._init)
self.patch(gist, "_open", self.fake_open)
self.patch(gist, "stdin", self.stdin)
self.patch(gist, "_print", self.fake_print)
def fake_getToken(self):
"""
A fake get token implementation that records its calls.
"""
self.getToken_call_count += 1
return self.getToken_returns
def fake_open(self, filename):
"""
A fake L{open} that records its calls.
"""
self.open_calls.append(filename)
return self.open_returns
def fake_print(self, *args):
"""
A fake L{print} that records its calls.
"""
self.print_calls.append(args)
def test_getToken_by_default(self):
"""
When no token is provided, the get token implementation is
called to retrieve one.
"""
gist.postGist("reactor", token="", files=["something"])
self.assertEqual(self.getToken_call_count, 1)
self.assertEqual(self.api_recorder.init_calls, [self.token])
def test_token_used(self):
"""
The provided token is used to connect to GitHub.
"""
token = "my token"
gist.postGist("reactor", token=token, files=["something"])
self.assertEqual(self.getToken_call_count, 0)
self.assertEqual(self.api_recorder.init_calls, [token])
def test_stdin_gist(self):
"""
When no files are provided, the gist is read from stdin.
"""
gist.postGist("reactor", token=self.token, files=())
self.assertEqual(self.gists_recorder.create_calls, [
{
"gistfile1": {
"content": self.content,
},
}
])
self.assertEqual(self.stdin.tell(), len(self.content))
def test_files_used(self):
"""
The filenames provided are read and comprise the gist's content.
"""
filename = "some file"
gist.postGist("reactor", token=self.token, files=[filename])
self.assertEqual(self.open_calls, [filename])
self.assertTrue(self.open_returns.closed)
self.assertEqual(self.gists_recorder.create_calls, [
{
"some file": {
"content": self.content,
},
}
])
def test_response_printed(self):
"""
The URL in the API's response is printed.
"""
url = "https://something"
response = gist.postGist("reactor", token=self.token, files=[])
self.gists_recorder.create_returns.callback(
{
"html_url": url,
}
)
self.successResultOf(response)
self.assertEqual(self.print_calls, [(url,)])
_PostGistCall = namedtuple("_PostGistCall",
["reactor", "token", "files"])
class RunTests(_FakeOptionsTestCaseMixin,
_FakeSystemExitTestCaseMixin,
_FakePrintTestCaseMixin):
"""
Tests for L{txgithub.scripts.gist.run}
"""
def setUp(self):
super(RunTests, self).setUp()
self.postGist_calls = []
self.postGist_returns = "postGist return value"
self.patch(gist, "Options", lambda: self.options)
self.patch(gist, "_print", self.fake_print)
self.patch(gist, "exit", self.fake_exit)
self.patch(gist, "postGist", self.fake_postGist)
def fake_postGist(self, reactor, token, files):
"""
A fake L{gist.postGist} implementation that records its calls.
"""
self.postGist_calls.append(_PostGistCall(reactor, token, files))
return self.postGist_returns
def test_run_usage_error(self):
"""
A usage error results in a help message and an exit code of 1.
"""
errortext = "error text"
first_line = ': '.join([self.argv0, errortext])
self.options_recorder.parseOptions_raises = usage.UsageError(errortext)
self.assertRaises(_SystemExit,
gist.run, "reactor", self.argv0, "bad args")
self.assertEqual(self.options_recorder.parseOptions_calls,
[("bad args",)])
self.assertEqual(len(self.print_calls), 2)
self.assertEqual(self.print_calls[0], (first_line,))
self.assertIn("--help", self.print_calls[1][0])
self.assertEqual(len(self.exit_calls), 1)
[code] = self.exit_calls
self.assertEqual(code, 1)
self.assertNot(self.postGist_calls)
def test_run_ok(self):
"""
The post gist implementation is called with the options
specified on the command line.
"""
reactor = "reactor"
self.options["token"] = "the token"
self.options["files"] = ("file1",)
result = gist.run(reactor, self.argv0, "good args")
self.assertEqual(self.options_recorder.parseOptions_calls,
[("good args",)])
self.assertEqual(len(self.postGist_calls), 1)
[call] = self.postGist_calls
self.assertIs(call.reactor, reactor)
self.assertEqual(call.token, self.options["token"])
self.assertEqual(call.files, self.options["files"])
self.assertIs(result, self.postGist_returns)
| tomprince/txgithub | txgithub/scripts/tests/test_gist.py | Python | mit | 8,506 | 0 |
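The recorder/fake split used throughout the tests above keeps assertions on a plain data object while the fake only forwards calls; a minimal, generic sketch of that pattern (the names here are illustrative and not part of txgithub):
class RecordsFakeService(object):
    """Holds recorded calls and canned return values for FakeService."""
    def __init__(self):
        self.frob_calls = []
        self.frob_returns = None
class FakeService(object):
    """Stands in for the real collaborator; it only records and returns."""
    def __init__(self, recorder):
        self._recorder = recorder
    def frob(self, argument):
        self._recorder.frob_calls.append(argument)
        return self._recorder.frob_returns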
from __future__ import unicode_literals
from django.apps import AppConfig
class WawmembersConfig(AppConfig):
name = 'wawmembers'
| heidi666/WorldsAtWar | wawmembers/apps.py | Python | mit | 136 | 0 |
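For orientation, a hedged sketch of how an AppConfig like the one above is normally activated from settings; the dotted path is inferred from the file location rather than taken from this project:
# settings.py (sketch)
INSTALLED_APPS = [
    # ...
    'wawmembers.apps.WawmembersConfig',   # or plain 'wawmembers' on newer Django
]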
# -*- coding: utf-8 -*-
#
# eofs documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 5 15:47:55 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'matplotlib.sphinxext.plot_directive',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'eofs'
copyright = '2013-{} Andrew Dawson'.format(time.localtime().tm_year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import eofs
version = eofs.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'python'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- extlinks configuration ----------------------------------------------------
# Allow e.g. :issue:`42` and :pr:`42` roles:
extlinks = {'issue': ('https://github.com/ajdawson/eofs/issues/%s', '#'),
'pr': ('https://github.com/ajdawson/eofs/pull/%s', '#')}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx13'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['sidebar_toc.html',
'relations.html',
'sourcelink.html',
'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'eofsdoc'
# Options for intersphinx.
intersphinx_mapping = {
'eof2': ('http://ajdawson.github.com/eof2', None),
'iris': ('http://scitools.org.uk/iris/docs/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'xarray': ('http://xarray.pydata.org/en/stable', None),
'dask': ('https://docs.dask.org/en/latest', None),
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
'preamble': """\\usepackage{amssymb}
\\usepackage{amsmath}""",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('userguide/index', 'userguide.tex', 'eofs User Guide', 'Andrew Dawson',
'manual'),
('examples/index', 'examples.tex', 'eofs Examples', 'Andrew Dawson',
'manual'),
('api/index', 'api.tex', 'eofs API Reference', 'Andrew Dawson',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'eofs', 'eofs Documentation',
['Andrew Dawson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'eofs', 'eofs Documentation',
'Andrew Dawson', 'eofs', 'EOF analysis in Python.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Autodoc settings -- #
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_docstring_signature = True
autosummary_generate = True
| ajdawson/eofs | doc/conf.py | Python | gpl-3.0 | 9,234 | 0.00574 |
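A rough illustration of the substitution the extlinks mapping above performs for roles such as :issue:`42`; this mimics the expansion rather than reproducing Sphinx internals:
extlinks = {'issue': ('https://github.com/ajdawson/eofs/issues/%s', '#'),
            'pr': ('https://github.com/ajdawson/eofs/pull/%s', '#')}
def expand_role(name, target):
    # returns (link URL, link text) the way the role would render
    url_template, prefix = extlinks[name]
    return url_template % target, prefix + target
print(expand_role('issue', '42'))
# -> ('https://github.com/ajdawson/eofs/issues/42', '#42')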
# Copyright (C) 2010 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
from __future__ import division
import re
import math
import numpy as N
from . import dates
_formaterror = 'FormatError'
# a format statement in a string
_format_re = re.compile(r'%([-#0-9 +.hlL]*?)([diouxXeEfFgGcrs%])')
def localeFormat(totfmt, args, locale=None):
"""Format using fmt statement fmt, qt QLocale object locale and
arguments to formatting args.
* arguments are not supported in this formatting, nor is using
a dict to supply values for statement
"""
# substitute all format statements with string format statements
newfmt = _format_re.sub("%s", totfmt)
# do formatting separately for all statements
strings = []
i = 0
for f in _format_re.finditer(totfmt):
code = f.group(2)
if code == '%':
s = '%'
else:
try:
s = f.group() % args[i]
i += 1
except IndexError:
raise TypeError("Not enough arguments for format string")
s = s.replace('-', u'\u2212')
if locale is not None and code in 'eEfFgG':
s = s.replace('.', locale.decimalPoint())
strings.append(s)
if i != len(args):
raise TypeError("Not all arguments converted during string formatting")
return newfmt % tuple(strings)
def sciToHuman(val, cleanup=False):
"""Convert output from C formatting to human scientific notation.
if cleanup, remove zeros after decimal points
"""
# split around the exponent
leader, exponent = val.split('e')
# strip off trailing decimal point and zeros if no format args
if cleanup and leader.find('.') >= 0:
leader = leader.rstrip('0').rstrip('.')
# trim off leading 1
if leader == '1' and cleanup:
leader = ''
else:
# add multiply sign
leader += u'\u00d7'
return '%s10^{%i}' % (leader, int(exponent))
def formatSciNotation(num, formatargs, locale=None):
"""Format number into form X \times 10^{Y}.
This function trims trailing zeros and decimal point unless a formatting
argument is supplied
This is similar to the %e format string
formatargs is the standard argument in a format string to control the
number of decimal places, etc.
locale is a QLocale object
"""
# handle nan, inf, -inf
if not N.isfinite(num):
return str(num)
# create an initial formatting string
if formatargs:
formatstr = '%' + formatargs + 'e'
else:
formatstr = '%.10e'
# do formatting, catching errors
try:
text = formatstr % num
except:
return _formaterror
text = sciToHuman(text, cleanup=formatargs=='')
# do substitution of decimals
if locale is not None:
text = text.replace('.', locale.decimalPoint())
return text
def formatGeneral(num, fmtarg, locale=None):
"""General formatting which switches from normal to scientic
notation."""
if fmtarg:
# if an argument is given, we convert output
try:
retn = ('%'+fmtarg+'g') % num
except ValueError:
retn = _formaterror
if retn.find('e') >= 0:
# in scientific notation, so convert
retn = sciToHuman(retn, cleanup=False)
else:
a = abs(num)
# manually choose when to switch from normal to scientific
# as the default %g isn't very good
if a >= 1e4 or (a < 1e-2 and a > 1e-110):
retn = formatSciNotation(num, fmtarg, locale=locale)
else:
retn = '%.10g' % num
if locale is not None:
# replace decimal point with correct decimal point
retn = retn.replace('.', locale.decimalPoint())
return retn
engsuffixes = ( 'y', 'z', 'a', 'f', 'p', 'n',
u'\u03bc', 'm', '', 'k', 'M', 'G',
'T', 'P', 'E', 'Z', 'Y' )
def formatEngineering(num, fmtarg, locale=None):
"""Engineering suffix format notation using SI suffixes."""
if num != 0.:
logindex = math.log10( abs(num) ) / 3.
# for numbers < 1 round down suffix
if logindex < 0. and (int(logindex)-logindex) > 1e-6:
logindex -= 1
# make sure we don't go out of bounds
logindex = min( max(logindex, -8),
len(engsuffixes) - 9 )
suffix = engsuffixes[ int(logindex) + 8 ]
val = num / 10**( int(logindex) *3)
else:
suffix = ''
val = num
text = ('%' + fmtarg + 'g%s') % (val, suffix)
if locale is not None:
text = text.replace('.', locale.decimalPoint())
return text
# catch general veusz formatting expression
_formatRE = re.compile(r'%([-0-9.+# ]*)(VDVS|VD.|V.|[A-Za-z%])')
def formatNumber(num, formatstr, locale=None):
""" Format a number in different ways.
formatstr is a standard C format string, with some additions:
%Ve scientific notation X \times 10^{Y}
%Vg switches from normal notation to scientific outside 10^-2 to 10^4
%VE engineering suffix option
%VDx date formatting, where x is one of the arguments in
http://docs.python.org/lib/module-time.html in the function
strftime
"""
outitems = []
while formatstr:
# repeatedly try to do string format
match = _formatRE.search(formatstr)
if not match:
outitems.append(formatstr)
break
# argument and type of formatting
farg, ftype = match.groups()
# special veusz formatting
if ftype[:1] == 'V':
# special veusz formatting
if ftype == 'Ve':
out = formatSciNotation(num, farg, locale=locale)
elif ftype == 'Vg':
out = formatGeneral(num, farg, locale=locale)
elif ftype == 'VE':
out = formatEngineering(num, farg, locale=locale)
elif ftype[:2] == 'VD':
d = dates.floatToDateTime(num)
# date formatting (seconds since start of epoch)
if ftype[:4] == 'VDVS':
# special seconds operator
out = ('%'+ftype[4:]+'g') % (d.second+d.microsecond*1e-6)
else:
# use date formatting
try:
out = d.strftime(str('%'+ftype[2:]))
except ValueError:
out = _formaterror
else:
out = _formaterror
# replace hyphen with true minus sign
out = out.replace('-', u'\u2212')
elif ftype == '%':
out = '%'
else:
# standard C formatting
try:
out = localeFormat('%' + farg + ftype, (num,), locale=locale)
except:
out = _formaterror
outitems.append(formatstr[:match.start()])
outitems.append(out)
formatstr = formatstr[match.end():]
return ''.join(outitems)
| bks/veusz | veusz/utils/formatting.py | Python | gpl-2.0 | 7,914 | 0.002654 |
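A few example calls against the formatting helpers above; the import path is an assumption based on the file location, and no QLocale is passed, so the default '.' decimal point is kept:
from veusz.utils import formatting   # path is an assumption
print(formatting.formatNumber(12345678.0, '%Vg'))  # switches to scientific: 1.2345678×10^{7}
print(formatting.formatNumber(0.25, '%Vg'))        # stays plain: 0.25
print(formatting.formatNumber(1500.0, '%.2VE'))    # engineering suffix: 1.5k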
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListTables
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-area120-tables
# [START area120tables_v1alpha1_generated_TablesService_ListTables_async]
from google.area120 import tables_v1alpha1
async def sample_list_tables():
# Create a client
client = tables_v1alpha1.TablesServiceAsyncClient()
# Initialize request argument(s)
request = tables_v1alpha1.ListTablesRequest(
)
# Make the request
page_result = client.list_tables(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END area120tables_v1alpha1_generated_TablesService_ListTables_async]
| googleapis/python-area120-tables | samples/generated_samples/area120tables_v1alpha1_generated_tables_service_list_tables_async.py | Python | apache-2.0 | 1,496 | 0.000668 |
import unittest
from pkg_resources import resource_string
from .. import parse, parser, tree
def setup_java_class(content_to_add):
""" returns an example java class with the
given content_to_add contained within a method.
"""
template = """
public class Lambda {
public static void main(String args[]) {
%s
}
}
"""
return template % content_to_add
def filter_type_in_method(clazz, the_type, method_name):
""" yields the result of filtering the given class for the given
type inside the given method identified by its name.
"""
for path, node in clazz.filter(the_type):
for p in reversed(path):
if isinstance(p, tree.MethodDeclaration):
if p.name == method_name:
yield path, node
class LambdaSupportTest(unittest.TestCase):
""" Contains tests for java 8 lambda syntax. """
def assert_contains_lambda_expression_in_m(
self, clazz, method_name='main'):
""" asserts that the given tree contains a method with the supplied
method name containing a lambda expression.
"""
matches = list(filter_type_in_method(
clazz, tree.LambdaExpression, method_name))
if not matches:
self.fail('No matching lambda expression found.')
return matches
def test_lambda_support_no_parameters_no_body(self):
""" tests support for lambda with no parameters and no body. """
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class("() -> {};")))
def test_lambda_support_no_parameters_expression_body(self):
""" tests support for lambda with no parameters and an
expression body.
"""
test_classes = [
setup_java_class("() -> 3;"),
setup_java_class("() -> null;"),
setup_java_class("() -> { return 21; };"),
setup_java_class("() -> { System.exit(1); };"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_lambda_support_no_parameters_complex_expression(self):
""" tests support for lambda with no parameters and a
complex expression body.
"""
code = """
() -> {
if (true) return 21;
else
{
int result = 21;
return result / 2;
}
};"""
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class(code)))
def test_parameter_no_type_expression_body(self):
""" tests support for lambda with parameters with inferred types. """
test_classes = [
setup_java_class("(bar) -> bar + 1;"),
setup_java_class("bar -> bar + 1;"),
setup_java_class("x -> x.length();"),
setup_java_class("y -> { y.boom(); };"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_parameter_with_type_expression_body(self):
""" tests support for lambda with parameters with formal types. """
test_classes = [
setup_java_class("(int foo) -> { return foo + 2; };"),
setup_java_class("(String s) -> s.length();"),
setup_java_class("(int foo) -> foo + 1;"),
setup_java_class("(Thread th) -> { th.start(); };"),
setup_java_class("(String foo, String bar) -> "
"foo + bar;"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_parameters_with_no_type_expression_body(self):
""" tests support for multiple lambda parameters
that are specified without their types.
"""
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class("(x, y) -> x + y;")))
def test_parameters_with_mixed_inferred_and_declared_types(self):
""" this tests that lambda type specification mixing is considered
invalid as per the specifications.
"""
with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(x, int y) -> x+y;"))
def test_parameters_inferred_types_with_modifiers(self):
""" this tests that lambda inferred type parameters with modifiers are
considered invalid as per the specifications.
"""
with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(x, final y) -> x+y;"))
def test_invalid_parameters_are_invalid(self):
""" this tests that invalid lambda parameters are are
considered invalid as per the specifications.
"""
with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(a b c) -> {};"))
def test_cast_works(self):
""" this tests that a cast expression works as expected. """
parse.parse(setup_java_class("String x = (String) A.x() ;"))
class MethodReferenceSyntaxTest(unittest.TestCase):
""" Contains tests for java 8 method reference syntax. """
def assert_contains_method_reference_expression_in_m(
self, clazz, method_name='main'):
""" asserts that the given class contains a method with the supplied
method name containing a method reference.
"""
matches = list(filter_type_in_method(
clazz, tree.MethodReference, method_name))
if not matches:
self.fail('No matching method reference found.')
return matches
def test_method_reference(self):
""" tests that method references are supported. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::length;")))
def test_method_reference_to_the_new_method(self):
""" test support for method references to 'new'. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::new;")))
def test_method_reference_to_the_new_method_with_explict_type(self):
""" test support for method references to 'new' with an
explicit type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::<String> new;")))
def test_method_reference_from_super(self):
""" test support for method references from 'super'. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("super::toString;")))
def test_method_reference_from_super_with_identifier(self):
""" test support for method references from Identifier.super. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String.super::toString;")))
@unittest.expectedFailure
def test_method_reference_explicit_type_arguments_for_generic_type(self):
""" currently there is no support for method references
        for a generic type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("List<String>::size;")))
def test_method_reference_explicit_type_arguments(self):
""" test support for method references with an explicit type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("Arrays::<String> sort;")))
@unittest.expectedFailure
def test_method_reference_from_array_type(self):
""" currently there is no support for method references
        from an array type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("int[]::new;")))
class InterfaceSupportTest(unittest.TestCase):
""" Contains tests for java 8 interface extensions. """
def test_interface_support_static_methods(self):
parse.parse("""
interface Foo {
void foo();
static Foo create() {
return new Foo() {
@Override
void foo() {
System.out.println("foo");
}
};
}
}
""")
def test_interface_support_default_methods(self):
parse.parse("""
interface Foo {
default void foo() {
System.out.println("foo");
}
}
""")
def main():
unittest.main()
if __name__ == '__main__':
main()
| c2nes/javalang | javalang/test/test_java_8_syntax.py | Python | mit | 8,702 | 0.000115 |
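A standalone sketch of the parse-and-filter pattern the tests above exercise, assuming the javalang package is importable:
import javalang
source = """
public class Lambda {
    public static void main(String args[]) {
        Runnable r = () -> System.out.println("hi");
    }
}
"""
tree = javalang.parse.parse(source)
for path, node in tree.filter(javalang.tree.LambdaExpression):
    print(type(node).__name__, node.parameters)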
# -*- coding: utf-8 -*-
import random
import time
from devp2p.utils import int_to_big_endian
from devp2p import kademlia
import pytest
import gevent
random.seed(42)
class WireMock(kademlia.WireInterface):
messages = [] # global messages
def __init__(self, sender):
assert isinstance(sender, kademlia.Node)
self.sender = sender
assert not self.messages
@classmethod
def empty(cls):
while cls.messages:
cls.messages.pop()
def send_ping(self, node):
echo = hex(random.randint(0, 2**256))[-32:]
self.messages.append((node, 'ping', self.sender, echo))
return echo
def send_pong(self, node, echo):
self.messages.append((node, 'pong', self.sender, echo))
def send_find_node(self, node, nodeid):
self.messages.append((node, 'find_node', self.sender, nodeid))
def send_neighbours(self, node, neighbours):
self.messages.append((node, 'neighbours', self.sender, neighbours))
def poll(self, node):
for i, x in enumerate(self.messages):
if x[0] == node:
del self.messages[i]
return x[1:]
def process(self, kademlia_protocols, steps=0):
"""
        process messages until none are left,
        or process only `steps` messages if steps > 0
"""
i = 0
proto_by_node = dict((p.this_node, p) for p in kademlia_protocols)
while self.messages:
msg = self.messages.pop(0)
assert isinstance(msg[2], kademlia.Node)
target = proto_by_node[msg[0]]
cmd = 'recv_' + msg[1]
getattr(target, cmd)(*msg[2:])
i += 1
if steps and i == steps:
return # messages may be left
assert not self.messages
def random_pubkey():
pk = int_to_big_endian(random.getrandbits(kademlia.k_pubkey_size))
return '\x00' * (kademlia.k_pubkey_size / 8 - len(pk)) + pk
def random_node():
return kademlia.Node(random_pubkey())
def routing_table(num_nodes=1000):
node = random_node()
routing = kademlia.RoutingTable(node)
for i in range(num_nodes):
routing.add_node(random_node())
assert len(routing.buckets) <= i + 2
assert len(routing.buckets) <= 512
assert i == num_nodes - 1
return routing
def get_wired_protocol():
this_node = random_node()
return kademlia.KademliaProtocol(this_node, WireMock(this_node))
def test_bootstrap():
proto = get_wired_protocol()
wire = proto.wire
other = routing_table()
# lookup self
proto.bootstrap(nodes=[other.this_node])
msg = wire.poll(other.this_node)
assert msg == ('find_node', proto.routing.this_node, proto.routing.this_node.id)
assert wire.poll(other.this_node) is None
assert wire.messages == []
def test_setup():
"""
    nodes connect to any peer and do a lookup for themselves
"""
proto = get_wired_protocol()
wire = proto.wire
other = routing_table()
# lookup self
proto.bootstrap(nodes=[other.this_node])
msg = wire.poll(other.this_node)
assert msg == ('find_node', proto.routing.this_node, proto.routing.this_node.id)
assert wire.poll(other.this_node) is None
assert wire.messages == []
# respond with neighbours
closest = other.neighbours(msg[2])
assert len(closest) == kademlia.k_bucket_size
proto.recv_neighbours(random_node(), closest)
# expect 3 lookups
for i in range(kademlia.k_find_concurrency):
msg = wire.poll(closest[i])
assert msg == ('find_node', proto.routing.this_node, proto.routing.this_node.id)
# and pings for all nodes
for node in closest:
msg = wire.poll(node)
assert msg[0] == 'ping'
# nothing else
assert wire.messages == []
@pytest.mark.timeout(5)
@pytest.mark.xfail
def test_find_node_timeout():
proto = get_wired_protocol()
other = routing_table()
wire = proto.wire
# lookup self
proto.bootstrap(nodes=[other.this_node])
msg = wire.poll(other.this_node)
assert msg == ('find_node', proto.routing.this_node, proto.routing.this_node.id)
assert wire.poll(other.this_node) is None
assert wire.messages == []
# do timeout
gevent.sleep(kademlia.k_request_timeout)
# respond with neighbours
closest = other.neighbours(msg[2])
assert len(closest) == kademlia.k_bucket_size
proto.recv_neighbours(random_node(), closest)
# expect pings, but no other lookup
msg = wire.poll(closest[0])
assert msg[0] == 'ping'
assert wire.poll(closest[0]) is None
wire.empty()
assert wire.messages == []
def test_eviction():
proto = get_wired_protocol()
proto.routing = routing_table(1000)
wire = proto.wire
# trigger node ping
node = proto.routing.neighbours(random_node())[0]
proto.ping(node)
msg = wire.poll(node)
assert msg[0] == 'ping'
assert wire.messages == []
proto.recv_pong(node, msg[2])
# expect no message and that node is still there
assert wire.messages == []
assert node in proto.routing
# expect node to be on the tail
assert proto.routing.bucket_by_node(node).tail == node
@pytest.mark.timeout(5)
@pytest.mark.xfail
def test_eviction_timeout():
proto = get_wired_protocol()
proto.routing = routing_table(1000)
wire = proto.wire
# trigger node ping
node = proto.routing.neighbours(random_node())[0]
proto.ping(node)
msg = wire.poll(node)
assert msg[0] == 'ping'
assert wire.messages == []
gevent.sleep(kademlia.k_request_timeout)
proto.recv_pong(node, msg[2])
# expect no message and that is not there anymore
assert wire.messages == []
assert node not in proto.routing
# expect node not to be in the replacement_cache
assert node not in proto.routing.bucket_by_node(node).replacement_cache
def test_eviction_node_active():
"""
active nodes (replying in time) should not be evicted
"""
proto = get_wired_protocol()
proto.routing = routing_table(10000) # set high, so add won't split
wire = proto.wire
# get a full bucket
full_buckets = [b for b in proto.routing.buckets if b.is_full and not b.should_split]
assert full_buckets
bucket = full_buckets[0]
assert not bucket.should_split
assert len(bucket) == kademlia.k_bucket_size
bucket_nodes = bucket.nodes[:]
eviction_candidate = bucket.head
# create node to insert
node = random_node()
node.id = bucket.start + 1 # should not split
assert bucket.in_range(node)
assert bucket == proto.routing.bucket_by_node(node)
# insert node
proto.update(node)
# expect bucket was not split
assert len(bucket) == kademlia.k_bucket_size
# expect bucket to be unchanged
assert bucket_nodes == bucket.nodes
assert eviction_candidate == bucket.head
# expect node not to be in bucket yet
assert node not in bucket
assert node not in proto.routing
# expect a ping to bucket.head
msg = wire.poll(eviction_candidate)
assert msg[0] == 'ping'
assert msg[1] == proto.this_node
assert len(proto._expected_pongs) == 1
expected_pingid = proto._expected_pongs.keys()[0]
assert len(expected_pingid) == 96
echo = expected_pingid[:32]
assert len(echo) == 32
assert wire.messages == []
# reply in time
# can not check w/o mcd
print 'sending pong'
proto.recv_pong(eviction_candidate, echo)
# expect no other messages
assert wire.messages == []
# expect node was not added
assert node not in proto.routing
# eviction_candidate is around and was promoted to bucket.tail
assert eviction_candidate in proto.routing
assert eviction_candidate == bucket.tail
# expect node to be in the replacement_cache
assert node in bucket.replacement_cache
@pytest.mark.timeout(5)
@pytest.mark.xfail
def test_eviction_node_inactive():
"""
    inactive nodes (not replying in time) should be evicted and replaced
"""
proto = get_wired_protocol()
proto.routing = routing_table(10000) # set high, so add won't split
wire = proto.wire
# get a full bucket
full_buckets = [b for b in proto.routing.buckets if b.is_full and not b.should_split]
assert full_buckets
bucket = full_buckets[0]
assert not bucket.should_split
assert len(bucket) == kademlia.k_bucket_size
bucket_nodes = bucket.nodes[:]
eviction_candidate = bucket.head
# create node to insert
node = random_node()
node.id = bucket.start + 1 # should not split
assert bucket.in_range(node)
assert bucket == proto.routing.bucket_by_node(node)
# insert node
proto.update(node)
# expect bucket was not split
assert len(bucket) == kademlia.k_bucket_size
# expect bucket to be unchanged
assert bucket_nodes == bucket.nodes
assert eviction_candidate == bucket.head
# expect node not to be in bucket yet
assert node not in bucket
assert node not in proto.routing
# expect a ping to bucket.head
msg = wire.poll(eviction_candidate)
assert msg[0] == 'ping'
assert msg[1] == proto.this_node
assert len(proto._expected_pongs) == 1
expected_pingid = proto._expected_pongs.keys()[0]
assert len(expected_pingid) == 96
echo = expected_pingid[:32]
assert len(echo) == 32
assert wire.messages == []
# reply late
gevent.sleep(kademlia.k_request_timeout)
proto.recv_pong(eviction_candidate, echo)
# expect no other messages
assert wire.messages == []
# expect node was not added
assert node in proto.routing
# eviction_candidate is around and was promoted to bucket.tail
assert eviction_candidate not in proto.routing
assert node == bucket.tail
# expect node to be in the replacement_cache
assert eviction_candidate not in bucket.replacement_cache
def test_eviction_node_split():
"""
    a full bucket that should split accepts new nodes by splitting, without evicting
"""
proto = get_wired_protocol()
    proto.routing = routing_table(1000) # set low, so we'll split
wire = proto.wire
# get a full bucket
full_buckets = [b for b in proto.routing.buckets if b.is_full and b.should_split]
assert full_buckets
bucket = full_buckets[0]
assert bucket.should_split
assert len(bucket) == kademlia.k_bucket_size
bucket_nodes = bucket.nodes[:]
eviction_candidate = bucket.head
# create node to insert
node = random_node()
node.id = bucket.start + 1 # should not split
assert bucket.in_range(node)
assert bucket == proto.routing.bucket_by_node(node)
# insert node
proto.update(node)
# expect bucket to be unchanged
assert bucket_nodes == bucket.nodes
assert eviction_candidate == bucket.head
# expect node not to be in bucket yet
assert node not in bucket
assert node in proto.routing
# expect no ping to bucket.head
assert not wire.poll(eviction_candidate)
assert wire.messages == []
# expect node was not added
assert node in proto.routing
# eviction_candidate is around and was unchanged
assert eviction_candidate == bucket.head
def test_ping_adds_sender():
p = get_wired_protocol()
assert len(p.routing) == 0
for i in range(10):
n = random_node()
p.recv_ping(n, 'some id %d' % i)
assert len(p.routing) == i + 1
p.wire.empty()
def test_two():
print
one = get_wired_protocol()
one.routing = routing_table(100)
two = get_wired_protocol()
wire = one.wire
assert one.this_node != two.this_node
two.ping(one.this_node)
# print 'messages', wire.messages
wire.process([one, two])
two.find_node(two.this_node.id)
# print 'messages', wire.messages
msg = wire.process([one, two], steps=2)
# print 'messages', wire.messages
assert len(wire.messages) >= kademlia.k_bucket_size
msg = wire.messages.pop(0)
assert msg[1] == 'find_node'
for m in wire.messages[kademlia.k_find_concurrency:]:
assert m[1] == 'ping'
wire.empty()
def test_many(num_nodes=17):
WireMock.empty()
assert num_nodes >= kademlia.k_bucket_size + 1
protos = []
for i in range(num_nodes):
protos.append(get_wired_protocol())
bootstrap = protos[0]
wire = bootstrap.wire
# bootstrap
for p in protos[1:]:
p.bootstrap([bootstrap.this_node])
wire.process(protos) # successively add nodes
    # now everybody does a find node to fill the buckets
for p in protos[1:]:
p.find_node(p.this_node.id)
wire.process(protos) # can all send in parallel
for i, p in enumerate(protos):
# print i, len(p.routing)
assert len(p.routing) >= kademlia.k_bucket_size
return protos
def test_find_closest(num_nodes=50):
"""
assert, that nodes find really the closest of all nodes
"""
num_tests = 10
protos = test_many(num_nodes)
all_nodes = [p.this_node for p in protos]
for i, p in enumerate(protos[:num_tests]):
for j, node in enumerate(all_nodes):
if p.this_node == node:
continue
p.find_node(node.id)
p.wire.process(protos)
assert p.routing.neighbours(node)[0] == node
if __name__ == '__main__':
import ethereum.slogging
ethereum.slogging.configure(config_string=':debug')
test_many()
| ms83/pydevp2p | devp2p/tests/test_kademlia_protocol.py | Python | mit | 13,421 | 0.000522 |
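A condensed reading aid for the flow that test_two() above drives, reusing the same helpers from this module; no new behaviour is implied:
one = get_wired_protocol()
two = get_wired_protocol()
two.ping(one.this_node)            # queues a ping on the shared WireMock
one.wire.process([one, two])       # delivers the ping and the pong reply
two.find_node(two.this_node.id)    # two starts a lookup for itself
one.wire.process([one, two], steps=2)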
# -*- coding:utf-8 -*-
__author__ = 'chenjun'
import torch
from torch.autograd import Variable
from utils.util import *
"""Beam search module.
Beam search takes the top K results from the model, predicts the K results for
each of the previous K result, getting K*K results. Pick the top K results from
K*K results, and start over again until certain number of results are fully
decoded.
"""
class Hypothesis(object):
"""Defines a hypothesis during beam search."""
def __init__(self, tokens, log_prob, state):
"""Hypothesis constructor.
Args:
tokens: start tokens for decoding.
log_prob: log prob of the start tokens, usually 1.
state: decoder state.
"""
self.tokens = tokens
self.log_prob = log_prob
self.state = state
def extend(self, token, log_prob, new_state):
"""Extend the hypothesis with result from latest step.
Args:
token: latest token from decoding.
log_prob: log prob of the latest decoded tokens.
new_state: decoder output state. Fed to the decoder for next step.
Returns:
New Hypothesis with the results from latest step.
"""
return Hypothesis(self.tokens + [token], self.log_prob + log_prob, new_state)
@property
def latest_token(self):
return self.tokens[-1]
@property
def sequence_tokens(self):
return self.tokens
@property
def decode_state(self):
return self.state
class BeamSearch(object):
"""Beam search for generation."""
def __init__(self, vocab_size, beam_size, state=None):
"""
beam search init.
:param vocab_size: target vocab size
:param beam_size: beam size
"""
self.beam_size = beam_size
self.vocab_size = vocab_size
self.hypothesis = [Hypothesis([], 0.0, state)] * self.beam_size
self.results = []
def top_hypothesis(self, hypothesis, normalize=False):
"""
sort the hypothesis list based on log_probs and length.
:param hypothesis: list of hypothesis
        :param normalize: bool, normalize scores by length; only used for the final output
:return:
"""
# This length normalization is only effective for the final results.
if normalize:
return sorted(hypothesis, key=lambda h: h.log_prob/len(h.tokens), reverse=True)
else:
return sorted(hypothesis, key=lambda h: h.log_prob, reverse=True)
def variable(self, token):
"""
convert token to torch variable.
:param token: int
:return:
"""
return Variable(torch.LongTensor([[token]]))
def beam_search(self, inputs):
"""
beam search to generate sequence.
:param inputs: list of decoder outputs, (decoder_out, decode_state)
:return:
"""
all_hypothesis = []
for i, (input, state) in enumerate(inputs):
top_log_probs, top_tokens = input.data.topk(self.vocab_size)
for j in xrange(self.beam_size*2):
token = top_tokens[0][j] # value
log_prob = top_log_probs[0][j] # value
all_hypothesis.append(self.hypothesis[i].extend(token, log_prob, state))
# Filter and collect any hypotheses that have the end token.
self.hypothesis = []
for h in self.top_hypothesis(all_hypothesis):
if h.latest_token == EOS_token:
# Pull the hypothesis off the beam if the end token is reached.
self.results.append(h)
else:
                # Otherwise continue to extend the hypothesis.
self.hypothesis.append(h)
if len(self.hypothesis) == self.beam_size or len(self.results) == self.beam_size:
break
outputs = [(self.variable(hyp.latest_token), hyp.decode_state) for hyp in self.hypothesis]
return outputs
def generate(self, num):
"""
return top num of generated sequence tokens.
:return:
"""
generates = [hyp.sequence_tokens for hyp in self.top_hypothesis(self.results, normalize=True)[:num]]
return generates
| nanfeng1101/Seq2Seq | pytorch_models/models/beam_search.py | Python | mit | 4,237 | 0.001652 |
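A minimal usage sketch for the BeamSearch class above; the random score tensors stand in for real decoder log-probabilities, and EOS_token is assumed to come from utils.util as in the module's import:
import torch
from torch.autograd import Variable
vocab_size, beam_size = 10, 3
searcher = BeamSearch(vocab_size, beam_size, state=None)
# one (scores, decoder_state) pair per live hypothesis
step_inputs = [(Variable(torch.randn(1, vocab_size)), None) for _ in range(beam_size)]
for _ in range(5):                       # decode at most 5 steps
    next_inputs = searcher.beam_search(step_inputs)
    if not next_inputs:                  # every beam ended with EOS_token
        break
    # a real decoder would consume the returned token/state pairs here
    step_inputs = [(Variable(torch.randn(1, vocab_size)), state)
                   for _token, state in next_inputs]
print(searcher.generate(num=2))          # top-2 generated token sequences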
# -*- coding: utf-8 -*-
import subprocess
import os
cmd=['/Users/jehlke/workspace/epywing/src/epywing/utils/mecab/bin/mecab',
'-Owakati', '--dicdir=mecab/dic/ipadic']
#cmd = ['mecab', '-Owakati', '--dicdir=../dic/ipadic']
a = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
a.stdin.write(u'何~これですか what is that HUH OK I SEE ?\n\n'.encode('utf8'))
a.stdin.flush()
b = unicode(a.stdout.readline().decode('utf8'))
print 'test'
print b.strip()#.split()
print 'test'
| aehlke/epywing | src/epywing/utils/test.py | Python | gpl-3.0 | 527 | 0.009747 |
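A variation on the pipe experiment above that lets communicate() close stdin and collect the output in one shot, sidestepping the flush()/readline() ordering; the mecab binary and dictionary are assumed to be on the default path:
import subprocess
cmd = ['mecab', '-Owakati']
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, _ = proc.communicate(u'何これですか what is that\n'.encode('utf8'))
print(out.decode('utf8').strip())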
from __future__ import print_function
from copy import copy, deepcopy
import datetime
import inspect
import sys
import traceback
from django.core.management import call_command
from django.core.management.commands import loaddata
from django.db import models
import south.db
from south import exceptions
from south.db import DEFAULT_DB_ALIAS
from south.models import MigrationHistory
from south.signals import ran_migration
from south.utils.py3 import StringIO
class Migrator(object):
def __init__(self, verbosity=0, interactive=False):
self.verbosity = int(verbosity)
self.interactive = bool(interactive)
@staticmethod
def title(target):
raise NotImplementedError()
def print_title(self, target):
if self.verbosity:
print(self.title(target))
@staticmethod
def status(target):
raise NotImplementedError()
def print_status(self, migration):
status = self.status(migration)
if self.verbosity and status:
print(status)
@staticmethod
def orm(migration):
raise NotImplementedError()
def backwards(self, migration):
return self._wrap_direction(migration.backwards(), migration.prev_orm())
def direction(self, migration):
raise NotImplementedError()
@staticmethod
def _wrap_direction(direction, orm):
args = inspect.getargspec(direction)
if len(args[0]) == 1:
# Old migration, no ORM should be passed in
return direction
return (lambda: direction(orm))
@staticmethod
def record(migration, database):
raise NotImplementedError()
def run_migration_error(self, migration, extra_info=''):
return (
' ! Error found during real run of migration! Aborting.\n'
'\n'
' ! Since you have a database that does not support running\n'
' ! schema-altering statements in transactions, we have had \n'
' ! to leave it in an interim state between migrations.\n'
'%s\n'
' ! The South developers regret this has happened, and would\n'
' ! like to gently persuade you to consider a slightly\n'
' ! easier-to-deal-with DBMS (one that supports DDL transactions)\n'
' ! NOTE: The error which caused the migration to fail is further up.'
) % extra_info
def run_migration(self, migration, database):
migration_function = self.direction(migration)
south.db.db.start_transaction()
try:
migration_function()
south.db.db.execute_deferred_sql()
if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
# record us as having done this in the same transaction,
# since we're not in a dry run
self.record(migration, database)
except:
south.db.db.rollback_transaction()
if not south.db.db.has_ddl_transactions:
print(self.run_migration_error(migration))
print("Error in migration: %s" % migration)
raise
else:
try:
south.db.db.commit_transaction()
except:
print("Error during commit in migration: %s" % migration)
raise
def run(self, migration, database):
# Get the correct ORM.
south.db.db.current_orm = self.orm(migration)
# If we're not already in a dry run, and the database doesn't support
# running DDL inside a transaction, *cough*MySQL*cough* then do a dry
# run first.
if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
if not south.db.db.has_ddl_transactions:
dry_run = DryRunMigrator(migrator=self, ignore_fail=False)
dry_run.run_migration(migration, database)
return self.run_migration(migration, database)
def send_ran_migration(self, migration, database):
ran_migration.send(None,
app=migration.app_label(),
migration=migration,
method=self.__class__.__name__.lower(),
verbosity=self.verbosity,
interactive=self.interactive,
db=database)
def migrate(self, migration, database):
"""
Runs the specified migration forwards/backwards, in order.
"""
app = migration.migrations._migrations
migration_name = migration.name()
self.print_status(migration)
result = self.run(migration, database)
self.send_ran_migration(migration, database)
return result
def migrate_many(self, target, migrations, database):
raise NotImplementedError()
class MigratorWrapper(object):
def __init__(self, migrator, *args, **kwargs):
self._migrator = copy(migrator)
attributes = dict([(k, getattr(self, k))
for k in self.__class__.__dict__
if not k.startswith('__')])
self._migrator.__dict__.update(attributes)
self._migrator.__dict__['_wrapper'] = self
def __getattr__(self, name):
return getattr(self._migrator, name)
class DryRunMigrator(MigratorWrapper):
def __init__(self, ignore_fail=True, *args, **kwargs):
super(DryRunMigrator, self).__init__(*args, **kwargs)
self._ignore_fail = ignore_fail
def _run_migration(self, migration):
if migration.no_dry_run():
if self.verbosity:
print(" - Migration '%s' is marked for no-dry-run." % migration)
return
south.db.db.dry_run = True
# preserve the constraint cache as it can be mutated by the dry run
constraint_cache = deepcopy(south.db.db._constraint_cache)
if self._ignore_fail:
south.db.db.debug, old_debug = False, south.db.db.debug
pending_creates = south.db.db.get_pending_creates()
south.db.db.start_transaction()
migration_function = self.direction(migration)
try:
try:
migration_function()
south.db.db.execute_deferred_sql()
except:
raise exceptions.FailedDryRun(migration, sys.exc_info())
finally:
south.db.db.rollback_transactions_dry_run()
if self._ignore_fail:
south.db.db.debug = old_debug
south.db.db.clear_run_data(pending_creates)
south.db.db.dry_run = False
# restore the preserved constraint cache from before dry run was
# executed
south.db.db._constraint_cache = constraint_cache
def run_migration(self, migration, database):
try:
self._run_migration(migration)
except exceptions.FailedDryRun:
if self._ignore_fail:
return False
raise
def send_ran_migration(self, *args, **kwargs):
pass
class FakeMigrator(MigratorWrapper):
def run(self, migration, database):
# Don't actually run, just record as if ran
self.record(migration, database)
if self.verbosity:
print(' (faked)')
def send_ran_migration(self, *args, **kwargs):
pass
class LoadInitialDataMigrator(MigratorWrapper):
def load_initial_data(self, target, db='default'):
if target is None or target != target.migrations[-1]:
return
# Load initial data, if we ended up at target
if self.verbosity:
print(" - Loading initial data for %s." % target.app_label())
# Override Django's get_apps call temporarily to only load from the
# current app
old_get_apps = models.get_apps
new_get_apps = lambda: [models.get_app(target.app_label())]
models.get_apps = new_get_apps
loaddata.get_apps = new_get_apps
try:
call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=db)
finally:
models.get_apps = old_get_apps
loaddata.get_apps = old_get_apps
def migrate_many(self, target, migrations, database):
migrator = self._migrator
result = migrator.__class__.migrate_many(migrator, target, migrations, database)
if result:
self.load_initial_data(target, db=database)
return True
class Forwards(Migrator):
"""
Runs the specified migration forwards, in order.
"""
torun = 'forwards'
@staticmethod
def title(target):
if target is not None:
return " - Migrating forwards to %s." % target.name()
else:
assert False, "You cannot migrate forwards to zero."
@staticmethod
def status(migration):
return ' > %s' % migration
@staticmethod
def orm(migration):
return migration.orm()
def forwards(self, migration):
return self._wrap_direction(migration.forwards(), migration.orm())
direction = forwards
@staticmethod
def record(migration, database):
# Record us as having done this
record = MigrationHistory.for_migration(migration, database)
try:
from django.utils.timezone import now
record.applied = now()
except ImportError:
record.applied = datetime.datetime.utcnow()
if database != DEFAULT_DB_ALIAS:
record.save(using=database)
else:
# Django 1.1 and below always go down this branch.
record.save()
def format_backwards(self, migration):
if migration.no_dry_run():
return " (migration cannot be dry-run; cannot discover commands)"
old_debug, old_dry_run = south.db.db.debug, south.db.db.dry_run
south.db.db.debug = south.db.db.dry_run = True
stdout = sys.stdout
sys.stdout = StringIO()
try:
try:
self.backwards(migration)()
return sys.stdout.getvalue()
except:
raise
finally:
south.db.db.debug, south.db.db.dry_run = old_debug, old_dry_run
sys.stdout = stdout
def run_migration_error(self, migration, extra_info=''):
extra_info = ('\n'
'! You *might* be able to recover with:'
'%s'
'%s' %
(self.format_backwards(migration), extra_info))
return super(Forwards, self).run_migration_error(migration, extra_info)
def migrate_many(self, target, migrations, database):
try:
for migration in migrations:
result = self.migrate(migration, database)
if result is False: # The migrations errored, but nicely.
return False
finally:
# Call any pending post_syncdb signals
south.db.db.send_pending_create_signals(verbosity=self.verbosity,
interactive=self.interactive)
return True
class Backwards(Migrator):
"""
Runs the specified migration backwards, in order.
"""
torun = 'backwards'
@staticmethod
def title(target):
if target is None:
return " - Migrating backwards to zero state."
else:
return " - Migrating backwards to just after %s." % target.name()
@staticmethod
def status(migration):
return ' < %s' % migration
@staticmethod
def orm(migration):
return migration.prev_orm()
direction = Migrator.backwards
@staticmethod
def record(migration, database):
# Record us as having not done this
record = MigrationHistory.for_migration(migration, database)
if record.id is not None:
if database != DEFAULT_DB_ALIAS:
record.delete(using=database)
else:
# Django 1.1 always goes down here
record.delete()
def migrate_many(self, target, migrations, database):
for migration in migrations:
self.migrate(migration, database)
return True
| RaD/django-south | south/migration/migrators.py | Python | apache-2.0 | 12,278 | 0.001547 |
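A rough sketch of how the migrator classes above compose; the real call sites live in south.migration, so this is illustrative wiring under a configured Django/South project rather than a verbatim transcript of South's own code:
from south.migration.migrators import (
    Forwards, DryRunMigrator, FakeMigrator, LoadInitialDataMigrator)
forwards = Forwards(verbosity=1, interactive=False)
dry = DryRunMigrator(migrator=forwards, ignore_fail=False)   # probe DDL before the real run
fake = FakeMigrator(migrator=forwards)                       # record history without executing
full = LoadInitialDataMigrator(migrator=forwards)            # load initial_data after the last step
# each wrapper is then driven the same way, e.g.:
#   full.migrate_many(target, migrations, database='default')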
#!/usr/bin/python
import sys, time
for ts in sys.argv[1:]:
print ts, time.ctime(float(ts))
sys.exit(0)
| vandys/nowplaying | reports/disptime.py | Python | unlicense | 107 | 0.009346 |
# __author__ = MelissaChan
# -*- coding: utf-8 -*-
# 16-4-16 10:53 PM
import MySQLdb
def connect(id,name,gender,region,status,date,inter):
try:
conn = MySQLdb.connect(host='localhost',user='root',passwd=' ',port=3306)
cur = conn.cursor()
# cur.execute('create database if not exists PythonDB')
conn.select_db('Facebook')
# cur.execute('create table Test(id int,name varchar(20),info varchar(20))')
value = [id,name,gender,region,status,date,inter]
cur.execute('insert into info values(%s,%s,%s,%s,%s,%s,%s)',value)
# values = []
# for i in range(20):
# values.append((i,'Hello World!','My number is '+str(i)))
#
# cur.executemany('insert into Test values(%s,%s,%s)',values)
# cur.execute('update Test set name="ACdreamer" where id=3')
conn.commit()
cur.close()
conn.close()
print 'insert ok~'
except MySQLdb.Error,msg:
print "MySQL Error %d: %s" %(msg.args[0],msg.args[1])
| MelissaChan/Crawler_Facebook | Crawler/facebook_mysql.py | Python | mit | 1,040 | 0.021236 |
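A tighter variant of connect() above with the same table layout; the connection settings stay placeholders, and the connection is closed even when execute() raises:
import MySQLdb
def insert_info(row):
    conn = MySQLdb.connect(host='localhost', user='root', passwd='', port=3306,
                           db='Facebook', charset='utf8')
    try:
        cur = conn.cursor()
        cur.execute('insert into info values(%s,%s,%s,%s,%s,%s,%s)', row)
        conn.commit()
        cur.close()
    finally:
        conn.close()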
import sys, re
for fn in sys.argv[1:]:
with open(fn, 'r') as f:
s = f.read()
xx = re.findall(r'([^\n]+)\s+\'\'\'(.*?)\'\'\'', s, re.M|re.S)
for (obj, doc) in xx:
s = re.findall('[^:`]\B(([`*])[a-zA-Z_][a-zA-Z0-9_]*\\2)\B', doc)
if s:
print '-'*50
print fn, obj
print '.'*50
print doc
print '.'*50
print [ss[0] for ss in s]
# for vim:
# :s/\([^`:]\)\([`*]\)\([a-zA-Z0-9_]\+\)\2/\1``\3``/
| pyrocko/pyrocko | maintenance/docstring_cop.py | Python | gpl-3.0 | 543 | 0.01105 |
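A quick illustration of what the checker above flags -- single-backtick or single-asterisk identifiers in a docstring that reST would rather see as double-backtick literals:
import re
doc = "Sets `threshold` and *nwindow* before calling ``run``."
hits = re.findall(r'[^:`]\B(([`*])[a-zA-Z_][a-zA-Z0-9_]*\2)\B', doc)
print([h[0] for h in hits])   # ['`threshold`', '*nwindow*']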
import components
def AclContentCacheTest ():
"""ACL content cache test"""
ctx = components.Context (['a', 'b', 'c', 'd', 'cc', 'f'],\
['ip_a', 'ip_b', 'ip_c', 'ip_d', 'ip_cc', 'ip_f'])
net = components.Network (ctx)
a = components.EndHost(ctx.a, net, ctx)
b = components.EndHost(ctx.b, net, ctx)
c = components.EndHost(ctx.c, net, ctx)
d = components.EndHost(ctx.d, net, ctx)
cc = components.AclContentCache(ctx.cc, net, ctx)
f = components.AclFirewall(ctx.f, net, ctx)
net.setAddressMappings([(a, ctx.ip_a), \
(b, ctx.ip_b), \
(c, ctx.ip_c), \
(d, ctx.ip_d), \
(f, ctx.ip_f), \
(cc, ctx.ip_cc)])
addresses = [ctx.ip_a, ctx.ip_b, ctx.ip_c, ctx.ip_d, ctx.ip_cc, ctx.ip_f]
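    # End hosts forward everything to the firewall, the firewall forwards to the
    # content cache, and the cache routes back to the individual end hosts.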
net.RoutingTable(a, [(x, f) for x in addresses])
net.RoutingTable(b, [(x, f) for x in addresses])
net.RoutingTable(c, [(x, f) for x in addresses])
net.RoutingTable(d, [(x, f) for x in addresses])
net.RoutingTable(f, [(x, cc) for x in addresses])
net.RoutingTable(cc, [(ctx.ip_a, a), \
(ctx.ip_b, b), \
(ctx.ip_c, c), \
(ctx.ip_d, d)])
net.Attach(a, b, c, d, cc)
endhosts = [a, b, c, d]
f.AddAcls([(ctx.ip_a, ctx.ip_b), (ctx.ip_c, ctx.ip_d)])
cc.AddAcls([(ctx.ip_a, ctx.ip_b), (ctx.ip_c, ctx.ip_d)])
net.Attach(a, b, c, d, cc, f)
endhosts = [a, b, c, d]
class AclContentCacheReturn (object):
def __init__ (self, net, ctx, a, b, c, d, cc, f):
self.net = net
self.ctx = ctx
self.a = a
self.b = b
self.c = c
self.d = d
self.cc = cc
self.f = f
self.check = components.PropertyChecker (ctx, net)
return AclContentCacheReturn(net, ctx, a, b, c, d, cc, f)
| apanda/modeling | tests/examples/AclContentCacheTest.py | Python | bsd-3-clause | 1,990 | 0.011055 |
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.rates_response import RatesResponse # noqa: E501
from swagger_client.rest import ApiException
class TestRatesResponse(unittest.TestCase):
"""RatesResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRatesResponse(self):
"""Test RatesResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.rates_response.RatesResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| ltowarek/budget-supervisor | third_party/saltedge/test/test_rates_response.py | Python | mit | 900 | 0 |
#!/usr/bin/python
from ops_i2cbase import I2CBase
# ===========================================================================
# SI1145 Class
#
# Ported from github.com/adafruit/Adafruit_SI1145_Library/
# ===========================================================================
class SI1145:
i2c = None
# SI1145 Address
address = 0x60
# Commands
SI1145_PARAM_QUERY = 0x80
SI1145_PARAM_SET = 0xA0
SI1145_PSALS_AUTO = 0x0F
# Parameters
SI1145_PARAM_I2CADDR = 0x00
SI1145_PARAM_CHLIST = 0x01
SI1145_PARAM_CHLIST_ENUV = 0x80
SI1145_PARAM_CHLIST_ENAUX = 0x40
SI1145_PARAM_CHLIST_ENALSIR = 0x20
SI1145_PARAM_CHLIST_ENALSVIS = 0x10
SI1145_PARAM_CHLIST_ENPS1 = 0x01
SI1145_PARAM_CHLIST_ENPS2 = 0x02
SI1145_PARAM_CHLIST_ENPS3 = 0x04
# Registers
SI1145_REG_PARTID = 0x00
SI1145_REG_UCOEFF0 = 0x13
SI1145_REG_UCOEFF1 = 0x14
SI1145_REG_UCOEFF2 = 0x15
SI1145_REG_UCOEFF3 = 0x16
SI1145_REG_PARAMWR = 0x17
SI1145_REG_COMMAND = 0x18
SI1145_REG_MEASRATE0 = 0x08
SI1145_REG_MEASRATE1 = 0x09
# Constructor
def __init__(self):
# I2C
self.i2c = I2CBase(self.address)
id = self.i2c.readU8(self.SI1145_REG_PARTID)
if (id != 0x45):
print "SI1145 is not found"
# to enable UV reading, set the EN_UV bit in CHLIST, and configure UCOEF [0:3] to the default values of 0x7B, 0x6B, 0x01, and 0x00.
self.i2c.write8(self.SI1145_REG_UCOEFF0, 0x7B)
self.i2c.write8(self.SI1145_REG_UCOEFF1, 0x6B)
self.i2c.write8(self.SI1145_REG_UCOEFF2, 0x01)
self.i2c.write8(self.SI1145_REG_UCOEFF3, 0x00)
# enable UV sensor
self.i2c.write8(self.SI1145_REG_PARAMWR, self.SI1145_PARAM_CHLIST_ENUV | self.SI1145_PARAM_CHLIST_ENALSIR | self.SI1145_PARAM_CHLIST_ENALSVIS | self.SI1145_PARAM_CHLIST_ENPS1)
self.i2c.write8(self.SI1145_REG_COMMAND, self.SI1145_PARAM_CHLIST | self.SI1145_PARAM_SET)
# measurement rate for auto
self.i2c.write8(self.SI1145_REG_MEASRATE0, 0xFF)
# auto run
self.i2c.write8(self.SI1145_REG_COMMAND, self.SI1145_PSALS_AUTO)
def readUVIndex(self):
"Read UV index data from sensor (UV index * 100)"
rawData = self.i2c.readU16(0x2C)
if rawData > 0x0258:
return 0x0258
else:
return rawData
def readAmbientLight(self):
"Read Ambient Light data from sensor (Visible light + IR) in lux"
rawData = self.i2c.readU16(0x22)
return rawData
def readIRLight(self):
"Read IR data from sensor in lux"
rawData = self.i2c.readU16(0x24)
return rawData
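# Minimal usage sketch (assumes an SI1145 wired on the default I2C bus):
#   sensor = SI1145()
#   print sensor.readUVIndex() / 100.0, sensor.readAmbientLight(), sensor.readIRLight()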
| dudakp/rasPi_systemInfo | lib_si1145/lib_si1145.py | Python | mit | 2,472 | 0.045307 |
#
# This is the container for the palettes. To change them
# simply edit this.
#
from numpy import *
NTSC = array([
[0x00,0x00,0x00],[0x40,0x40,0x40],[0x6C,0x6C,0x6C],[0x90,0x90,0x90],
[0xB0,0xB0,0xB0],[0xC8,0xC8,0xC8],[0xDC,0xDC,0xDC],[0xEC,0xEC,0xEC],
[0x44,0x44,0x00],[0x64,0x64,0x10],[0x84,0x84,0x24],[0xA0,0xA0,0x34],
[0xB8,0xB8,0x40],[0xD0,0xD0,0x50],[0xE8,0xE8,0x5C],[0xFC,0xFC,0x68],
[0x70,0x28,0x00],[0x84,0x44,0x14],[0x98,0x5C,0x28],[0xAC,0x78,0x3C],
[0xBC,0x8C,0x4C],[0xCC,0xA0,0x5C],[0xDC,0xB4,0x68],[0xEC,0xC8,0x78],
[0x84,0x18,0x00],[0x98,0x34,0x18],[0xAC,0x50,0x30],[0xC0,0x68,0x48],
[0xD0,0x80,0x5C],[0xE0,0x94,0x70],[0xEC,0xA8,0x80],[0xFC,0xBC,0x94],
[0x88,0x00,0x00],[0x9C,0x20,0x20],[0xB0,0x3C,0x3C],[0xC0,0x58,0x58],
[0xD0,0x70,0x70],[0xE0,0x88,0x88],[0xEC,0xA0,0xA0],[0xFC,0xB4,0xB4],
[0x78,0x00,0x5C],[0x8C,0x20,0x74],[0xA0,0x3C,0x88],[0xB0,0x58,0x9C],
[0xC0,0x70,0xB0],[0xD0,0x84,0xC0],[0xDC,0x9C,0xD0],[0xEC,0xB0,0xE0],
[0x48,0x00,0x78],[0x60,0x20,0x90],[0x78,0x3C,0xA4],[0x8C,0x58,0xB8],
[0xA0,0x70,0xCC],[0xB4,0x84,0xDC],[0xC4,0x9C,0xEC],[0xD4,0xB0,0xFC],
[0x14,0x00,0x84],[0x30,0x20,0x98],[0x4C,0x3C,0xAC],[0x68,0x58,0xC0],
[0x7C,0x70,0xD0],[0x94,0x88,0xE0],[0xA8,0xA0,0xEC],[0xBC,0xB4,0xFC],
[0x00,0x00,0x88],[0x1C,0x20,0x9C],[0x38,0x40,0xB0],[0x50,0x5C,0xC0],
[0x68,0x74,0xD0],[0x7C,0x8C,0xE0],[0x90,0xA4,0xEC],[0xA4,0xB8,0xFC],
[0x00,0x18,0x7C],[0x1C,0x38,0x90],[0x38,0x54,0xA8],[0x50,0x70,0xBC],
[0x68,0x88,0xCC],[0x7C,0x9C,0xDC],[0x90,0xB4,0xEC],[0xA4,0xC8,0xFC],
[0x00,0x2C,0x5C],[0x1C,0x4C,0x78],[0x38,0x68,0x90],[0x50,0x84,0xAC],
[0x68,0x9C,0xC0],[0x7C,0xB4,0xD4],[0x90,0xCC,0xE8],[0xA4,0xE0,0xFC],
[0x00,0x3C,0x2C],[0x1C,0x5C,0x48],[0x38,0x7C,0x64],[0x50,0x9C,0x80],
[0x68,0xB4,0x94],[0x7C,0xD0,0xAC],[0x90,0xE4,0xC0],[0xA4,0xFC,0xD4],
[0x00,0x3C,0x00],[0x20,0x5C,0x20],[0x40,0x7C,0x40],[0x5C,0x9C,0x5C],
[0x74,0xB4,0x74],[0x8C,0xD0,0x8C],[0xA4,0xE4,0xA4],[0xB8,0xFC,0xB8],
[0x14,0x38,0x00],[0x34,0x5C,0x1C],[0x50,0x7C,0x38],[0x6C,0x98,0x50],
[0x84,0xB4,0x68],[0x9C,0xCC,0x7C],[0xB4,0xE4,0x90],[0xC8,0xFC,0xA4],
[0x2C,0x30,0x00],[0x4C,0x50,0x1C],[0x68,0x70,0x34],[0x84,0x8C,0x4C],
[0x9C,0xA8,0x64],[0xB4,0xC0,0x78],[0xCC,0xD4,0x88],[0xE0,0xEC,0x9C],
[0x44,0x28,0x00],[0x64,0x48,0x18],[0x84,0x68,0x30],[0xA0,0x84,0x44],
[0xB8,0x9C,0x58],[0xD0,0xB4,0x6C],[0xE8,0xCC,0x7C],[0xFC,0xE0,0x8C]
],uint8)
PAL = array([
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
[0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC],
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
[0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC],
[0x80,0x58,0x00],[0x94,0x70,0x20],[0xA8,0x84,0x3C],[0xBC,0x9C,0x58],
[0xCC,0xAC,0x70],[0xDC,0xC0,0x84],[0xEC,0xD0,0x9C],[0xFC,0xE0,0xB0],
[0x44,0x5C,0x00],[0x5C,0x78,0x20],[0x74,0x90,0x3C],[0x8C,0xAC,0x58],
[0xA0,0xC0,0x70],[0xB0,0xD4,0x84],[0xC4,0xE8,0x9C],[0xD4,0xFC,0xB0],
[0x70,0x34,0x00],[0x88,0x50,0x20],[0xA0,0x68,0x3C],[0xB4,0x84,0x58],
[0xC8,0x98,0x70],[0xDC,0xAC,0x84],[0xEC,0xC0,0x9C],[0xFC,0xD4,0xB0],
[0x00,0x64,0x14],[0x20,0x80,0x34],[0x3C,0x98,0x50],[0x58,0xB0,0x6C],
[0x70,0xC4,0x84],[0x84,0xD8,0x9C],[0x9C,0xE8,0xB4],[0xB0,0xFC,0xC8],
[0x70,0x00,0x14],[0x88,0x20,0x34],[0xA0,0x3C,0x50],[0xB4,0x58,0x6C],
[0xC8,0x70,0x84],[0xDC,0x84,0x9C],[0xEC,0x9C,0xB4],[0xFC,0xB0,0xC8],
[0x00,0x5C,0x5C],[0x20,0x74,0x74],[0x3C,0x8C,0x8C],[0x58,0xA4,0xA4],
[0x70,0xB8,0xB8],[0x84,0xC8,0xC8],[0x9C,0xDC,0xDC],[0xB0,0xEC,0xEC],
[0x70,0x00,0x5C],[0x84,0x20,0x74],[0x94,0x3C,0x88],[0xA8,0x58,0x9C],
[0xB4,0x70,0xB0],[0xC4,0x84,0xC0],[0xD0,0x9C,0xD0],[0xE0,0xB0,0xE0],
[0x00,0x3C,0x70],[0x1C,0x58,0x88],[0x38,0x74,0xA0],[0x50,0x8C,0xB4],
[0x68,0xA4,0xC8],[0x7C,0xB8,0xDC],[0x90,0xCC,0xEC],[0xA4,0xE0,0xFC],
[0x58,0x00,0x70],[0x6C,0x20,0x88],[0x80,0x3C,0xA0],[0x94,0x58,0xB4],
[0xA4,0x70,0xC8],[0xB4,0x84,0xDC],[0xC4,0x9C,0xEC],[0xD4,0xB0,0xFC],
[0x00,0x20,0x70],[0x1C,0x3C,0x88],[0x38,0x58,0xA0],[0x50,0x74,0xB4],
[0x68,0x88,0xC8],[0x7C,0xA0,0xDC],[0x90,0xB4,0xEC],[0xA4,0xC8,0xFC],
[0x3C,0x00,0x80],[0x54,0x20,0x94],[0x6C,0x3C,0xA8],[0x80,0x58,0xBC],
[0x94,0x70,0xCC],[0xA8,0x84,0xDC],[0xB8,0x9C,0xEC],[0xC8,0xB0,0xFC],
[0x00,0x00,0x88],[0x20,0x20,0x9C],[0x3C,0x3C,0xB0],[0x58,0x58,0xC0],
[0x70,0x70,0xD0],[0x84,0x84,0xE0],[0x9C,0x9C,0xEC],[0xB0,0xB0,0xFC],
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
[0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC],
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
[0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC]
],uint8)
SECAM = repeat([[0x00,0x00,0x00],
[0x21,0x21,0xFF],
[0xF0,0x3C,0x79],
[0xFF,0x50,0xFF],
[0x7F,0xFF,0x00],
[0x7F,0xFF,0xFF],
[0xFF,0xFF,0x3F],
[0xFF,0xFF,0xFF]],16).astype(uint8)
| asterick/pytari | Palettes.py | Python | gpl-2.0 | 5,212 | 0.13891 |
#!/home/mharris/Projects/DevOpsDays/venv/bin/python2
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates pseudo-XML from standalone reStructuredText '
'sources (for testing purposes). ' + default_description)
publish_cmdline(description=description)
| morissette/devopsdays-hackathon-2016 | venv/bin/rst2pseudoxml.py | Python | gpl-3.0 | 635 | 0.001575 |
#Schedule-generator for LHL use written by Acebulf (acebulf at gmail.com)
#Current version 1.0 -- Jan 16 2014
#Copyrighted under the MIT License (see License included in the github repo)
import random
import time
while 1:
print "Starting random-schedule generation process..."
starttime = time.time()
kill = False
schedule = [[]]*30
teams = ["BOS", "CHI", "COL", "DET", "NJD", "WSH"]
# Randomly Choose Team
team1 = random.choice(teams)
teams_mt1 = list(teams)
teams_mt1.remove(team1)
matchups = []
for x in teams_mt1:
for y in xrange(6):
matchups.append((team1,x))
random.shuffle(matchups)
for x in xrange(30):
schedule[x]=[matchups[x]]
team2 = random.choice(teams_mt1)
teams_2 = list(teams_mt1)
teams_2.remove(team2)
matchups=[]
for x in teams_2:
for y in xrange(6):
matchups.append((team2,x))
random.shuffle(matchups)
days = range(30)
def playing_day(team, day):
occupied = [i[0] for i in day] + [i[1] for i in day]
return (team in occupied)
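    # Assign each of the second team's matchups to a random day on which neither
    # club already plays; if this takes more than 4 seconds, give up and restart.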
for matchup in matchups:
while 1:
temp_day = random.choice(days)
if time.time()-starttime >= 4:
kill = True
break
if not playing_day(matchup[0],schedule[temp_day]) and not playing_day(matchup[1],schedule[temp_day]):
schedule[temp_day].append(matchup)
days.remove(temp_day)
break
if kill:
print "Error in stage 1; restarting"
continue
print "Stage 1/3 Successfully Completed!"
days2games = list(schedule)
days1game = []
try:
for x in xrange(30):
if len(days2games[x]) == 1:
days1game.append(days2games.pop(x))
except IndexError:
pass
team3 = random.choice(teams_2)
teams_3 = list(teams_2)
teams_3.remove(team3)
matchups=[]
for x in teams_3:
matchups.append((team3,x))
team4 = random.choice(teams_3)
teams_4 = list(teams_3)
teams_4.remove(team4)
for x in teams_4:
matchups.append((team4,x))
matchups.append((teams_4[0],teams_4[1]))
for x in days2games:
for y in matchups:
if not playing_day(y[0],x) and not playing_day(y[1],x):
x.append(y)
newmatchups = []
for x in matchups:
newmatchups.append(x)
newmatchups.append(x)
random.shuffle(newmatchups)
print "Stage 2/3 Successfully Completed!"
for x in days1game:
for y in newmatchups:
if not playing_day(y[0],x) and not playing_day(y[1],x):
x.append(y)
newmatchups.remove(y)
for x in schedule:
if len(x) != 3:
print "Problem encountered in stage 3; Restarting..."
kill=True
break
if kill:
continue
print "Stage 3/3 Successfully Completed"
break
print "Schedule Successfully Generated"
print "Printing to File..."
f = open("schedule.txt","w")
dayno = 0
while dayno <= 29:
f.write("Day {0}:\n".format(dayno+1))
for x in schedule[dayno]:
f.write(x[0] + " - " + x[1]+"\n")
f.write("\n")
dayno += 1
print "Result written to file. Program terminating."
| Acebulf/HockeyPythonScripts | schedule.py | Python | mit | 3,298 | 0.006064 |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
_REQUEST = b''
_RESPONSE = b''
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
def handle_unary_unary(request, servicer_context):
return _RESPONSE
def handle_unary_stream(request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH):
yield _RESPONSE
def handle_stream_unary(request_iterator, servicer_context):
for request in request_iterator:
pass
return _RESPONSE
def handle_stream_stream(request_iterator, servicer_context):
for request in request_iterator:
yield _RESPONSE
class _MethodHandler(grpc.RpcMethodHandler):
def __init__(self, request_streaming, response_streaming):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = None
self.response_serializer = None
self.unary_unary = None
self.unary_stream = None
self.stream_unary = None
self.stream_stream = None
if self.request_streaming and self.response_streaming:
self.stream_stream = handle_stream_stream
elif self.request_streaming:
self.stream_unary = handle_stream_unary
elif self.response_streaming:
self.unary_stream = handle_unary_stream
else:
self.unary_unary = handle_unary_unary
class _GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return _MethodHandler(False, False)
elif handler_call_details.method == _UNARY_STREAM:
return _MethodHandler(False, True)
elif handler_call_details.method == _STREAM_UNARY:
return _MethodHandler(True, False)
elif handler_call_details.method == _STREAM_STREAM:
return _MethodHandler(True, True)
else:
return None
class EmptyMessageTest(unittest.TestCase):
def setUp(self):
self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
self._server = grpc.server(
self._server_pool, handlers=(_GenericHandler(),))
port = self._server.add_insecure_port('[::]:0')
self._server.start()
self._channel = grpc.insecure_channel('localhost:%d' % port)
def tearDown(self):
self._server.stop(0)
def testUnaryUnary(self):
response = self._channel.unary_unary(_UNARY_UNARY)(_REQUEST)
self.assertEqual(_RESPONSE, response)
def testUnaryStream(self):
response_iterator = self._channel.unary_stream(_UNARY_STREAM)(_REQUEST)
self.assertSequenceEqual([_RESPONSE] * test_constants.STREAM_LENGTH,
list(response_iterator))
def testStreamUnary(self):
response = self._channel.stream_unary(_STREAM_UNARY)(
iter([_REQUEST] * test_constants.STREAM_LENGTH))
self.assertEqual(_RESPONSE, response)
def testStreamStream(self):
response_iterator = self._channel.stream_stream(_STREAM_STREAM)(
iter([_REQUEST] * test_constants.STREAM_LENGTH))
self.assertSequenceEqual([_RESPONSE] * test_constants.STREAM_LENGTH,
list(response_iterator))
if __name__ == '__main__':
unittest.main(verbosity=2)
| royalharsh/grpc | src/python/grpcio_tests/tests/unit/_empty_message_test.py | Python | bsd-3-clause | 5,056 | 0.000198 |
"""
sentry.plugins.sentry_useragents.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import httpagentparser
import sentry
from django.utils.translation import ugettext_lazy as _
from sentry.plugins import register
from sentry.plugins.bases.tag import TagPlugin
class UserAgentPlugin(TagPlugin):
version = sentry.VERSION
author = "Sentry Team"
author_url = "https://github.com/getsentry/sentry"
project_default_enabled = True
def get_tag_values(self, event):
http = event.interfaces.get('sentry.interfaces.Http')
if not http:
return []
if not http.headers:
return []
if 'User-Agent' not in http.headers:
return []
ua = httpagentparser.detect(http.headers['User-Agent'])
if not ua:
return []
result = self.get_tag_from_ua(ua)
if not result:
return []
return [result]
class BrowserPlugin(UserAgentPlugin):
"""
Automatically adds the 'browser' tag from events containing interface data
    from ``sentry.interfaces.Http``.
"""
slug = 'browsers'
title = _('Auto Tag: Browsers')
tag = 'browser'
tag_label = _('Browser Name')
def get_tag_from_ua(self, ua):
if 'browser' not in ua:
return
tag = ua['browser']['name']
if 'version' in ua['browser']:
tag += ' ' + ua['browser']['version']
return tag
register(BrowserPlugin)
class OsPlugin(UserAgentPlugin):
"""
Automatically adds the 'os' tag from events containing interface data
    from ``sentry.interfaces.Http``.
"""
slug = 'os'
title = _('Auto Tag: Operating Systems')
tag = 'os'
tag_label = _('Operating System')
def get_tag_from_ua(self, ua):
if 'flavor' in ua:
tag = ua['flavor']['name']
if 'version' in ua['flavor']:
tag += ' ' + ua['version']
elif 'os' in ua:
# Linux
tag = ua['os']['name']
if 'version' in ua['os']:
tag += ' ' + ua['version']
elif 'dist' in ua:
# Ubuntu
tag += ua['dist']['name']
else:
return
return tag
register(OsPlugin)
| beni55/sentry | src/sentry/plugins/sentry_useragents/models.py | Python | bsd-3-clause | 2,383 | 0.00042 |
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import unittest
from synapse.events import FrozenEvent
from synapse.events.utils import prune_event, serialize_event
def MockEvent(**kwargs):
if "event_id" not in kwargs:
kwargs["event_id"] = "fake_event_id"
if "type" not in kwargs:
kwargs["type"] = "fake_type"
return FrozenEvent(kwargs)
class PruneEventTestCase(unittest.TestCase):
""" Asserts that a new event constructed with `evdict` will look like
`matchdict` when it is redacted. """
def run_test(self, evdict, matchdict):
self.assertEquals(
prune_event(FrozenEvent(evdict)).get_dict(),
matchdict
)
def test_minimal(self):
self.run_test(
{
'type': 'A',
'event_id': '$test:domain',
},
{
'type': 'A',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
def test_basic_keys(self):
self.run_test(
{
'type': 'A',
'room_id': '!1:domain',
'sender': '@2:domain',
'event_id': '$3:domain',
'origin': 'domain',
},
{
'type': 'A',
'room_id': '!1:domain',
'sender': '@2:domain',
'event_id': '$3:domain',
'origin': 'domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
def test_unsigned_age_ts(self):
self.run_test(
{
'type': 'B',
'event_id': '$test:domain',
'unsigned': {'age_ts': 20},
},
{
'type': 'B',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {'age_ts': 20},
}
)
self.run_test(
{
'type': 'B',
'event_id': '$test:domain',
'unsigned': {'other_key': 'here'},
},
{
'type': 'B',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
def test_content(self):
self.run_test(
{
'type': 'C',
'event_id': '$test:domain',
'content': {'things': 'here'},
},
{
'type': 'C',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
self.run_test(
{
'type': 'm.room.create',
'event_id': '$test:domain',
'content': {'creator': '@2:domain', 'other_field': 'here'},
},
{
'type': 'm.room.create',
'event_id': '$test:domain',
'content': {'creator': '@2:domain'},
'signatures': {},
'unsigned': {},
}
)
class SerializeEventTestCase(unittest.TestCase):
def serialize(self, ev, fields):
return serialize_event(ev, 1479807801915, only_event_fields=fields)
def test_event_fields_works_with_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar"
),
["room_id"]
),
{
"room_id": "!foo:bar",
}
)
def test_event_fields_works_with_nested_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"body": "A message",
},
),
["content.body"]
),
{
"content": {
"body": "A message",
}
}
)
def test_event_fields_works_with_dot_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"key.with.dots": {},
},
),
["content.key\.with\.dots"]
),
{
"content": {
"key.with.dots": {},
}
}
)
def test_event_fields_works_with_nested_dot_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"not_me": 1,
"nested.dot.key": {
"leaf.key": 42,
"not_me_either": 1,
},
},
),
["content.nested\.dot\.key.leaf\.key"]
),
{
"content": {
"nested.dot.key": {
"leaf.key": 42,
},
}
}
)
def test_event_fields_nops_with_unknown_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"foo": "bar",
},
),
["content.foo", "content.notexists"]
),
{
"content": {
"foo": "bar",
}
}
)
def test_event_fields_nops_with_non_dict_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"foo": ["I", "am", "an", "array"],
},
),
["content.foo.am"]
),
{}
)
def test_event_fields_nops_with_array_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"foo": ["I", "am", "an", "array"],
},
),
["content.foo.1"]
),
{}
)
def test_event_fields_all_fields_if_empty(self):
self.assertEquals(
self.serialize(
MockEvent(
type="foo",
event_id="test",
room_id="!foo:bar",
content={
"foo": "bar",
},
),
[]
),
{
"type": "foo",
"event_id": "test",
"room_id": "!foo:bar",
"content": {
"foo": "bar",
},
"unsigned": {}
}
)
def test_event_fields_fail_if_fields_not_str(self):
with self.assertRaises(TypeError):
self.serialize(
MockEvent(
room_id="!foo:bar",
content={
"foo": "bar",
},
),
["room_id", 4]
)
| TribeMedia/synapse | tests/events/test_utils.py | Python | apache-2.0 | 8,591 | 0.000582 |
import os
BASEDIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
##
# Database settings
##
DB_HOST = 'localhost'
DB_NAME = 'scoremodel'
DB_USER = 'scoremodel'
DB_PASS = 'scoremodel'
##
# MySQL SSL connections
##
use_ssl = False
SSL_CA = '/etc/mysql/certs/ca-cert.pem'
SSL_KEY = '/etc/mysql/keys/client-key.pem'
SSL_CERT = '/etc/mysql/certs/client-cert.pem'
##
# Flask-WTF
##
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret_key'
##
# Log-in
##
REMEMBER_COOKIE_SECURE = True
REMEMBER_COOKIE_HTTPONLY = True
SESSION_PROTECTION = "strong"
##
# Babel
##
BABEL_DEFAULT_LOCALE = 'en'
BABEL_DEFAULT_TIMEZONE = 'UTC'
LANGUAGES = ['nl', 'en']
##
# Uploads
##
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = ('txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif')
MAX_CONTENT_LENGTH = 16 * 1024 * 1024 # 16 MB
##
# Logger
##
LOG_FILENAME = 'logs/scoremodel.log'
if use_ssl is True:
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://{user}:{passw}@{host}/{db}?ssl_key={ssl_key}&ssl_cert={ssl_cert}'.format(
user=DB_USER, passw=DB_PASS,
host=DB_HOST, db=DB_NAME, ssl_key=SSL_KEY, ssl_cert=SSL_CERT)
else:
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://{user}:{passw}@{host}/{db}'.format(user=DB_USER, passw=DB_PASS,
host=DB_HOST, db=DB_NAME)
| PACKED-vzw/scoremodel | example_config.py | Python | gpl-2.0 | 1,348 | 0.002967 |
""" Tools for reading Mac resource forks. """
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import struct
from fontTools.misc import sstruct
from collections import OrderedDict
try:
from collections.abc import MutableMapping
except ImportError:
from UserDict import DictMixin as MutableMapping
class ResourceError(Exception):
pass
class ResourceReader(MutableMapping):
def __init__(self, fileOrPath):
self._resources = OrderedDict()
if hasattr(fileOrPath, 'read'):
self.file = fileOrPath
else:
try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
@staticmethod
def openResourceFork(path):
with open(path + '/..namedfork/rsrc', 'rb') as resfork:
data = resfork.read()
infile = BytesIO(data)
infile.name = path
return infile
@staticmethod
def openDataFork(path):
with open(path, 'rb') as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
return infile
def _readFile(self):
self._readHeaderAndMap()
self._readTypeList()
def _read(self, numBytes, offset=None):
if offset is not None:
try:
self.file.seek(offset)
except OverflowError:
raise ResourceError("Failed to seek offset ('offset' is too large)")
if self.file.tell() != offset:
raise ResourceError('Failed to seek offset (reached EOF)')
try:
data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError('Cannot read resource (not enough data)')
return data
def _readHeaderAndMap(self):
self.file.seek(0)
headerData = self._read(ResourceForkHeaderSize)
sstruct.unpack(ResourceForkHeader, headerData, self)
# seek to resource map, skip reserved
mapOffset = self.mapOffset + 22
resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
sstruct.unpack(ResourceMapHeader, resourceMapData, self)
self.absTypeListOffset = self.mapOffset + self.typeListOffset
self.absNameListOffset = self.mapOffset + self.nameListOffset
def _readTypeList(self):
absTypeListOffset = self.absTypeListOffset
numTypesData = self._read(2, absTypeListOffset)
self.numTypes, = struct.unpack('>H', numTypesData)
absTypeListOffset2 = absTypeListOffset + 2
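		# The resource map stores counts minus one, hence the "+ 1" here and for numRes below.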
for i in range(self.numTypes + 1):
resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
resType = tostr(item['type'], encoding='mac-roman')
refListOffset = absTypeListOffset + item['refListOffset']
numRes = item['numRes'] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
def _readReferenceList(self, resType, refListOffset, numRes):
resources = []
for i in range(numRes):
refOffset = refListOffset + ResourceRefItemSize * i
refData = self._read(ResourceRefItemSize, refOffset)
res = Resource(resType)
res.decompile(refData, self)
resources.append(res)
return resources
def __getitem__(self, resType):
return self._resources[resType]
def __delitem__(self, resType):
del self._resources[resType]
def __setitem__(self, resType, resources):
self._resources[resType] = resources
def __len__(self):
return len(self._resources)
def __iter__(self):
return iter(self._resources)
def keys(self):
return self._resources.keys()
@property
def types(self):
return list(self._resources.keys())
def countResources(self, resType):
"""Return the number of resources of a given type."""
try:
return len(self[resType])
except KeyError:
return 0
def getIndices(self, resType):
numRes = self.countResources(resType)
if numRes:
return list(range(1, numRes+1))
else:
return []
def getNames(self, resType):
"""Return list of names of all resources of a given type."""
return [res.name for res in self.get(resType, []) if res.name is not None]
def getIndResource(self, resType, index):
"""Return resource of given type located at an index ranging from 1
to the number of resources for that type, or None if not found.
"""
if index < 1:
return None
try:
res = self[resType][index-1]
except (KeyError, IndexError):
return None
return res
def getNamedResource(self, resType, name):
"""Return the named resource of given type, else return None."""
name = tostr(name, encoding='mac-roman')
for res in self.get(resType, []):
if res.name == name:
return res
return None
def close(self):
if not self.file.closed:
self.file.close()
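# Example usage (sketch; the file path is hypothetical):
#   reader = ResourceReader("Example.dfont")
#   for resType in reader.types:
#       for res in reader[resType]:
#           print(res.id, res.name, len(res.data))
#   reader.close()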
class Resource(object):
def __init__(self, resType=None, resData=None, resID=None, resName=None,
resAttr=None):
self.type = resType
self.data = resData
self.id = resID
self.name = resName
self.attr = resAttr
def decompile(self, refData, reader):
sstruct.unpack(ResourceRefItem, refData, self)
# interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
absDataOffset = reader.dataOffset + self.dataOffset
dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
self.data = reader._read(dataLength)
if self.nameOffset == -1:
return
absNameOffset = reader.absNameListOffset + self.nameOffset
nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding='mac-roman')
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""
ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)
ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""
ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)
ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""
ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""
ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
| MitchTalmadge/Emoji-Tools | src/main/resources/PythonScripts/fontTools/misc/macRes.py | Python | gpl-3.0 | 6,563 | 0.026512 |
# https://www.reddit.com/r/dailyprogrammer/comments/3fva66/20150805_challenge_226_intermediate_connect_four/
import sys, string
xmoves = open(sys.argv[1]).read().translate(None, string.ascii_lowercase + ' \n')
omoves = open(sys.argv[1]).read().translate(None, string.ascii_uppercase + ' \n')
board = [[' ' for x in range(6)] for x in range(7)]
def insert(colchar, player):
colnumber = ord(colchar.lower()) - ord('a')
col = board[colnumber]
for i in range(len(col)):
if col[i] == ' ':
col[i] = player
break
def checkwinner(player):
    for x in range(7):  # the board has 7 columns (a-g)
for y in range(6):
if board[x][y] == player:
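                # ''.join(cells).strip(player) is empty only when all three cells
                # belong to `player`, i.e. four in a row together with board[x][y].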
top = board[x][y+1:y+4]
if len(top) == 3 and not ''.join(top).strip(player):
return True
try:
right = [board[x+1][y], board[x+2][y], board[x+3][y]]
if not ''.join(right).strip(player):
return True
except:
pass
try:
topright = [board[x+1][y+1], board[x+2][y+2], board[x+3][y+3]]
if not ''.join(topright).strip(player):
return True
except:
pass
for i in range(len(xmoves)):
insert(xmoves[i], 'X')
if checkwinner('X'):
print 'X won at move ' + str(i+1)
break
insert(omoves[i], 'O')
if checkwinner('O'):
print 'O won at move ' + str(i+1)
break
| lw7360/dailyprogrammer | Intermediate/226/226.py | Python | mit | 1,541 | 0.00584 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# References:
# [1] Jean-Luc Starck, Fionn Murtagh & Jalal M. Fadili
# Sparse Image and Signal Processing: Wavelets, Curvelets, Morphological Diversity
# Section 3.5, 6.6
#
# Credits:
# [1] https://github.com/abrazhe/image-funcut/blob/master/imfun/atrous.py
#
# Aaron LI
# Created: 2016-03-17
# Updated: 2016-04-22
#
# ChangeLog:
# 2016-04-22:
# * Add argument "end-scale" to specifiy the end denoising scale
# * Check outfile existence first
# * Add argument "start-scale" to specifiy the start denoising scale
# * Fix a bug about "p_cutoff" when "comp" contains ALL False's
# * Show more verbose information/details
# 2016-04-20:
# * Add argparse and main() for scripting
#
"""
Starlet wavelet transform, i.e., isotropic undecimated wavelet transform
(IUWT), or à trous wavelet transform.
And the multi-scale variance stabilizing transform (MS-VST), which can be used
to effectively remove Poisson noise.
"""
__version__ = "0.2.5"
__date__ = "2016-04-22"
import sys
import os
import argparse
from datetime import datetime
import numpy as np
import scipy as sp
from scipy import signal
from scipy import stats  # required: "import scipy as sp" alone does not load sp.stats
from astropy.io import fits
class B3Spline: # {{{
"""
B3-spline wavelet.
"""
# scaling function (phi)
dec_lo = np.array([1.0, 4.0, 6.0, 4.0, 1.0]) / 16
dec_hi = np.array([-1.0, -4.0, 10.0, -4.0, -1.0]) / 16
rec_lo = np.array([0.0, 0.0, 1.0, 0.0, 0.0])
rec_hi = np.array([0.0, 0.0, 1.0, 0.0, 0.0])
# B3Spline }}}
class IUWT: # {{{
"""
Isotropic undecimated wavelet transform.
"""
## Decomposition filters list:
# a_{scale} = convole(a_0, filters[scale])
# Note: the zero-th scale filter (i.e., delta function) is the first
# element, thus the array index is the same as the decomposition scale.
filters = []
phi = None # wavelet scaling function (2D)
level = 0 # number of transform level
decomposition = None # decomposed coefficients/images
reconstruction = None # reconstructed image
# convolution boundary condition
boundary = "symm"
def __init__(self, phi=B3Spline.dec_lo, level=None, boundary="symm",
data=None):
self.set_wavelet(phi=phi)
self.level = level
self.boundary = boundary
self.data = np.array(data)
def reset(self):
"""
Reset the object attributes.
"""
self.data = None
self.phi = None
self.decomposition = None
self.reconstruction = None
self.level = 0
self.filters = []
self.boundary = "symm"
def load_data(self, data):
self.reset()
self.data = np.array(data)
def set_wavelet(self, phi):
self.reset()
phi = np.array(phi)
if phi.ndim == 1:
phi_ = phi.reshape(1, -1)
self.phi = np.dot(phi_.T, phi_)
elif phi.ndim == 2:
self.phi = phi
else:
raise ValueError("Invalid phi dimension")
def calc_filters(self):
"""
Calculate the convolution filters of each scale.
Note: the zero-th scale filter (i.e., delta function) is the first
element, thus the array index is the same as the decomposition scale.
"""
self.filters = []
# scale 0: delta function
h = np.array([[1]]) # NOTE: 2D
self.filters.append(h)
# scale 1
h = self.phi[::-1, ::-1]
self.filters.append(h)
for scale in range(2, self.level+1):
h_up = self.zupsample(self.phi, order=scale-1)
h2 = signal.convolve2d(h_up[::-1, ::-1], h, mode="same",
boundary=self.boundary)
self.filters.append(h2)
def transform(self, data, scale, boundary="symm"):
"""
Perform only one scale wavelet transform for the given data.
return:
[ approx, detail ]
"""
self.decomposition = []
approx = signal.convolve2d(data, self.filters[scale],
mode="same", boundary=self.boundary)
detail = data - approx
return [approx, detail]
def decompose(self, level, boundary="symm"):
"""
Perform IUWT decomposition in the plain loop way.
The filters of each scale/level are calculated first, then the
approximations of each scale/level are calculated by convolving the
raw/finest image with these filters.
return:
[ W_1, W_2, ..., W_n, A_n ]
n = level
W: wavelet details
A: approximation
"""
self.boundary = boundary
if self.level != level or self.filters == []:
self.level = level
self.calc_filters()
self.decomposition = []
approx = self.data
for scale in range(1, level+1):
# approximation:
approx2 = signal.convolve2d(self.data, self.filters[scale],
mode="same", boundary=self.boundary)
# wavelet details:
w = approx - approx2
self.decomposition.append(w)
if scale == level:
self.decomposition.append(approx2)
approx = approx2
return self.decomposition
def decompose_recursive(self, level, boundary="symm"):
"""
Perform the IUWT decomposition in the recursive way.
return:
[ W_1, W_2, ..., W_n, A_n ]
n = level
W: wavelet details
A: approximation
"""
self.level = level
self.boundary = boundary
self.decomposition = self.__decompose(self.data, self.phi, level=level)
return self.decomposition
def __decompose(self, data, phi, level):
"""
2D IUWT decomposition (or stationary wavelet transform).
This is a convolution version, where kernel is zero-upsampled
explicitly. Not fast.
Parameters:
- level : level of decomposition
- phi : low-pass filter kernel
- boundary : boundary conditions (passed to scipy.signal.convolve2d,
'symm' by default)
Returns:
list of wavelet details + last approximation. Each element in
the list is an image of the same size as the input image.
"""
if level <= 0:
return data
shapecheck = map(lambda a,b:a>b, data.shape, phi.shape)
assert np.all(shapecheck)
# approximation:
approx = signal.convolve2d(data, phi[::-1, ::-1], mode="same",
boundary=self.boundary)
# wavelet details:
w = data - approx
phi_up = self.zupsample(phi, order=1)
shapecheck = map(lambda a,b:a>b, data.shape, phi_up.shape)
if level == 1:
return [w, approx]
elif not np.all(shapecheck):
print("Maximum allowed decomposition level reached",
file=sys.stderr)
return [w, approx]
else:
return [w] + self.__decompose(approx, phi_up, level-1)
@staticmethod
def zupsample(data, order=1):
"""
Upsample data array by interleaving it with zero's.
h{up_order: n}[l] = (1) h[l], if l % 2^n == 0;
(2) 0, otherwise
"""
shape = data.shape
new_shape = [ (2**order * (n-1) + 1) for n in shape ]
output = np.zeros(new_shape, dtype=data.dtype)
output[[ slice(None, None, 2**order) for d in shape ]] = data
return output
def reconstruct(self, decomposition=None):
if decomposition is not None:
reconstruction = np.sum(decomposition, axis=0)
return reconstruction
else:
self.reconstruction = np.sum(self.decomposition, axis=0)
def get_detail(self, scale):
"""
Get the wavelet detail coefficients of given scale.
Note: 1 <= scale <= level
"""
if scale < 1 or scale > self.level:
raise ValueError("Invalid scale")
return self.decomposition[scale-1]
def get_approx(self):
"""
Get the approximation coefficients of the largest scale.
"""
return self.decomposition[-1]
# IUWT }}}
class IUWT_VST(IUWT): # {{{
"""
    IUWT with Multi-scale variance stabilizing transform.
    Reference:
[1] Bo Zhang, Jalal M. Fadili & Jean-Luc Starck,
IEEE Trans. Image Processing, 17, 17, 2008
"""
# VST coefficients and the corresponding asymptotic standard deviation
# of each scale.
vst_coef = []
def reset(self):
super(self.__class__, self).reset()
vst_coef = []
def __decompose(self):
raise AttributeError("No '__decompose' attribute")
@staticmethod
def soft_threshold(data, threshold):
if isinstance(data, np.ndarray):
data_th = data.copy()
data_th[np.abs(data) <= threshold] = 0.0
data_th[data > threshold] -= threshold
data_th[data < -threshold] += threshold
else:
data_th = data
if np.abs(data) <= threshold:
data_th = 0.0
elif data > threshold:
data_th -= threshold
else:
data_th += threshold
return data_th
def tau(self, k, scale):
"""
Helper function used in VST coefficients calculation.
"""
return np.sum(np.power(self.filters[scale], k))
def filters_product(self, scale1, scale2):
"""
Calculate the scalar product of the filters of two scales,
considering only the overlapped part.
Helper function used in VST coefficients calculation.
"""
if scale1 > scale2:
filter_big = self.filters[scale1]
filter_small = self.filters[scale2]
else:
filter_big = self.filters[scale2]
filter_small = self.filters[scale1]
# crop the big filter to match the size of the small filter
size_big = filter_big.shape
size_small = filter_small.shape
size_diff2 = list(map(lambda a,b: (a-b)//2, size_big, size_small))
filter_big_crop = filter_big[
size_diff2[0]:(size_big[0]-size_diff2[0]),
size_diff2[1]:(size_big[1]-size_diff2[1])]
assert(np.all(list(map(lambda a,b: a==b,
size_small, filter_big_crop.shape))))
product = np.sum(filter_small * filter_big_crop)
return product
def calc_vst_coef(self):
"""
Calculate the VST coefficients and the corresponding
asymptotic standard deviation of each scale, according to the
calculated filters of each scale/level.
"""
self.vst_coef = []
for scale in range(self.level+1):
b = 2 * np.sqrt(np.abs(self.tau(1, scale)) / self.tau(2, scale))
c = 7.0*self.tau(2, scale) / (8.0*self.tau(1, scale)) - \
self.tau(3, scale) / (2.0*self.tau(2, scale))
if scale == 0:
std = -1.0
else:
std = np.sqrt((self.tau(2, scale-1) / \
(4 * self.tau(1, scale-1)**2)) + \
(self.tau(2, scale) / (4 * self.tau(1, scale)**2)) - \
(self.filters_product(scale-1, scale) / \
(2 * self.tau(1, scale-1) * self.tau(1, scale))))
self.vst_coef.append({ "b": b, "c": c, "std": std })
def vst(self, data, scale, coupled=True):
"""
        Perform the variance stabilizing transform
XXX: parameter `coupled' why??
Credit: MSVST-V1.0/src/libmsvst/B3VSTAtrous.h
"""
self.vst_coupled = coupled
if self.vst_coef == []:
self.calc_vst_coef()
if coupled:
b = 1.0
else:
b = self.vst_coef[scale]["b"]
data_vst = b * np.sqrt(np.abs(data + self.vst_coef[scale]["c"]))
return data_vst
def ivst(self, data, scale, cbias=True):
"""
        Inverse variance stabilizing transform
NOTE: assuming that `a_{j} + c^{j}' are all positive.
XXX: parameter `cbias' why??
        `bias correction' is recommended while reconstructing the data
after estimation
Credit: MSVST-V1.0/src/libmsvst/B3VSTAtrous.h
"""
self.vst_cbias = cbias
if cbias:
cb = 1.0 / (self.vst_coef[scale]["b"] ** 2)
else:
cb = 0.0
data_ivst = data ** 2 + cb - self.vst_coef[scale]["c"]
return data_ivst
def is_significant(self, scale, fdr=0.1, independent=False, verbose=False):
"""
Multiple hypothesis testing with false discovery rate (FDR) control.
`independent': whether the test statistics of all the null
hypotheses are independent.
If `independent=True': FDR <= (m0/m) * q
otherwise: FDR <= (m0/m) * q * (1 + 1/2 + 1/3 + ... + 1/m)
References:
[1] False discovery rate - Wikipedia
https://en.wikipedia.org/wiki/False_discovery_rate
"""
coef = self.get_detail(scale)
std = self.vst_coef[scale]["std"]
pvalues = 2.0 * (1.0 - sp.stats.norm.cdf(np.abs(coef) / std))
p_sorted = pvalues.flatten()
p_sorted.sort()
N = len(p_sorted)
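        # Step-up FDR control: plain Benjamini-Hochberg when the tests are treated
        # as independent (cn = 1); otherwise apply the Benjamini-Yekutieli factor
        # cn = 1 + 1/2 + ... + 1/N for arbitrary dependence.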
if independent:
cn = 1.0
else:
cn = np.sum(1.0 / np.arange(1, N+1))
p_comp = fdr * np.arange(N) / (N * cn)
comp = (p_sorted < p_comp)
if np.sum(comp) == 0:
# `comp' contains ALL False
p_cutoff = 0.0
else:
# cutoff p-value after FDR control/correction
p_cutoff = np.max(p_sorted[comp])
sig = (pvalues <= p_cutoff)
if verbose:
print("std/sigma: %g, p_cutoff: %g" % (std, p_cutoff),
flush=True, file=sys.stderr)
return (sig, p_cutoff)
def denoise(self, fdr=0.1, fdr_independent=False, start_scale=1,
end_scale=None, verbose=False):
"""
Denoise the wavelet coefficients by controlling FDR.
"""
self.fdr = fdr
self.fdr_indepent = fdr_independent
self.denoised = []
# supports of significant coefficients of each scale
self.sig_supports = [None] # make index match the scale
self.p_cutoff = [None]
if verbose:
print("MSVST denosing ...", flush=True, file=sys.stderr)
for scale in range(1, self.level+1):
coef = self.get_detail(scale)
if verbose:
print("\tScale %d: " % scale, end="",
flush=True, file=sys.stderr)
if (scale < start_scale) or \
((end_scale is not None) and scale > end_scale):
if verbose:
print("skipped", flush=True, file=sys.stderr)
sig, p_cutoff = None, None
else:
sig, p_cutoff = self.is_significant(scale, fdr=fdr,
independent=fdr_independent, verbose=verbose)
coef[np.logical_not(sig)] = 0.0
#
self.denoised.append(coef)
self.sig_supports.append(sig)
self.p_cutoff.append(p_cutoff)
# append the last approximation
self.denoised.append(self.get_approx())
def decompose(self, level=5, boundary="symm", verbose=False):
"""
2D IUWT decomposition with VST.
"""
self.boundary = boundary
if self.level != level or self.filters == []:
self.level = level
self.calc_filters()
self.calc_vst_coef()
self.decomposition = []
approx = self.data
if verbose:
print("IUWT decomposing (%d levels): " % level,
end="", flush=True, file=sys.stderr)
for scale in range(1, level+1):
if verbose:
print("%d..." % scale, end="", flush=True, file=sys.stderr)
# approximation:
approx2 = signal.convolve2d(self.data, self.filters[scale],
mode="same", boundary=self.boundary)
# wavelet details:
w = self.vst(approx, scale=scale-1) - self.vst(approx2, scale=scale)
self.decomposition.append(w)
if scale == level:
self.decomposition.append(approx2)
approx = approx2
if verbose:
print("DONE!", flush=True, file=sys.stderr)
return self.decomposition
def reconstruct_ivst(self, denoised=True, positive_project=True):
"""
Reconstruct the original image from the *un-denoised* decomposition
by applying the inverse VST.
This reconstruction result is also used as the `initial condition'
for the below `iterative reconstruction' algorithm.
arguments:
        * denoised: whether to use the denoised data or the direct decomposition
* positive_project: whether replace negative values with zeros
"""
if denoised:
decomposition = self.denoised
else:
decomposition = self.decomposition
self.positive_project = positive_project
details = np.sum(decomposition[:-1], axis=0)
approx = self.vst(decomposition[-1], scale=self.level)
reconstruction = self.ivst(approx+details, scale=0)
if positive_project:
reconstruction[reconstruction < 0.0] = 0.0
self.reconstruction = reconstruction
return reconstruction
def reconstruct(self, denoised=True, niter=10, verbose=False):
"""
        Reconstruct the original image using an iterative method with
L1 regularization, because the denoising violates the exact inverse
procedure.
arguments:
        * denoised: whether to use the denoised coefficients
* niter: number of iterations
"""
if denoised:
decomposition = self.denoised
else:
decomposition = self.decomposition
# L1 regularization
lbd = 1.0
delta = lbd / (niter - 1)
# initial solution
solution = self.reconstruct_ivst(denoised=denoised,
positive_project=True)
#
iuwt = IUWT(level=self.level)
iuwt.calc_filters()
# iterative reconstruction
if verbose:
print("Iteratively reconstructing (%d times): " % niter,
end="", flush=True, file=sys.stderr)
for i in range(niter):
if verbose:
print("%d..." % i, end="", flush=True, file=sys.stderr)
tempd = self.data.copy()
solution_decomp = []
for scale in range(1, self.level+1):
approx, detail = iuwt.transform(tempd, scale)
approx_sol, detail_sol = iuwt.transform(solution, scale)
# Update coefficients according to the significant supports,
                # which are acquired during the denoising procedure with FDR.
sig = self.sig_supports[scale]
detail_sol[sig] = detail[sig]
detail_sol = self.soft_threshold(detail_sol, threshold=lbd)
#
solution_decomp.append(detail_sol)
tempd = approx.copy()
solution = approx_sol.copy()
# last approximation (the two are the same)
solution_decomp.append(approx)
# reconstruct
solution = iuwt.reconstruct(decomposition=solution_decomp)
# discard all negative values
solution[solution < 0] = 0.0
#
lbd -= delta
if verbose:
print("DONE!", flush=True, file=sys.stderr)
#
self.reconstruction = solution
return self.reconstruction
# IUWT_VST }}}
def main():
# commandline arguments parser
parser = argparse.ArgumentParser(
description="Poisson Noise Removal with Multi-scale Variance " + \
"Stabling Transform and Wavelet Transform",
epilog="Version: %s (%s)" % (__version__, __date__))
parser.add_argument("-l", "--level", dest="level",
type=int, default=5,
help="level of the IUWT decomposition")
parser.add_argument("-r", "--fdr", dest="fdr",
type=float, default=0.1,
help="false discovery rate")
parser.add_argument("-I", "--fdr-independent", dest="fdr_independent",
action="store_true", default=False,
help="whether the FDR null hypotheses are independent")
parser.add_argument("-s", "--start-scale", dest="start_scale",
type=int, default=1,
help="which scale to start the denoising (inclusive)")
parser.add_argument("-e", "--end-scale", dest="end_scale",
type=int, default=0,
help="which scale to end the denoising (inclusive)")
parser.add_argument("-n", "--niter", dest="niter",
type=int, default=10,
help="number of iterations for reconstruction")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", default=False,
help="show verbose progress")
parser.add_argument("-C", "--clobber", dest="clobber",
action="store_true", default=False,
help="overwrite output file if exists")
parser.add_argument("infile", help="input image with Poisson noises")
parser.add_argument("outfile", help="output denoised image")
args = parser.parse_args()
if args.end_scale == 0:
args.end_scale = args.level
if args.verbose:
print("infile: '%s'" % args.infile, file=sys.stderr)
print("outfile: '%s'" % args.outfile, file=sys.stderr)
print("level: %d" % args.level, file=sys.stderr)
print("fdr: %.2f" % args.fdr, file=sys.stderr)
print("fdr_independent: %s" % args.fdr_independent, file=sys.stderr)
print("start_scale: %d" % args.start_scale, file=sys.stderr)
print("end_scale: %d" % args.end_scale, file=sys.stderr)
print("niter: %d\n" % args.niter, flush=True, file=sys.stderr)
if not args.clobber and os.path.exists(args.outfile):
raise OSError("outfile '%s' already exists" % args.outfile)
imgfits = fits.open(args.infile)
img = imgfits[0].data
# Remove Poisson noises
msvst = IUWT_VST(data=img)
msvst.decompose(level=args.level, verbose=args.verbose)
msvst.denoise(fdr=args.fdr, fdr_independent=args.fdr_independent,
start_scale=args.start_scale, end_scale=args.end_scale,
verbose=args.verbose)
msvst.reconstruct(denoised=True, niter=args.niter, verbose=args.verbose)
img_denoised = msvst.reconstruction
# Output
imgfits[0].data = img_denoised
imgfits[0].header.add_history("%s: Removed Poisson Noises @ %s" % (
os.path.basename(sys.argv[0]), datetime.utcnow().isoformat()))
imgfits[0].header.add_history(" TOOL: %s (v%s, %s)" % (
os.path.basename(sys.argv[0]), __version__, __date__))
imgfits[0].header.add_history(" PARAM: %s" % " ".join(sys.argv[1:]))
imgfits.writeto(args.outfile, checksum=True, clobber=args.clobber)
if __name__ == "__main__":
main()
| liweitianux/atoolbox | python/msvst_starlet.py | Python | mit | 23,422 | 0.003074 |
"""
Page view class
"""
import os
from Server.Importer import ImportFromModule
class PageView(ImportFromModule("Server.PageViewBase", "PageViewBase")):
"""
Page view class.
"""
_PAGE_TITLE = "Python Web Framework"
def __init__(self, htmlToLoad):
"""
Constructor.
- htmlToLoad : HTML to load
"""
self.SetPageTitle(self._PAGE_TITLE)
self.AddMetaData("charset=\"UTF-8\"")
self.AddMetaData("name=\"viewport\" content=\"width=device-width, initial-scale=1\"")
self.AddStyleSheet("/css/styles.css")
self.AddJavaScript("/js/http.js")
self.LoadHtml(os.path.join(os.path.dirname(__file__), "%s.html" % htmlToLoad))
self.SetPageData({ "PageTitle" : self._PAGE_TITLE })
| allembedded/python_web_framework | WebApplication/Views/PageView.py | Python | gpl-3.0 | 781 | 0.008963 |
"""create table for hierarchy of accounts
Revision ID: 17fb1559a5cd
Revises: 3b7de32aebed
Create Date: 2015-09-16 14:20:30.972593
"""
# revision identifiers, used by Alembic.
revision = '17fb1559a5cd'
down_revision = '3b7de32aebed'
branch_labels = None
depends_on = None
from alembic import op, context
import sqlalchemy as sa
def downgrade():
schema = context.get_context().config.get_main_option('schema')
op.drop_table('lux_user_inheritance', schema=schema)
op.execute("DROP FUNCTION IF EXISTS "
"%(schema)s.getMainAccount(VARCHAR)"
% {"schema": schema})
def upgrade():
schema = context.get_context().config.get_main_option('schema')
op.create_table(
'lux_user_inheritance',
sa.Column(
'login', sa.VARCHAR(), autoincrement=False,
nullable=False),
sa.Column(
'login_father', sa.VARCHAR(), autoincrement=False,
nullable=False),
schema=schema
)
op.create_primary_key(
"lux_user_inheritance_pkey", "lux_user_inheritance",
['login', 'login_father'],
schema=schema
)
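    # getMainAccount() follows the login -> login_father links until no parent
    # is left and returns the top-level (main) account for the given login.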
op.execute(
"CREATE OR REPLACE FUNCTION %(schema)s.getMainAccount "
"(child_login VARCHAR)"
"RETURNS VARCHAR AS "
"$$ "
"DECLARE "
"cur_login_father VARCHAR;"
"res_login_father VARCHAR;"
"c_father Cursor (p_login VARCHAR) FOR "
"Select login_father From %(schema)s.lux_user_inheritance Where "
"login = p_login;"
"BEGIN "
"cur_login_father := child_login;"
"LOOP "
"OPEN c_father(cur_login_father);"
"FETCH FIRST FROM c_father into res_login_father;"
"IF FOUND THEN "
"cur_login_father := res_login_father;"
"END IF;"
"CLOSE c_father;"
"IF NOT FOUND THEN "
"RETURN cur_login_father;"
"END IF;"
"END LOOP;"
"END;"
"$$"
"LANGUAGE plpgsql;" % {"schema": schema})
| Geoportail-Luxembourg/geoportailv3 | geoportal/LUX_alembic/versions/17fb1559a5cd_create_table_for_hierarchy_of_accounts.py | Python | mit | 2,012 | 0.000994 |
# -*- coding: utf-8 -*-
import os
from django.db import models
from django.db.models import Q
from seahub.tags.models import FileUUIDMap
from seahub.utils import normalize_file_path
class RelatedFilesManager(models.Manager):
def get_related_files_uuid(self, uuid):
related_files_uuid = super(RelatedFilesManager, self).filter(
Q(o_uuid=uuid) | Q(r_uuid=uuid)).select_related('o_uuid', 'r_uuid')
return related_files_uuid
def get_related_file_uuid(self, o_repo_id, r_repo_id, o_path, r_path):
o_file_path = normalize_file_path(o_path)
o_filename = os.path.basename(o_file_path)
o_parent_path = os.path.dirname(o_file_path)
r_file_path = normalize_file_path(r_path)
r_filename = os.path.basename(r_file_path)
r_parent_path = os.path.dirname(r_file_path)
o_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(o_repo_id, o_parent_path, o_filename, is_dir=False)
r_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(r_repo_id, r_parent_path, r_filename, is_dir=False)
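        # The relation is symmetric, so match the (origin, related) pair in either order.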
try:
return super(RelatedFilesManager, self).get(
Q(o_uuid=o_uuid, r_uuid=r_uuid) | Q(o_uuid=r_uuid, r_uuid=o_uuid))
except self.model.DoesNotExist:
return None
def add_related_file_uuid(self, o_repo_id, r_repo_id, o_path, r_path):
o_file_path = normalize_file_path(o_path)
o_filename = os.path.basename(o_file_path)
o_parent_path = os.path.dirname(o_file_path)
r_file_path = normalize_file_path(r_path)
r_filename = os.path.basename(r_file_path)
r_parent_path = os.path.dirname(r_file_path)
o_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(o_repo_id, o_parent_path, o_filename, is_dir=False)
r_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(r_repo_id, r_parent_path, r_filename, is_dir=False)
related_file_uuid = self.model(o_uuid=o_uuid, r_uuid=r_uuid)
related_file_uuid.save()
return related_file_uuid
def get_related_file_uuid_by_id(self, related_id):
try:
return super(RelatedFilesManager, self).get(pk=related_id)
except self.model.DoesNotExist:
return None
def delete_related_file_uuid(self, related_id):
try:
file_related = super(RelatedFilesManager, self).get(pk=related_id)
file_related.delete()
return True
except self.model.DoesNotExist:
return False
class RelatedFiles(models.Model):
o_uuid = models.ForeignKey(FileUUIDMap, db_index=True, on_delete=models.CASCADE, related_name='o_uuid')
r_uuid = models.ForeignKey(FileUUIDMap, db_index=True, on_delete=models.CASCADE, related_name='r_uuid')
objects = RelatedFilesManager()
| miurahr/seahub | seahub/related_files/models.py | Python | apache-2.0 | 2,806 | 0.002495 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.StateVariables.Element import Element
class SvShuntCompensatorSections(Element):
"""State variable for the number of sections in service for a shunt compensator.
"""
def __init__(self, sections=0, continuousSections=0.0, ShuntCompensator=None, *args, **kw_args):
"""Initialises a new 'SvShuntCompensatorSections' instance.
@param sections: The number of sections in service.
        @param continuousSections: The number of sections in service as a continuous variable.
@param ShuntCompensator: The shunt compensator for which the state applies.
"""
#: The number of sections in service.
self.sections = sections
        #: The number of sections in service as a continuous variable.
self.continuousSections = continuousSections
self._ShuntCompensator = None
self.ShuntCompensator = ShuntCompensator
super(SvShuntCompensatorSections, self).__init__(*args, **kw_args)
_attrs = ["sections", "continuousSections"]
_attr_types = {"sections": int, "continuousSections": float}
_defaults = {"sections": 0, "continuousSections": 0.0}
_enums = {}
_refs = ["ShuntCompensator"]
_many_refs = []
def getShuntCompensator(self):
"""The shunt compensator for which the state applies.
"""
return self._ShuntCompensator
def setShuntCompensator(self, value):
if self._ShuntCompensator is not None:
self._ShuntCompensator._SvShuntCompensatorSections = None
self._ShuntCompensator = value
if self._ShuntCompensator is not None:
self._ShuntCompensator.SvShuntCompensatorSections = None
self._ShuntCompensator._SvShuntCompensatorSections = self
ShuntCompensator = property(getShuntCompensator, setShuntCompensator)
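# Minimal illustrative sketch (not part of the original CIM module): wiring the
# state variable to a compensator object. _FakeShuntCompensator is a
# hypothetical stand-in for the real CIM ShuntCompensator class, and running
# this assumes the CIM14 package imported above is available.
if __name__ == "__main__":
    class _FakeShuntCompensator(object):
        pass
    sc = _FakeShuntCompensator()
    sv = SvShuntCompensatorSections(sections=3, continuousSections=2.5,
                                    ShuntCompensator=sc)
    assert sv.sections == 3
    assert sv.ShuntCompensator is sc
    assert sc._SvShuntCompensatorSections is sv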
| rwl/PyCIM | CIM14/ENTSOE/StateVariables/StateVariables/SvShuntCompensatorSections.py | Python | mit | 2,935 | 0.002726 |
import numpy as np
from bokeh.layouts import layout
from bokeh.models import CustomJS, Slider, ColumnDataSource, WidgetBox
from bokeh.plotting import figure, output_file, show
output_file('dashboard.html')
tools = 'pan'
def bollinger():
# Define Bollinger Bands.
upperband = np.random.random_integers(100, 150, size=100)
lowerband = upperband - 100
x_data = np.arange(1, 101)
# Bollinger shading glyph:
band_x = np.append(x_data, x_data[::-1])
band_y = np.append(lowerband, upperband[::-1])
p = figure(x_axis_type='datetime', tools=tools)
p.patch(band_x, band_y, color='#7570B3', fill_alpha=0.2)
p.title.text = 'Bollinger Bands'
p.title_location = 'left'
p.title.align = 'left'
p.plot_height = 600
p.plot_width = 800
p.grid.grid_line_alpha = 0.4
return [p]
def slider():
x = np.linspace(0, 10, 100)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure(
y_range=(-10, 10), tools='', toolbar_location=None,
title="Sliders example")
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
callback = CustomJS(args=dict(source=source), code="""
var data = source.data;
var A = amp.value;
var k = freq.value;
var phi = phase.value;
var B = offset.value;
x = data['x']
y = data['y']
for (i = 0; i < x.length; i++) {
y[i] = B + A*Math.sin(k*x[i]+phi);
}
source.trigger('change');
""")
amp_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Amplitude", callback=callback, callback_policy='mouseup')
callback.args["amp"] = amp_slider
freq_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Frequency", callback=callback)
callback.args["freq"] = freq_slider
phase_slider = Slider(start=0, end=6.4, value=0, step=.1, title="Phase", callback=callback)
callback.args["phase"] = phase_slider
offset_slider = Slider(start=-5, end=5, value=0, step=.1, title="Offset", callback=callback)
callback.args["offset"] = offset_slider
widgets = WidgetBox(amp_slider, freq_slider, phase_slider, offset_slider)
return [widgets, plot]
def linked_panning():
N = 100
x = np.linspace(0, 4 * np.pi, N)
y1 = np.sin(x)
y2 = np.cos(x)
y3 = np.sin(x) + np.cos(x)
s1 = figure(tools=tools)
s1.circle(x, y1, color="navy", size=8, alpha=0.5)
s2 = figure(tools=tools, x_range=s1.x_range, y_range=s1.y_range)
s2.circle(x, y2, color="firebrick", size=8, alpha=0.5)
s3 = figure(tools='pan, box_select', x_range=s1.x_range)
s3.circle(x, y3, color="olive", size=8, alpha=0.5)
return [s1, s2, s3]
l = layout([
bollinger(),
slider(),
linked_panning(),
], sizing_mode='stretch_both')
show(l)
| schoolie/bokeh | examples/howto/layouts/dashboard.py | Python | bsd-3-clause | 2,816 | 0.002131 |
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Links to arXiv"""
from cgi import escape
from invenio.base.i18n import gettext_set_language
def format_element(bfo, tag="037__", target="_blank"):
"""
Extracts the arXiv preprint information and
presents it as a direct link towards arXiv.org
"""
_ = gettext_set_language(bfo.lang)
potential_arxiv_ids = bfo.fields(tag)
arxiv_id = ""
for potential_arxiv_id in potential_arxiv_ids:
if potential_arxiv_id.get('9') == 'arXiv' and potential_arxiv_id.get('a', '').startswith('arXiv:'):
arxiv_id = potential_arxiv_id['a'][len('arXiv:'):]
return '<a href="http://arxiv.org/abs/%s" target="%s" alt="%s">%s</a>' % (
escape(arxiv_id, True),
escape(target, True),
escape(_("This article on arXiv.org"), True),
escape(arxiv_id))
return ""
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
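# Illustrative note (not part of the original element): for a record whose
# 037__ field contains {'9': 'arXiv', 'a': 'arXiv:1234.5678'}, format_element()
# returns markup along the lines of
#   <a href="http://arxiv.org/abs/1234.5678" target="_blank" alt="...">1234.5678</a>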
| zenodo/invenio | invenio/modules/formatter/format_elements/bfe_arxiv_link.py | Python | gpl-2.0 | 1,776 | 0.002815 |
from cast.analysers import log, mainframe
class EmptyParagraphEndOfSection(mainframe.Extension):
def __init__(self):
self.program = None
def start_program(self, program):
self.program = program
def end_program(self, _):
self.program = None
def start_section(self, section):
last_paragraph = section.get_children()[-1]
if 'paragraph' == last_paragraph.get_kind():
children = last_paragraph.get_children()
if len(children) > 1:
# violation test_ko2
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
elif len(children) == 1:
kind = children[0].get_kind()
if kind not in ['exit', 'stop_run', 'goback']:
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
else:
# violation test_ko1
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
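# Illustrative summary (not part of the original plugin): the checks above flag
# a section whose last child is not a paragraph, or whose closing paragraph
# contains more than one statement, or exactly one statement that is not
# EXIT / STOP RUN / GOBACK; an empty closing paragraph is accepted.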
| CAST-projects/Extension-SDK | samples/analyzer_level/mainframe/mainframe.quality_rule/empty_paragraph_end.py | Python | mit | 1,192 | 0.012584 |
#! /usr/bin/env python
'''
Testsuite for the CustomParameter class
'''
from __future__ import absolute_import, division, print_function
import numpy as np
from puq import *
def _hisplot(y, nbins):
n, bins = np.histogram(y, nbins, normed=True)
mids = bins[:-1] + np.diff(bins) / 2.0
return mids, n
def compare_curves(x1, y1, x2, y2, **args):
ay = np.interp(x2, x1, y1)
print("maximum difference is", np.max(np.abs(ay - y2)))
assert np.allclose(ay, y2, **args)
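# Note (not in the original): compare_curves() interpolates the first curve onto
# the x-grid of the second before the np.allclose check, so the two curves may
# be sampled at different points; **args forwards tolerances such as atol/rtol.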
n = NormalParameter('x','x',mean=10,dev=1)
norm80 = n.pdf.lhs(80)
# test mean and deviation
def test_custom_pdf_meandev():
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(norm80))
assert np.allclose(c.pdf.mean, 10.0, rtol=.05), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 1.0, rtol=.05), "dev=%s" % c.pdf.dev
# test lhs()
def test_custom_pdf_lhs():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
print("LHS: mean=%s dev=%s" % (c.pdf.mean, c.pdf.dev))
assert(np.allclose(c.pdf.mean, 5.04, atol=.1))
assert(np.allclose(c.pdf.dev, 1.9, atol=.1))
# test the lhs() function to see if the curve it generates is
# close enough
data = c.pdf.lhs(1000)
dx, dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.01)
# test lhs1()
def test_custom_pdf_lhs1():
a = np.array([12,12,13,13,13,14,14,14,14,15,15,15,15,15,16,16,16,16,16,17,17,17,18,18])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
# test the lhs1() function to see if the curve it generates is
# close enough
xs = c.pdf.ds1(1000)
assert len(xs) == 1000
# scale [-1,1] back to original size
min, max = c.pdf.range
mean = (min + max)/2.0
xs *= max - mean
xs += mean
# bin it
mids, n = _hisplot(xs, 40)
compare_curves(c.pdf.x, c.pdf.y, mids, n, atol=.004)
'''
import matplotlib.pyplot as plt
plt.plot(mids, n, color='green')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_random():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
data = c.pdf.random(100000)
dx,dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.03)
'''
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
# test lhs()
def test_custom_pdf_lhs_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
print("LHS: mean=%s dev=%s" % (c.pdf.mean, c.pdf.dev))
assert(np.allclose(c.pdf.mean, 5.04, atol=.1))
assert(np.allclose(c.pdf.dev, 1.7, atol=.1))
# test the lhs() function to see if the curve it generates is
# close enough
data = c.pdf.ds(1000)
dx,dy = _hisplot(data, 40)
"""
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
"""
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.4)
# test lhs1()
def test_custom_pdf_lhs1_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
# test the lhs1() function to see if the curve it generates is
# close enough
xs = c.pdf.ds1(1000)
assert len(xs) == 1000
# scale [-1,1] back to original size
min, max = c.pdf.range
mean = (min + max)/2.0
xs *= max - mean
xs += mean
# bin it
mids, n = _hisplot(xs, 40)
compare_curves(c.pdf.x, c.pdf.y, mids, n, atol=.4)
'''
import matplotlib.pyplot as plt
plt.plot(mids, n, color='green')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_random_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
data = c.pdf.random(100000)
dx,dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.4)
'''
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_small():
a = np.array([2,3,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert np.allclose(c.pdf.mean, 7.0/3, atol=.3), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 0.4, atol=.2), "dev=%s" % c.pdf.dev
def test_custom_pdf_small_fit():
a = np.array([2,3,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert np.allclose(c.pdf.mean, 7.0/3, atol=.3), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 0.4, atol=.4), "dev=%s" % c.pdf.dev
# single data point. Must use Bayesian fit.
def test_custom_pdf_single_fit():
a = np.array([42])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, error=NormalPDF(0,.1)))
assert np.allclose(c.pdf.mean, 42), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, .1, atol=.01), "dev=%s" % c.pdf.dev
def test_custom_pdf_single():
a = np.array([42])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 42
assert c.pdf.dev == 0
assert c.pdf.mode == 42
def test_custom_pdf_zero():
a = np.array([0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozero():
a = np.array([0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozerozero():
a = np.array([0, 0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozerozero_fit():
a = np.array([0, 0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_const():
a = np.array([2,2,2,2,2,2,2,2,2,2,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 2
assert c.pdf.dev == 0
assert c.pdf.mode == 2
def test_custom_pdf_const_fit():
a = np.array([2,2,2,2,2,2,2,2,2,2,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert c.pdf.mean == 2
assert c.pdf.dev == 0
assert c.pdf.mode == 2
#### EXCEPTION TESTING
# forget to include pdf
def test_custom_pdf_exception():
ok = False
try:
c = CustomParameter('x', 'X, the unknown')
except ValueError:
ok = True
except:
assert False, 'Wrong Exception'
if not ok:
assert False, 'No Exception when one was expected'
if __name__ == "__main__":
test_custom_pdf_meandev()
test_custom_pdf_lhs()
test_custom_pdf_lhs1()
test_custom_pdf_random()
test_custom_pdf_lhs_nofit()
test_custom_pdf_lhs1_nofit()
test_custom_pdf_random_nofit()
test_custom_pdf_exception()
test_custom_pdf_small()
test_custom_pdf_small_fit()
test_custom_pdf_single()
test_custom_pdf_single_fit()
test_custom_pdf_const()
test_custom_pdf_const_fit()
test_custom_pdf_zero()
test_custom_pdf_zerozero()
test_custom_pdf_zerozerozero()
test_custom_pdf_zerozerozero_fit()
| c-PRIMED/puq | test/CustomParameter_test.py | Python | mit | 7,647 | 0.024062 |
import logging
from virttest import virsh
from provider import libvirt_version
from autotest.client.shared import error
def run_cmd_in_guest(vm, cmd):
"""
Run command in the guest
    :param vm: vm object
    :param cmd: a command that needs to be run
"""
session = vm.wait_for_login()
status, output = session.cmd_status_output(cmd)
logging.debug("The '%s' output: %s", cmd, output)
if status:
session.close()
raise error.TestError("Can not run '%s' in guest: %s", cmd, output)
else:
session.close()
return output
def run(test, params, env):
"""
1. Configure kernel cmdline to support kdump
2. Start kdump service
3. Inject NMI to the guest
4. Check NMI times
"""
for cmd in 'inject-nmi', 'qemu-monitor-command':
if not virsh.has_help_command(cmd):
raise error.TestNAError("This version of libvirt does not "
" support the %s test", cmd)
vm_name = params.get("main_vm", "virt-tests-vm1")
vm = env.get_vm(vm_name)
start_vm = params.get("start_vm")
expected_nmi_times = params.get("expected_nmi_times", '0')
unprivileged_user = params.get('unprivileged_user')
if unprivileged_user:
if unprivileged_user.count('EXAMPLE'):
unprivileged_user = 'testacl'
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
raise error.TestNAError("API acl test not supported in current"
+ " libvirt version.")
if start_vm == "yes":
# start kdump service in the guest
cmd = "which kdump"
try:
run_cmd_in_guest(vm, cmd)
except:
try:
# try to install kexec-tools on fedoraX/rhelx.y guest
run_cmd_in_guest(vm, "yum install -y kexec-tools")
except:
raise error.TestNAError("Requires kexec-tools(or the "
"equivalent for your distro)")
# enable kdump service in the guest
cmd = "service kdump start"
run_cmd_in_guest(vm, cmd)
# filter original 'NMI' information from the /proc/interrupts
cmd = "grep NMI /proc/interrupts"
nmi_str = run_cmd_in_guest(vm, cmd)
# filter CPU from the /proc/cpuinfo and count number
cmd = "grep -E '^process' /proc/cpuinfo | wc -l"
vcpu_num = run_cmd_in_guest(vm, cmd).strip()
logging.info("Inject NMI to the guest via virsh inject_nmi")
virsh.inject_nmi(vm_name, debug=True, ignore_status=False)
logging.info("Inject NMI to the guest via virsh qemu_monitor_command")
virsh.qemu_monitor_command(vm_name, '{"execute":"inject-nmi"}')
# injects a Non-Maskable Interrupt into the default CPU (x86/s390)
# or all CPUs (ppc64), as usual, the default CPU index is 0
cmd = "grep NMI /proc/interrupts | awk '{print $2}'"
nmi_from_default_vcpu = run_cmd_in_guest(vm, cmd)
real_nmi_times = nmi_from_default_vcpu.splitlines()[0]
logging.debug("The current Non-Maskable Interrupts: %s", real_nmi_times)
# check Non-maskable interrupts times
if real_nmi_times != expected_nmi_times:
raise error.TestFail("NMI times aren't expected %s:%s",
real_nmi_times, expected_nmi_times)
| PandaWei/tp-libvirt | libvirt/tests/src/guest_kernel_debugging/nmi_test.py | Python | gpl-2.0 | 3,444 | 0.000871 |
import numpy as np
import tensorflow as tf
import os
def get_inputs(split, config):
split_dir = config['split_dir']
data_dir = config['data_dir']
dataset = config['dataset']
split_file = os.path.join(split_dir, dataset, split + '.lst')
filename_queue = get_filename_queue(split_file, os.path.join(data_dir, dataset))
if dataset == 'mnist':
image = get_inputs_mnist(filename_queue, config)
config['output_size'] = 28
config['c_dim'] = 1
elif dataset == "cifar-10":
image = get_inputs_cifar10(filename_queue, config)
config['output_size'] = 32
config['c_dim'] = 3
else:
image = get_inputs_image(filename_queue, config)
image_batch = create_batch([image], config['batch_size'])
return image_batch
def get_inputs_image(filename_queue, config):
output_size = config['output_size']
image_size = config['image_size']
c_dim = config['c_dim']
# Read a record, getting filenames from the filename_queue.
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)
image = tf.image.decode_image(value, channels=c_dim)
image = tf.cast(image, tf.float32)/255.
image_shape = tf.shape(image)
image_height, image_width = image_shape[0], image_shape[1]
offset_height = tf.cast((image_height - image_size)/2, tf.int32)
offset_width = tf.cast((image_width - image_size)/2, tf.int32)
image = tf.image.crop_to_bounding_box(image, offset_height, offset_width, image_size, image_size)
image = tf.image.resize_images(image, [output_size, output_size])
image.set_shape([output_size, output_size, c_dim])
return image
def get_inputs_mnist(filename_queue, config):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since all keys are required.
features={
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'depth': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
'image_raw': tf.FixedLenFeature([], tf.string),
})
image = tf.decode_raw(features['image_raw'], tf.uint8)
image.set_shape([784])
image = tf.reshape(image, [28, 28, 1])
image = tf.cast(image, tf.float32) / 255.
# Convert label from a scalar uint8 tensor to an int32 scalar.
label = tf.cast(features['label'], tf.int32)
binary_image = (tf.random_uniform(image.get_shape()) <= image)
binary_image = tf.cast(binary_image, tf.float32)
return binary_image
def get_inputs_cifar10(filename_queue, config):
output_size = config['output_size']
image_size = config['image_size']
c_dim = config['c_dim']
# Dimensions of the images in the CIFAR-10 dataset.
# See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
# input format.
label_bytes = 1 # 2 for CIFAR-100
image_bytes = 32 * 32 * 3
# Every record consists of a label followed by the image, with a
# fixed number of bytes for each.
record_bytes = label_bytes + image_bytes
# Read a record, getting filenames from the filename_queue.
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
key, value = reader.read(filename_queue)
record = tf.decode_raw(value, tf.uint8)
# The first bytes represent the label, which we convert from uint8->int32.
label = tf.cast(record[0], tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
#tf.strided_slice(record, [label_bytes], [label_bytes + image_bytes])
image = tf.reshape(record[label_bytes:label_bytes+image_bytes], [3, 32, 32])
image = tf.cast(image, tf.float32)/255.
# Convert from [depth, height, width] to [height, width, depth].
image = tf.transpose(image, [1, 2, 0])
return image
def get_filename_queue(split_file, data_dir):
with open(split_file, 'r') as f:
filenames = f.readlines()
filenames = [os.path.join(data_dir, f.strip()) for f in filenames]
for f in filenames:
if not os.path.exists(f):
raise ValueError('Failed to find file: ' + f)
filename_queue = tf.train.string_input_producer(filenames)
return filename_queue
def create_batch(inputs, batch_size=64, min_queue_examples=1000, num_preprocess_threads=12, enqueue_many=False):
# Generate a batch of images and labels by building up a queue of examples.
batch = tf.train.shuffle_batch(
inputs,
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples,
enqueue_many=enqueue_many,
)
return batch
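# Minimal illustrative sketch (not part of the original module): the config keys
# that get_inputs() and its helpers read. The concrete paths are assumptions for
# the example; 'output_size' and 'c_dim' are overwritten for mnist and cifar-10.
if __name__ == "__main__":
    example_config = {
        'split_dir': 'splits',   # holds <dataset>/<split>.lst file lists
        'data_dir': 'data',      # root directory of the image/record files
        'dataset': 'cifar-10',
        'batch_size': 64,
        'image_size': 32,
        'output_size': 32,
        'c_dim': 3,
    }
    # image_batch = get_inputs('train', example_config)  # needs the data on disk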
| LMescheder/AdversarialVariationalBayes | avb/inputs.py | Python | mit | 4,913 | 0.002239 |
#!/usr/bin/python3
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from sklearn.cluster import KMeans
from sklearn import datasets
from PIL import Image, ImageChops
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from random import randint
import time
import ephem
from PIL import Image
import cv2
import glob
import sys
import os
import numpy as np
import datetime
from pathlib import Path
import subprocess
from amscommon import read_config
import math
import time
from sklearn.cluster import Birch
from collections import deque
video_dir = "/mnt/ams2/SD/"
def stack_stack(pic1, pic2):
frame_pil = Image.fromarray(pic1)
stacked_image = pic2
if stacked_image is None:
stacked_image = frame_pil
else:
stacked_image=ImageChops.lighter(stacked_image,frame_pil)
return(stacked_image)
def compute_straight_line(x1,y1,x2,y2,x3,y3):
print ("COMP STRAIGHT", x1,y1,x2,y2,x3,y3)
if x2 - x1 != 0:
a = (y2 - y1) / (x2 - x1)
else:
a = 0
if x3 - x1 != 0:
b = (y3 - y1) / (x3 - x1)
else:
b = 0
straight_line = a - b
if (straight_line < 1):
straight = "Y"
else:
straight = "N"
return(straight_line)
def crop_center(img,cropx,cropy):
y,x = img.shape
startx = x//2-(cropx//2) +12
starty = y//2-(cropy//2) + 4
return img[starty:starty+cropy,startx:startx+cropx]
def fig2data ( fig ):
"""
    @brief Convert a Matplotlib figure to a 3D numpy array with RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw ( )
# Get the RGBA buffer from the figure
w,h = fig.canvas.get_width_height()
buf = np.fromstring ( fig.canvas.tostring_argb(), dtype=np.uint8 )
buf.shape = ( w, h,4 )
# canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
buf = np.roll ( buf, 3, axis = 2 )
return buf
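# Illustrative note (not in the original): fig2data() draws the canvas itself,
# so it can be called on a freshly built figure, roughly like:
#   fig = Figure(); FigureCanvas(fig)
#   fig.add_subplot(1, 1, 1).plot([0, 1], [0, 1])
#   rgba = fig2data(fig)   # uint8 RGBA buffer of the rendered figure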
def kmeans_cluster(points, num_clusters):
points = np.array(points)
print(points)
clusters = []
cluster_points = []
colors = ('r', 'g', 'b')
est = KMeans(n_clusters=num_clusters)
est.fit(points)
print (est.labels_)
print (len(points))
({i: np.where(est.labels_ == i)[0] for i in range(est.n_clusters)})
for i in set(est.labels_):
index = est.labels_ == i
cluster_idx = np.where(est.labels_ == i)
for idxg in cluster_idx:
for idx in idxg:
idx = int(idx)
point = points[idx]
#print ("IDX:",i, idx, point)
cluster_points.append(point)
clusters.append(cluster_points)
cluster_points = []
#print(points[:,0])
#print(points[:,1])
int_lb = est.labels_.astype(float)
#fig = gcf()
fig = Figure()
canvas = FigureCanvas(fig)
plot = fig.add_subplot(1,1,1)
plot.scatter(points[:,0], points[:,1], c=[plt.cm.Spectral(float(i) / 10) for i in est.labels_])
for cluster in clusters:
cxs = []
cys = []
for cp in cluster:
x,y,w,h = cp
cxs.append(x)
cys.append(y)
if len(cxs) > 3:
plot.plot(np.unique(cxs), np.poly1d(np.polyfit(cxs, cys, 1))(np.unique(cxs)))
plt.xlim(0,640)
plt.ylim(0,480)
plot.invert_yaxis()
fig.canvas.draw()
fig.savefig("/tmp/plot.png", dpi=fig.dpi)
#plt.show()
return(clusters)
def calc_dist(x1,y1,x2,y2):
dist = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
return dist
def find_angle(x1,y1,x2,y2):
if x2 - x1 != 0:
a1 = (y2 - y1) / (x2 - x1)
else:
a1 = 0
angle = math.atan(a1)
angle = math.degrees(angle)
return(angle)
def closest_node(node, nodes):
return nodes[cdist([node], nodes).argmin()]
def find_objects(index, points):
apoints = []
unused_points = []
cl_sort = []
sorted_points = []
last_angle = None
objects = []
group_pts = []
line_segments = []
stars = []
obj_points = []
big_cnts = []
count = 0
x1,y1,w1,h1 = points[index]
print ("Total Points found in image: ", len(points))
used_pts = {}
for i in range(0,len(points)-1):
x1,y1,w1,h1 = points[i]
for i in range(0,len(points)-1):
x2,y2,w2,h2 = points[i]
key = str(x1)+"."+str(y1)+"."+str(x2)+"."+str(y2)
used_pts[key] = 0
key2 = str(x2)+"."+str(y2)+"."+str(x1)+"."+str(y1)
used_pts[key2] = 0
possible_stars = []
for i in range(0,len(points)-1):
closest = []
x1,y1,w1,h1 = points[i]
for j in range(0,len(points)-1):
x2,y2,w2,h2 = points[j]
key = str(x1)+"."+str(y1)+"."+str(x2)+"."+str(y2)
key2 = str(x2)+"."+str(y2)+"."+str(x1)+"."+str(y1)
dist = calc_dist(x1,y1,x2,y2)
angle = find_angle(x1,y1,x2,y2)
if x1 != x2 and y1 != y2:
if used_pts[key] == 0 and used_pts[key2] == 0 :
#print("Closest Point:", (int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
closest.append((int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
used_pts[key] = 1
used_pts[key2] = 1
#print("Key has been used:", key, key2)
#else:
# print("Key already used try another one:", key, key2)
#else:
# print ("this point has already been used")
count = count + 1
# of all the close points, make sure that at least 2 points < 25 px dist exist.
conf_closest = []
for cls in closest:
if cls[0] < 100:
conf_closest.append(cls)
if len(closest) > 0:
distsort = np.unique(closest, axis=0)
dist,angle,x1,y1,x2,y2 = distsort[0]
if dist < 50 and len(conf_closest) > 1:
line_segments.append((int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
obj_points.append((int(x1),int(y1), int(w1), int(h1)))
else:
possible_stars.append((int(x1),int(y1),int(w1),int(h1)))
#print("CLOSEST LINE SEGMENT FOR PT: ", distsort[0])
#else:
#print("ERROR! no close points to this one!", x1,y1)
if w1 > 15 or h1 > 15:
# print ("BIG!!! We have a big object here likely containing many line segments.")
big_cnts.append((int(x1),int(y1),int(w1),int(h1)))
for star in possible_stars:
close = 0
for line in line_segments:
dist,angle,x1,y1,x2,y2 = line
star_dist = calc_dist(star[0], star[1], x1,y1)
#print ("STARDIST: ", star_dist, star[0], star[1], x1,y1)
if star_dist < 60:
close = 1
if close == 1:
obj_points.append(star)
else:
stars.append(star)
#print ("OBJECT POINTS")
if len(line_segments) > 0:
sorted_lines = sorted(line_segments, key=lambda x: x[2])
else:
sorted_lines = []
#print ("LINE SEGMENTS:")
#for line in sorted_lines:
# print (line)
last_ang = 0
last_dist = 0
line_groups = []
line_group = []
orphan_lines = []
if len(sorted_lines) > 0:
for segment in sorted_lines:
dist,angle,x1,y1,x2,y2 = segment
if last_ang != 0 and (angle -5 < last_ang < angle + 5) and dist < 100:
#print ("Line Segment Part of Existing Group: ", segment)
line_group.append((dist,angle,x1,y1,x2,y2))
else:
#print ("New Group Started!", last_ang, angle )
# print ("Line Segment Part of New Group: ", segment)
if len(line_group) >= 3:
line_groups.append(line_group)
else:
#print("Last line segment was too small to be part of a group! These are random points or stars. Skip for now.")
for line in line_group:
orphan_lines.append(line)
line_group = []
line_group.append((dist,angle,x1,y1,x2,y2))
last_ang = angle
if len(line_group) >= 2:
line_groups.append(line_group)
else:
for line in line_group:
orphan_lines.append(line)
# now make sure all of the line segments in the line group can connect to at least one of the other segments
#print ("Total Line Groups as of now:", len(line_groups))
#print ("Total Orphan Lines as of now:", len(orphan_lines))
#print ("Confirm the line segments are all part of the same group", len(line_groups))
#print ("TOTAL POINTS: ", len(points))
#print ("TOTAL LINE GROUPS: ", len(line_groups))
#print ("ORPHAN GROUPS: ", len(orphan_lines))
#for point in points:
#print ("POINT: ", point)
gc = 1
if len(line_groups) > 0:
for line_group in line_groups:
lc = 1
for line in line_group:
#print("LINE:", line)
dist,ang,x1,y1,x2,y2 = line
#confirm_angle = find_angle(x1,y1,x2,y2)
#print ("GROUP", gc, lc, line, ang, confirm_angle)
lc = lc + 1
gc = gc + 1
#else:
#make sure the obj points are not false positives, if so move to stars.
(line_groups, orphan_lines, stars, obj_points, big_cnts) = conf_objs(line_groups, orphan_lines, stars, obj_points, big_cnts)
return(line_groups, orphan_lines, stars, obj_points, big_cnts)
def conf_objs(line_groups, orphan_lines, stars, obj_points, big_cnts):
print ("CONF OBJS")
print ("LINE GROUPS", len(line_groups))
print ("OBJ POINTS", len(obj_points))
conf_line_groups = []
mx = []
my = []
mw = []
mh = []
#first lets check the line groups and make sure at least 3 points are straight
for line_group in line_groups:
mx = []
my = []
mw = []
mh = []
lgc = 0
for dist,ang,x1,y1,x2,y2 in line_group:
mx.append(x1)
my.append(y1)
print (dist, ang, x1,y1,x2,y2)
print (lgc, "adding MX", x1, mx)
print (lgc, "adding MYs", y1, my)
#mx.append(x2)
#my.append(y2)
lgc = lgc + 1
if len(mx) > 2:
print ("MXs", mx)
print ("MYs", my)
st = compute_straight_line(mx[0],my[0],mx[1],my[1],mx[2],my[2])
else:
st = 100
if st <= 1:
print ("This group is straight")
conf_line_groups.append(line_group)
else:
print ("This group is NOT straight")
orphan_lines.append(line_group)
cc = 0
mx = []
my = []
mw = []
mh = []
for x,y,h,w in obj_points:
mx.append(x)
my.append(y)
mw.append(w)
mh.append(h)
cc = cc + 1
if len(mx) > 2:
st = compute_straight_line(mx[0],my[0],mx[1],my[1],mx[2],my[2])
else:
st = 100
if st <= 1:
print ("At least 3 of these are straight, we can continue.", st)
else:
print ("These 3 objects are not straight, and thus false!", st)
for x,y,h,w in obj_points:
stars.append((x,y,h,w))
obj_points = []
return(line_groups, orphan_lines, stars, obj_points, big_cnts)
def clean_line_groups(line_groups, orphan_lines):
cleaned_line_groups = []
cleaned_line_group = []
for line_group in line_groups:
if len(line_group) == 2:
# make sure these two groups are close enough to each other to be grouped.
(dist,angle,x1,y1,x2,y2) = line_group[0]
(xdist,xangle,xx1,xy1,xx2,xy2) = line_group[1]
group_dist = calc_dist(x1,y1,xx1,xy1)
if group_dist > 50 or (angle -5 < xangle < angle + 5):
orphan_lines.append(line_group[0])
orphan_lines.append(line_group[1])
else:
cleaned_line_group.append(line_group[0])
cleaned_line_group.append(line_group[1])
else:
cleaned_line_groups.append(line_group)
line_groups = cleaned_line_groups
print("CLG:", line_groups)
return(cleaned_line_groups, orphan_lines)
def confirm_cnts(crop):
crop = cv2.GaussianBlur(crop, (5, 5), 0)
avg_flux = np.average(crop)
max_flux = np.amax(crop)
thresh_limit = avg_flux / 2
_, crop_thresh = cv2.threshold(crop, thresh_limit, 255, cv2.THRESH_BINARY)
#(_, cnts, xx) = cv2.findContours(crop_thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#if np.sum(crop_thresh) > (255 * 2):
#print ("CONFIRM:", max_flux, avg_flux, thresh_limit, np.sum(crop_thresh))
#cv2.imshow('pepe', crop_thresh)
#else:
# print ("FAILED:", max_flux, avg_flux, thresh_limit, np.sum(crop_thresh))
#cv2.imshow('pepe', crop)
#cv2.waitKey(100)
return(np.sum(crop_thresh))
def find_best_thresh(image, thresh_limit, type):
go = 1
while go == 1:
_, thresh = cv2.threshold(image, thresh_limit, 255, cv2.THRESH_BINARY)
(_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if type == 0:
cap = 80
else:
cap = 100
if len(cnts) > cap:
thresh_limit = thresh_limit + 1
else:
bad = 0
for (i,c) in enumerate(cnts):
x,y,w,h = cv2.boundingRect(cnts[i])
if w == image.shape[1]:
bad = 1
if type == 0 and (w >= 10 or h > 10):
bad = 1
if bad == 0:
go = 0
else:
thresh_limit = thresh_limit + 1
#print ("CNTs, BEST THRESH:", str(len(cnts)), thresh_limit)
return(thresh_limit)
def find_objects2(timage, tag, current_image, filename):
stars = []
big_cnts = []
obj_points = []
image = timage
thresh_limit = 10
thresh_limit = find_best_thresh(image, thresh_limit, 0)
# find best thresh limit code here!
line_objects = []
points = []
orphan_lines = []
_, thresh = cv2.threshold(image, thresh_limit, 255, cv2.THRESH_BINARY)
(_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#print ("CNTS:", len(cnts))
hit = 0
objects = []
if len(cnts) < 500:
for (i,c) in enumerate(cnts):
x,y,w,h = cv2.boundingRect(cnts[i])
if w > 1 and h > 1:
if (w < 10 and h <10):
nothing = 0
# cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
#cv2.circle(image, (x,y), 20, (120), 1)
#if w != h:
# cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
else:
#cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
# Convert big object into points and add each one to the points array.
crop = timage[y:y+h,x:x+w]
points.append((x,y,w,h))
if w < 600 and h < 400:
crop_points = find_points_in_crop(crop,x,y,w,h)
for x,y,w,h in crop_points:
print("adding some points",x,y,w,h)
points.append((x,y,w,h))
points.append((x,y,w,h))
#objects.append((x,y,w,h))
else:
image[y:y+h,x:x+w] = [0]
else:
print ("WAY TO MANY CNTS:", len(cnts))
thresh_limit = thresh_limit + 5
return(points)
# find line objects
if (len(objects) + len(points)) > 0:
line_groups, orphan_lines, stars, obj_points = find_objects(0, points)
else:
line_groups = []
final_group = []
final_groups = []
reject_group = []
reject_groups = []
line_segments = flatten_line_groups(line_groups)
line_segments = sorted(line_segments, key = lambda x: (x[0],x[1]))
if len(line_segments) > 0:
final_group, reject_group = regroup_lines(line_segments)
print ("MIKE!:", len(final_group))
if len(final_group) > 1:
final_groups.append(final_group)
else:
for line in final_group:
orphan_lines.append(line)
if len(reject_group) > 3:
print (len(reject_group), "rejects left. do it again.")
reject_group = sorted(reject_group, key = lambda x: (x[1],x[0]))
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
else:
for line in final_group:
orphan_lines.append(line)
print (len(reject_group), "rejects left after 2nd try")
if len(reject_group) > 3:
print (len(reject_group), "rejects left. do it again.")
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
else:
for line in final_group:
orphan_lines.append(line)
print (len(reject_group), "rejects left after 3rd try")
# try to adopt the orphans!
if len(orphan_lines) >= 1:
print (orphan_lines)
final_group, reject_group = regroup_lines(orphan_lines)
if len(final_group) > 1:
final_groups.append(final_group)
if len(final_group) > 0:
print ("Adopted! : ", final_group)
orphan_lines = reject_group
if len(orphan_lines) >= 1:
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
if len(final_group) > 0:
print ("Adopted! : ", final_group)
orphan_lines = reject_group
if len(orphan_lines) >= 1:
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
if len(final_group) > 0:
print ("Adopted! : ", final_group)
orphan_lines = reject_group
final_groups, orphan_lines = clean_line_groups(final_groups, orphan_lines)
clusters= []
clusters_ab= []
last_x = None
last_y = None
last_ang = None
ang = None
if len(points) > 3:
num_clusters = int(len(points)/3)
clusters = kmeans_cluster(points, num_clusters)
#print ("MIKE CLUSTERS", len(clusters))
for cluster in clusters:
cxs = []
cys = []
for cp in cluster:
x,y,w,h = cp
cxs.append(x)
cys.append(y)
if last_x is not None:
ang = find_angle(x,y,last_x,last_y)
print ("CLUSTER ANGLE:", x,y,last_x,last_y,ang)
if last_ang is not None:
if ang - 5 < last_ang < ang + 5:
cv2.line(image, (x,y), (last_x,last_y), (200), 4)
last_x = x
last_y = y
last_ang = ang
a, b = best_fit (cxs,cys)
mnx = min(cxs)
mny = min(cys)
mmx = max(cxs)
mmy = max(cys)
cv2.rectangle(image, (mnx,mny), (mmx, mmy), (255),1)
#print ("MIKE MIKE XS,", cxs)
#print ("MIKE MIKE YS,", cys)
clusters_ab.append((a,b))
print ("MIKE AB,", a,b)
print ("FINAL ANALYSIS")
print (final_groups)
print ("--------------")
print ("File Name: ", filename)
print ("Total Points:", len(points))
print ("Total Line Segments:", len(line_segments))
print ("Total Final Line Groups:", len(final_groups))
print ("Total Clusters:", len(clusters))
cl =0
for a,b in clusters_ab:
print ("Cluster " + str(cl + 1) + " " + str(len(clusters[cl])) + " points")
print ("LINE AB " + str(a) + " " + str(b))
cl = cl + 1
#print (final_groups)
print ("Total Rejected Lines:", len(reject_group))
gc = 1
    xs = []
    ys = []
for line_group in final_groups:
lc = 1
for line in line_group:
dist,angle,x1,y1,x2,y2 = line
xs.append(x1)
xs.append(x2)
ys.append(y1)
ys.append(y2)
#print (gc, lc, line)
lc = lc + 1
gc = gc + 1
if len(xs) > 0 and len(ys) > 0:
mnx = min(xs)
mxx = max(xs)
mny = min(ys)
mxy = max(ys)
cv2.rectangle(image, (mnx,mny), (mxx, mxy), (255),1)
print ("Total Orphaned Lines:", len(orphan_lines))
if len(line_groups) > 0:
line_segments = flatten_line_groups(line_groups)
find_line_nodes(line_segments)
gc = 1
for line_group in line_groups:
lc = 1
line_group = sorted(line_group, key = lambda x: (x[2],x[3]))
dist,angle,sx1,sy1,sx2,sy2 = line_group[0]
for line in line_group:
dist,angle,x1,y1,x2,y2 = line
#s_ang = find_angle(sx1,sy1,x1,y1)
#if angle - 5 < s_ang < angle + 5:
# print("FINAL GROUP:", gc,lc,line, angle, s_ang)
# final_group.append((dist,angle,x1,y1,x2,y2))
#else:
# print("REJECT GROUP:", gc,lc,line, angle, s_ang)
# reject_group.append((dist,angle,x1,y1,x2,y2))
#seg_dist = find_closest_segment(line, line_group)
cv2.line(image, (x1,y1), (x2,y2), (255), 2)
cv2.putText(image, "L " + str(lc), (x1+25,y1+10), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
lc = lc + 1
if len(line_group) > 0:
cv2.putText(image, "LG " + str(gc), (x1+25,y1), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
gc = gc + 1
for line in orphan_lines:
#print("ORPHAN:", line)
dist,angle,x1,y1,x2,y2 = line
cv2.line(image, (x1,y1), (x2,y2), (255), 1)
cv2.putText(image, "Orph" , (x1+25,y1), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
#cv2.ellipse(image,(ax,ay),(dist_x,dist_y),elp_ang,elp_ang,180,255,-1)
#a,b = best_fit(lxs, lys)
#plt.scatter(lxs,lys)
#plt.xlim(0,640)
#plt.ylim(0,480)
#yfit = [a + b * xi for xi in lxs]
#plt.plot(lxs,yfit)
#cv2.imshow('pepe', image)
#cv2.waitKey(1)
#plt.gca().invert_yaxis()
#plt.show()
#for x,y,w,h in points:
# if w > 25 or h > 25:
# cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
# else:
# cv2.circle(image, (x,y), 20, (120), 1)
edges = cv2.Canny(image.copy(),thresh_limit,255)
el = filename.split("/");
fn = el[-1]
cv2.putText(current_image, "File Name: " + fn, (10,440), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
cv2.putText(current_image, str(tag), (10,450), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
cv2.putText(current_image, "Points: " + str(len(points)), (10,460), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
cv2.putText(current_image, "Line Groups: " + str(len(final_groups)), (10,470), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
blend = cv2.addWeighted(image, .2, current_image, .8,0)
np_plt = cv2.imread("/tmp/plot.png")
np_plt = cv2.cvtColor(np_plt, cv2.COLOR_BGR2GRAY)
hh, ww = np_plt.shape
crop = cv2.resize(np_plt, (0,0), fx=1.1, fy=1.1)
crop = crop_center(crop, 640,480)
#blend = cv2.addWeighted(blend, .5, crop, .5,0)
#for x,y in stars:
# cv2.circle(blend, (x,y), 5, (255), 1)
#exit()
return(line_groups, points, clusters)
def regroup_lines(line_segments):
final_group = []
reject_group = []
sangles = []
dist,angle,sx1,sy1,sx2,sy2 = line_segments[0]
for line in line_segments:
dist,angle,x1,y1,x2,y2 = line
s_ang = find_angle(sx1,sy1,x1,y1)
sangles.append(s_ang)
mean_angle = np.median(np.array(sangles))
if len(line_segments ) > 0:
dist,angle,sx1,sy1,sx2,sy2 = line_segments[0]
for line in line_segments:
dist,angle,x1,y1,x2,y2 = line
s_ang = find_angle(sx1,sy1,x1,y1)
if mean_angle - 10 <= s_ang <= mean_angle + 10:
#print("FINAL GROUP:", line, angle, s_ang, mean_angle)
found = 0
for (dd,aa,ax1,ay1,ax2,ay2) in final_group:
if ax1 == x1 and ay1 == y1:
found = 1
if found == 0:
final_group.append((dist,angle,x1,y1,x2,y2))
else:
#print("REJECT GROUP:",line, angle, s_ang, mean_angle)
reject_group.append((dist,angle,x1,y1,x2,y2))
if len(line_segments ) > 0:
sdist,sangle,sx1,sy1,sx2,sy2 = line_segments[0]
for line in line_segments:
dist,angle,x1,y1,x2,y2 = line
s_ang = find_angle(sx1,sy1,x1,y1)
tdist = calc_dist(x1,y1,sx1,sy1)
if sangle - 10 <= angle <= sangle + 10 and tdist < 20:
found = 0
for (dd,aa,ax1,ay1,ax2,ay2) in final_group:
if ax1 == x1 and ay1 == y1:
found = 1
if found == 0:
print("FINAL GROUP:", line, angle, s_ang, mean_angle)
final_group.append((dist,angle,x1,y1,x2,y2))
else:
#print("REJECT GROUP:",line, angle, s_ang, mean_angle)
reject_group.append((dist,angle,x1,y1,x2,y2))
return(final_group, reject_group)
def flatten_line_groups(line_groups):
line_segments = []
for line_group in line_groups:
for line in line_group:
dist,angle,x1,y1,x2,y2 = line
line_segments.append((dist,angle,x1,y1,x2,y2))
return(line_segments)
def log_node(nodes, line, closest):
if len(nodes) == 0:
nodes.append((line,closest))
return(nodes)
def find_line_nodes(line_segments):
nodes = []
seg_list = []
rest = line_segments
for line in line_segments:
#print("LENLINE", len(line))
#print(line)
dist,angle,x1,y1,x2,y2 = line
closest, rest = sort_segs(x1,y1,rest)
#nodes = log_node(nodes, line, closest)
def sort_segs(x,y,seg_dist):
sorted_lines = sorted(seg_dist, key=lambda x: x[0])
#for line in sorted_lines:
# print ("SORTED LINE", line)
closest = []
rest = []
already_found = 0
for line in sorted_lines:
if len(line) == 6:
dist,angle,x1,y1,x2,y2 = line
else:
print("WTF!:", line)
seg_dist = calc_dist(x,y,x1,y1)
if seg_dist != 0 and already_found != 1:
closest.append((dist,angle,x1,y1,x2,y2))
else:
rest.append((dist,angle,x1,y1,x2,y2))
return(closest, rest)
def find_closest_segment(this_line,line_group):
seg_dist = []
dist, angle, x1,y1,x2,y2 = this_line
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
for line in line_group:
xdist, xangle, xx1,xy1,xx2,xy2 = line
xcx = (xx1 + xx2) / 2
xcy = (xy1 + xy2) / 2
dist = calc_dist(cx,cy,xcx,xcy)
if dist > 0:
seg_dist.append((dist, x1,y1,x2,y2))
sorted_lines = sorted(seg_dist, key=lambda x: x[0])
#for line in sorted_lines:
# print("CLOSEST SEGMENTS:", line)
def find_points_in_crop(crop,x,y,w,h):
print ("cropping")
go = 1
cnt_pts = []
thresh_limit = 250
canvas = np.zeros([480,640], dtype=crop.dtype)
canvas[y:y+h,x:x+w] = crop
for i in range(x,x+w):
for j in range(y,y+w):
if i % 5 == 0:
canvas[0:480,i:i+3] = 0
if j % 5 == 0:
canvas[j:j+3,0:640] = 0
#print ("CROP", crop.shape[0])
#if crop.shape[0] > 25:
#cv2.imshow('pepe', canvas)
#cv2.waitKey(1000)
last_cnts = []
while go == 1:
_, thresh = cv2.threshold(canvas, thresh_limit, 255, cv2.THRESH_BINARY)
(_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt_limit = int((w + h) / 20)
if cnt_limit < 5:
cnt_limit = 5
if cnt_limit > 25:
cnt_limit = 25
#print ("CNTS at thresh:", len(cnts), thresh_limit)
thresh_limit = thresh_limit - 2
if len(cnts) >= cnt_limit:
for (i,c) in enumerate(cnts):
x,y,w,h = cv2.boundingRect(cnts[i])
if w > 1 and h > 1:
cnt_pts.append((x,y,w,h))
if len(last_cnts) >= len(cnt_pts) and len(last_cnts) > cnt_limit:
#cnt_pts = last_cnts
go = 0
if thresh_limit < 5:
cnt_pts = last_cnts
go = 0
if len(cnts) > 70:
go = 0
#print ("CNTS: ", len(cnts))
#print ("LAST CNTS: ", len(last_cnts))
#print ("THRESH LIMIT: ", thresh_limit)
#cv2.imshow('pepe', thresh)
#cv2.waitKey(100)
last_cnts = cnt_pts
return(cnt_pts)
def best_fit(X, Y):
xbar = sum(X)/len(X)
ybar = sum(Y)/len(Y)
n = len(X) # or len(Y)
numer = sum([xi*yi for xi,yi in zip(X, Y)]) - n * xbar * ybar
denum = sum([xi**2 for xi in X]) - n * xbar**2
b = numer / denum
a = ybar - b * xbar
print('best fit line:\ny = {:.2f} + {:.2f}x'.format(a, b))
return a, b
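# Quick illustrative check (not in the original): on a perfect line y = 2 + 3x,
#   best_fit([0, 1, 2, 3], [2, 5, 8, 11])
# returns a = 2.0 (intercept) and b = 3.0 (slope).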
def diff_all(med_stack_all, background, median, before_image, current_image, after_image,filename ):
before_diff = cv2.absdiff(current_image.astype(current_image.dtype), before_image,)
after_diff = cv2.absdiff(current_image.astype(current_image.dtype), after_image,)
before_after_diff = cv2.absdiff(before_image.astype(current_image.dtype), after_image,)
median_three = np.median(np.array((before_image, after_image, current_image)), axis=0)
median = np.uint8(median)
median_sum = np.sum(median)
median_diff = cv2.absdiff(median_three.astype(current_image.dtype), median,)
blur_med = cv2.GaussianBlur(median, (5, 5), 0)
# find bright areas in median and mask them out of the current image
tm = find_best_thresh(blur_med, 30, 1)
_, median_thresh = cv2.threshold(blur_med, tm, 255, cv2.THRESH_BINARY)
#cv2.imshow('pepe', median_thresh)
#cv2.waitKey(1000)
(_, cnts, xx) = cv2.findContours(median_thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
hit = 0
real_cnts = []
print ("CNTS: ", len(cnts))
if len(cnts) < 1000:
for (i,c) in enumerate(cnts):
x,y,w,h = cv2.boundingRect(cnts[i])
if True:
w = w + 20
h = h + 20
x = x - 20
y = y - 20
if x < 0:
x = 0
if y < 0:
y = 0
if x+w > current_image.shape[1]:
x = current_image.shape[1]-1
if y+h > current_image.shape[0]:
y = current_image.shape[0]-1
if w > 0 and h > 0:
mask = current_image[y:y+h, x:x+w]
#cv2.rectangle(current_image, (x,y), (x+w+5, y+h+5), (255),1)
for xx in range(0, mask.shape[1]):
for yy in range(0, mask.shape[0]):
mask[yy,xx] = randint(0,6)
blur_mask = cv2.GaussianBlur(mask, (5, 5), 0)
current_image[y:y+h,x:x+w] = blur_mask
median[y:y+h,x:x+w] =blur_mask
# find the diff between the masked median and the masked current image
blur_cur = cv2.GaussianBlur(current_image, (5, 5), 0)
blur_med = cv2.GaussianBlur(median, (5, 5), 0)
cur_med_diff = cv2.absdiff(blur_cur.astype(blur_cur.dtype), blur_med,)
blend = cv2.addWeighted(current_image, .5, cur_med_diff, .5,0)
cur_med_diff =- median
#line_groups, points, clusters = find_objects2(blend, "Current Median Diff Blend", current_image, filename)
return(blend, current_image, filename)
#return(line_groups, points)
def inspect_image(med_stack_all, background, median, before_image, current_image, after_image, avg_cnt,avg_tot,avg_pts,filename):
rois = []
big_cnts = []
line_groups = []
orphan_lines = []
obj_points = []
stars = []
image_diff = cv2.absdiff(current_image.astype(current_image.dtype), background,)
orig_image = current_image
current_image = image_diff
blend, current_image, filename = diff_all(med_stack_all, background, median, before_image, current_image, after_image,filename)
points = find_objects2(blend, "Current Median Diff Blend", current_image, filename)
if len(points) > 2:
line_groups, orphan_lines, stars, obj_points, big_cnts = find_objects(0, points)
if len(obj_points) > 2:
line_groups, orphan_lines, stars2, obj_points, big_cnts = find_objects(0, obj_points)
stars = stars + stars2
print ("---FINAL ANALYSIS---")
print ("File: ", filename)
print ("Total Points: ", len(points))
print ("Line Groups: ", len(line_groups))
lg_points = 0
lg = 1
for line in line_groups:
print (" Group " + str(lg) + ": " + str(len(line)))
lg = lg + 1
lg_points = lg_points + len(line)
print ("Total Line Group Points: ", lg_points)
print ("Orphan Lines: ", len(line_groups))
print ("Stars: ", len(stars))
print ("Obj Points: ", len(obj_points))
print ("Big CNTS: ", len(big_cnts))
for x,y,w,h in big_cnts:
cv2.rectangle(blend, (x,y), (x+w+5, y+h+5), (255),1)
#for x,y,w,h in obj_points:
# if w > 25 or h > 25:
# cv2.rectangle(blend, (x,y), (x+w+5, y+h+5), (255),1)
# else:
# cv2.circle(blend, (x,y), 20, (120), 1)
#for x,y,w,h in stars:
# if w > 25 or h > 25:
# cv2.rectangle(blend, (x,y), (x+w+5, y+h+5), (255),1)
# else:
# cv2.circle(blend, (x,y), 5, (120), 1)
return(blend, points, line_groups, stars, obj_points, big_cnts)
def parse_file_date(orig_video_file):
#print(orig_video_file)
if ".mp4" in orig_video_file:
stacked_image_fn = orig_video_file.replace(".mp4", "-stack.jpg")
star_image_fn = orig_video_file.replace(".mp4", "-stars.jpg")
report_fn = orig_video_file.replace(".mp4", "-stack-report.txt")
video_report = orig_video_file.replace(".mp4", "-report.txt")
trim_file = orig_video_file.replace(".mp4", "-trim.mp4")
else:
stacked_image_fn = orig_video_file.replace(".avi", "-stack.jpg")
trim_file = orig_video_file.replace(".avi", "-trim.avi")
star_image_fn = orig_video_file.replace(".avi", "-stars.jpg")
report_fn = orig_video_file.replace(".avi", "-stack-report.txt")
el = orig_video_file.split("/")
file_name = el[-1]
file_name = file_name.replace("_", "-")
file_name = file_name.replace(".", "-")
#print ("FN", file_name)
xyear, xmonth, xday, xhour, xmin, xsec, xcam_num, ftype, xext = file_name.split("-")
cam_num = xcam_num.replace("cam", "")
date_str = xyear + "-" + xmonth + "-" + xday + " " + xhour + ":" + xmin + ":" + xsec
capture_date = date_str
return(capture_date)
def day_or_night(config, capture_date):
obs = ephem.Observer()
obs.pressure = 0
obs.horizon = '-0:34'
obs.lat = config['device_lat']
obs.lon = config['device_lng']
obs.date = capture_date
sun = ephem.Sun()
sun.compute(obs)
(sun_alt, x,y) = str(sun.alt).split(":")
saz = str(sun.az)
(sun_az, x,y) = saz.split(":")
#print ("SUN", sun_alt)
if int(sun_alt) < -1:
sun_status = "night"
else:
sun_status = "day"
return(sun_status, sun_alt)
def diff_stills(sdate, cam_num):
med_last_objects = []
last_objects = deque(maxlen=5)
diffed_files = []
config = read_config("conf/config-1.txt")
video_dir = "/mnt/ams2/SD/"
images = []
images_orig = []
images_blend = []
images_info = []
count = 0
last_image = None
last_thresh_sum = 0
hits = 0
avg_cnt = 0
avg_tot = 0
avg_pts = 0
count = 0
glob_dir = video_dir + "proc/" + sdate + "/" + "*cam" + str(cam_num) + "-stacked.jpg"
report_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(cam_num) + "-report.txt"
master_stack_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(cam_num) + "-master_stack.jpg"
#cv2.namedWindow('pepe')
mask_file = "conf/mask-" + str(cam_num) + ".txt"
file_exists = Path(mask_file)
mask_exists = 0
still_mask = [0,0,0,0]
if (file_exists.is_file()):
print("File found.")
ms = open(mask_file)
for lines in ms:
line, jk = lines.split("\n")
exec(line)
ms.close()
mask_exists = 1
(sm_min_x, sm_max_x, sm_min_y, sm_max_y) = still_mask
diffs = 0
image_list = []
file_list = []
sorted_list = []
print ("Loading still images from ", glob_dir)
fp = open(report_file, "w")
for filename in (glob.glob(glob_dir)):
capture_date = parse_file_date(filename)
sun_status, sun_alt = day_or_night(config, capture_date)
if sun_status != 'day' and int(sun_alt) <= -5:
#print("NIGHTTIME", capture_date, filename, sun_status)
file_list.append(filename)
else:
print ("This is a daytime or dusk file")
sorted_list = sorted(file_list)
for filename in sorted_list:
open_cv_image = cv2.imread(filename,0)
orig_image = open_cv_image
images_orig.append(orig_image)
print(filename)
open_cv_image[440:480, 0:640] = [0]
if mask_exists == 1:
open_cv_image[sm_min_y:sm_max_y, sm_min_x:sm_max_x] = [0]
images.append(open_cv_image)
#exit()
#time.sleep(5)
height , width = open_cv_image.shape
master_stack = None
# Define the codec and create VideoWriter object
#fourcc = cv2.VideoWriter_fourcc(*'H264')
#out = cv2.VideoWriter(outfile,fourcc, 5, (width,height),1)
#med_stack_all = np.median(np.array(images[50:150]), axis=0)
med_stack_all = np.median(np.array(images), axis=0)
#cv2.imshow('pepe', cv2.convertScaleAbs(med_stack_all))
#cv2.waitKey(1000)
objects = None
last_line_groups = []
last_points = []
for filename in sorted_list:
hit = 0
detect = 0
el = filename.split("/")
fn = el[-1]
#this_image = cv2.imread(filename,1)
this_image = images[count]
if count >= 1:
before_image = images[count-1]
else:
before_image = images[count+2]
if count >= len(file_list)-1:
after_image = images[count-2]
else:
after_image = images[count+1]
if count < 25:
median = np.median(np.array(images[0:count+25]), axis=0)
elif len(images) - count < 25:
median = np.median(np.array(images[count-25:count]), axis=0)
else:
median = np.median(np.array(images[count-25:count]), axis=0)
if count < 10:
background = images[count+1]
for i in range (0,10):
background = cv2.addWeighted(background, .8, images[count+i], .2,0)
else:
background = images[count-1]
for i in range (0,10):
background = cv2.addWeighted(background, .8, images[count-i], .2,0)
img_rpt_file = filename.replace("-stacked.jpg", "-stack-report.txt")
img_report = open(img_rpt_file, "w")
(blend, points, line_groups, stars, obj_points, big_cnts) = inspect_image(med_stack_all, background, median, before_image, this_image, after_image, avg_cnt,avg_tot,avg_pts, filename)
master_stack = stack_stack(blend, master_stack)
img_report.write("points=" + str(points) + "\n")
img_report.write("line_groups=" + str(line_groups) + "\n")
img_report.write("stars=" + str(stars) + "\n")
img_report.write("obj_points=" + str(obj_points) + "\n")
img_report.write("big_cnts=" + str(big_cnts) + "\n")
img_report.close()
images_blend.append(blend)
images_info.append((points, line_groups, stars, obj_points, big_cnts))
# block out the detections in the master image to remove it from the running mask
last_line_group = line_groups
last_points = points
for x,y,w,h in last_points:
images[count][y:y+h,x:x+w] = 5
count = count + 1
if len(big_cnts) > 0 or len(obj_points) >= 3:
hits = hits + 1
#cv2.imshow('pepe', blend)
#if len(line_groups) >= 1 or len(obj_points) > 3 or len(big_cnts) > 0:
#cv2.waitKey(1)
# while(1):
# k = cv2.waitKey(33)
# if k == 32:
# break
# if k == 27:
# exit()
#else:
#cv2.waitKey(1)
data = filename + "," + str(len(line_groups)) + "," + str(len(obj_points)) + "," + str(len(big_cnts)) + "\n"
fp.write(data)
print ("TOTAL: ", len(file_list))
print ("HITS: ", hits)
fp.close()
if master_stack is not None:
print("saving", master_stack_file)
master_stack.save(master_stack_file, "JPEG")
else:
print("Failed.")
hits = 1
for count in range(0, len(sorted_list) - 1):
file = sorted_list[count]
el = file.split("/")
st = el[-1]
report_str = st.replace("-stacked.jpg", "-report.txt")
video_str = st.replace("-stacked.jpg", ".mp4")
video_file = file.replace("-stacked.jpg", ".mp4")
(points, line_groups, stars, obj_points, big_cnts) = images_info[count]
if len(obj_points) > 3 or len(big_cnts) > 0:
for bc in big_cnts:
(x,y,w,h) = bc
obj_points.append((x,y,5,5))
obj_points.append((x+w,y+h,5,5))
np_obj_points = np.array(obj_points)
max_x = np.max(np_obj_points[:,0])
max_y = np.max(np_obj_points[:,1])
min_x = np.min(np_obj_points[:,0])
min_y = np.min(np_obj_points[:,1])
myimg = cv2.imread(sorted_list[count],0)
cv2.rectangle(myimg, (min_x,min_y), (max_x, max_y), (255),1)
#cv2.imshow('pepe', myimg)
#cv2.waitKey(1)
print ("-------")
print ("Count:", count)
print ("Hit:", hits)
print ("File:", sorted_list[count])
print ("Points:", str(len(points)))
print ("Line Groups:", str(len(line_groups)))
gc = 1
for line_group in line_groups:
for dist, ang, x1,y1,w1,h1 in line_group:
print ("GROUP: ", gc, dist, ang, x1,y1,w1,h1)
gc = gc + 1
print ("Stars:", str(len(stars)))
print ("Obj Points:", str(len(obj_points)))
print ("Big Cnts:", str(len(big_cnts)))
print ("Min/Max X/Y:", str(min_x), str(min_y), str(max_x), str(max_y))
print ("-------")
hits = hits + 1
video_report = video_file.replace(".mp4", "-report.txt")
file_exists = Path(video_report)
if (file_exists.is_file()):
print ("Already processed the video.")
#else:
# print("./PV.py " + video_file + " " + cam_num)
# os.system("./PV.py " + video_file + " " + cam_num)
else :
min_x = min_y = max_x = max_y = 0
#cmd = "grep \"Motion Frames:\" `find /mnt/ams2/SD/" + str(cam_num) + " |grep " + report_str + "`"
#output = subprocess.check_output(cmd, shell=True).decode("utf-8")
#output = output.replace("Motion Frames:", "motion_frames=")
#print (output)
#exec(output)
#if len(motion_frames) > 14:
# cmd = "find /mnt/ams2/SD/" + str(cam_num) + " |grep " + video_str
# video_file = subprocess.check_output(cmd, shell=True).decode("utf-8")
# print("This is probably a real event?")
# print(video_file)
sdate = sys.argv[1]
cam_num = sys.argv[2]
diff_stills(sdate, cam_num)
| mikehankey/fireball_camera | scan-stills2.py | Python | gpl-3.0 | 42,716 | 0.043426 |
'''
Using the Python language, have the function MultiplicativePersistence(num)
take the num parameter being passed which will always be a positive integer
and return its multiplicative persistence which is the number of times
you must multiply the digits in num until you reach a single digit.
For example: if num is 39 then your program should return 3
because 3 * 9 = 27 then 2 * 7 = 14 and finally 1 * 4 = 4 and you stop at 4.
'''
def MultiplicativePersistence(num):
steps = 0
while num > 9:
snum = str(num)
sdigits = list(snum)
num = 1
for snum in sdigits:
n = int(snum)
num = num * n
steps = steps + 1
return steps
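# A hedged sanity check (assumed example, not part of the original challenge
# submission): for 39 the digit products are 3*9=27 then 2*7=14 then 1*4=4,
# so the persistence is 3, while single-digit inputs need no multiplications.
def _example_persistence():
    assert MultiplicativePersistence(39) == 3
    assert MultiplicativePersistence(7) == 0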
# keep this function call here
# to see how to enter arguments in Python scroll down
print MultiplicativePersistence(raw_input())
| anomen-s/programming-challenges | coderbyte.com/easy/Multiplicative Persistence/solve.py | Python | gpl-2.0 | 820 | 0.02439 |
# Copyright 2021 ForgeFlow S.L. <https://www.forgeflow.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openupgradelib import openupgrade
_xmlid_renames = [
(
"sale.access_product_product_attribute_custom_value",
"sale.access_product_product_attribute_custom_value_sale_manager",
),
("sale.account_move_see_all", "sale.account_invoice_rule_see_all"),
("sale.account_move_personal_rule", "sale.account_invoice_rule_see_personal"),
("sale.account_move_line_see_all", "sale.account_invoice_line_rule_see_all"),
(
"sale.account_move_line_personal_rule",
"sale.account_invoice_line_rule_see_personal",
),
]
def fast_fill_sale_order_currency_id(env):
if not openupgrade.column_exists(env.cr, "sale_order", "currency_id"):
openupgrade.logged_query(
env.cr,
"""
ALTER TABLE sale_order
ADD COLUMN currency_id integer""",
)
openupgrade.logged_query(
env.cr,
"""
UPDATE sale_order so
SET currency_id = pp.currency_id
FROM product_pricelist pp
WHERE so.pricelist_id = pp.id""",
)
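# For reference, a minimal sketch of the slower ORM-level equivalent that the
# SQL fast-fill above avoids (hypothetical, not executed during migration):
#
#     for order in env["sale.order"].search([]):
#         order.currency_id = order.pricelist_id.currency_id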
@openupgrade.migrate()
def migrate(env, version):
openupgrade.rename_xmlids(env.cr, _xmlid_renames)
fast_fill_sale_order_currency_id(env)
| OCA/OpenUpgrade | openupgrade_scripts/scripts/sale/14.0.1.1/pre-migration.py | Python | agpl-3.0 | 1,334 | 0.001499 |
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import IPython
import os
import threading
import warnings
import scipy.ndimage as ndi
from scipy import linalg
import cv2
import random
import re
try:
from PIL import Image as pil_image
except ImportError:
pil_image = None
from keras import backend as K
from sklearn.preprocessing import OneHotEncoder
from itertools import islice, chain
from sklearn.model_selection import train_test_split
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.pi / 180 * np.random.uniform(-rg, rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
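# A minimal smoke test for the helper above (assumed example, channels_last
# layout, so the axis arguments are passed explicitly):
def _demo_random_rotation():
    img = np.random.rand(64, 64, 3)
    # Rotate by up to +/-20 degrees; the result keeps the (64, 64, 3) shape.
    return random_rotation(img, 20, row_axis=0, col_axis=1, channel_axis=2)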
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def augment_brightness_camera_images(image):
image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
image1 = np.array(image1)
random_bright = 0.5+np.random.uniform()
image1[:, :, 2] = image1[:, :, 2]*random_bright
image1[:, :, 2][image1[:, :, 2] > 255] = 255
image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)
return image1
def add_random_shadow(image):
top_y = 320*np.random.uniform()
top_x = 0
bot_x = 160
bot_y = 320*np.random.uniform()
image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
shadow_mask = 0*image_hls[:, :, 1]
X_m = np.mgrid[0:image.shape[0], 0:image.shape[1]][0]
Y_m = np.mgrid[0:image.shape[0], 0:image.shape[1]][1]
shadow_mask[((X_m-top_x)*(bot_y-top_y) - (bot_x - top_x)*(Y_m-top_y) >= 0)] = 1
if np.random.randint(2) == 1:
random_bright = .5
cond1 = shadow_mask == 1
cond0 = shadow_mask == 0
if np.random.randint(2) == 1:
image_hls[:, :, 1][cond1] = image_hls[:, :, 1][cond1]*random_bright
else:
image_hls[:, :, 1][cond0] = image_hls[:, :, 1][cond0]*random_bright
image = cv2.cvtColor(image_hls, cv2.COLOR_HLS2RGB)
return image
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('zoom_range should be a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
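# A small illustration of the helper above (assumed example): composing
# offset * matrix * reset makes the transform act about the image centre
# rather than the (0, 0) corner, which is what the random_* functions rely on.
def _demo_center_offset():
    theta = np.pi / 4
    rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
                         [np.sin(theta), np.cos(theta), 0],
                         [0, 0, 1]])
    return transform_matrix_offset_center(rotation, 100, 200)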
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Apply the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=0,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
scale: Whether to rescale image values
to be within [0, 255].
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format.
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x.astype(np.uint8)
def load_img(path, grayscale=False, target_size=None):
"""Loads an image into PIL format.
# Arguments
path: Path to image file
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size:
wh_tuple = (target_size[1], target_size[0])
if img.size != wh_tuple:
img = img.resize(wh_tuple)
return img
def load_mask(path):
mask = cv2.imread(path)
return mask
def resize_mask(mask, target_size=None):
if not (target_size[0] == mask.shape[0] and target_size[1] == mask.shape[1]):
mask = cv2.resize(mask, (target_size[1],
target_size[0]),
interpolation=cv2.INTER_NEAREST)
return mask
def mask_to_onehot(mask, n_values, shape):
''' Convert mask to binary one-hot-encoded-masks'''
SPARSE = False
enc = OneHotEncoder(n_values=n_values, sparse=SPARSE)
# Check if shape is (nb_rows, nb_cols, 3)
# Then just assign one of the channelse as the mask
if (mask[:, :, 0] == mask[:, :, 1]).all() and (mask[:, :, 1] == mask[:, :, 2]).all() and mask.shape[2] == 3:
mask = mask[:, :, 0]
# If mask is not (batch_size, nb_rows, nb_cols)
# add an extra dimension representing batch_size
if len(mask.shape) == 2 and mask.shape[0] != 1:
mask = np.expand_dims(mask, axis=0)
# Now mask should be of shape (batch_size, nb_rows, nb_cols)
# Initiate one-hot-encoded array
x = np.zeros((mask.shape[0],
mask.shape[1],
mask.shape[2],
n_values))
for i in range(mask.shape[0]):
img_enc = enc.fit_transform(mask[i])
img_enc = np.reshape(img_enc, (shape[0], shape[1], n_values))
x[i] = img_enc
return x
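# A hedged usage sketch for mask_to_onehot (hypothetical data; relies on the
# OneHotEncoder call made inside the function above): a 4x5 label mask with
# values 0..2, replicated into three identical channels, becomes a
# (1, 4, 5, 3) one-hot tensor.
def _demo_mask_to_onehot():
    gray = np.random.randint(0, 3, size=(4, 5)).astype(np.uint8)
    mask = np.stack([gray, gray, gray], axis=-1)
    onehot = mask_to_onehot(mask, n_values=3, shape=(4, 5))
    return onehot.shape  # expected: (1, 4, 5, 3)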
def onehot_to_mask(mask):
'''Currently only for one sample'''
# If only one batch but dimension 4
if len(mask.shape) == 4 and mask.shape[0] == 1:
x = np.squeeze(np.argmax(mask, axis=3))
# If dimension 4 and multiple batches
elif len(mask.shape) == 4 and mask.shape[0] != 1:
x = np.zeros((mask.shape[0],
mask.shape[1],
mask.shape[2]))
for i in range(x.shape[0]):
x[i] = np.argmax(mask[i], axis=2)
elif len(mask.shape) == 3:
x = np.argmax(mask, axis=2)
return x
def list_pictures(directory, ext='jpg|jpeg|bmp|png'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class CAMVIDImageDataGenerator(object):
"""Generate minibatches of image data with real-time data augmentation.
# Arguments
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
zca_whitening: apply ZCA whitening.
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
channel_shift_range: shift range for each channels.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied to each input.
The function will run before any other modification on it.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
(the depth) is at index 1, in 'channels_last' mode it is at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
"""
def __init__(self,
data_split="train",
val_mode=False,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
void_cval=0,
augment_brightness=False,
augment_shadow=False):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
self.data_split = data_split
self.val_mode = val_mode
self.void_cval = void_cval
self.augment_brightness = augment_brightness
self.augment_shadow = augment_shadow
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format should be "channels_last" (channel after row and '
'column) or "channels_first" (channel before row and column). '
'Received arg: ', data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('zoom_range should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
def flow_from_directory(self, directory, dataset,
target_size=(360, 480),
target_size_mask=(45, 60),
color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='jpeg',
follow_links=False,
keep_classes=None,
validation_data=False):
return DirectoryIterator(
directory,
dataset=dataset,
image_data_generator=self,
data_split=self.data_split,
val_mode=self.val_mode,
target_size=target_size,
target_size_mask=target_size_mask,
color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
keep_classes=keep_classes,
validation_data=validation_data)
def standardize(self, x):
"""Apply the normalization configuration to a batch of inputs.
# Arguments
x: batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
# x is a single image, so it doesn't have image number at index 0
img_channel_axis = self.channel_axis - 1
if self.samplewise_center:
x -= np.mean(x, axis=img_channel_axis, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, axis=img_channel_axis, keepdims=True) + 1e-7)
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
                          '`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + 1e-7)
else:
warnings.warn('This ImageDataGenerator specifies '
                          '`featurewise_std_normalization`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (x.size))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
else:
warnings.warn('This ImageDataGenerator specifies '
                          '`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, y):
"""Randomly augment a single image tensor.
# Arguments
x: 3D tensor, single image.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
# use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
else:
theta = 0
if self.height_shift_range:
shf = np.random.uniform(-self.height_shift_range, self.height_shift_range)
tx_x = shf * x.shape[img_row_axis]
tx_y = shf * y.shape[img_row_axis]
else:
tx_x = 0
tx_y = 0
if self.width_shift_range:
shf = np.random.uniform(-self.width_shift_range, self.width_shift_range)
ty_x = shf * x.shape[img_col_axis]
ty_y = shf * y.shape[img_col_axis]
else:
ty_x = 0
ty_y = 0
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
transform_matrix = None
transform_matrix_x = None
transform_matrix_y = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if tx_x != 0 or ty_x != 0:
shift_matrix = np.array([[1, 0, tx_x],
[0, 1, ty_x],
[0, 0, 1]])
transform_matrix_x = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if tx_y != 0 or ty_y != 0:
shift_matrix = np.array([[1, 0, tx_y],
[0, 1, ty_y],
[0, 0, 1]])
transform_matrix_y = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(transform_matrix_x, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
# For y, mask data, fill mode constant, cval = 0
h, w = y.shape[img_row_axis], y.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(transform_matrix_y, h, w)
y = apply_transform(y, transform_matrix, img_channel_axis,
fill_mode="constant", cval=self.void_cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
y = random_channel_shift(y,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
y = flip_axis(y, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
y = flip_axis(y, img_row_axis)
if self.augment_brightness:
x = augment_brightness_camera_images(x)
if self.augment_shadow:
x = add_random_shadow(x)
return x, y
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Fits internal statistics to some sample data.
Required for featurewise_center, featurewise_std_normalization
and zca_whitening.
# Arguments
x: Numpy array, the data to fit on. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Whether to fit on randomly augmented samples
rounds: If `augment`,
how many augmentation passes to do over the data
seed: random seed.
# Raises
ValueError: in case of invalid input `x`.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
raise ValueError(
'Expected input to be images (as Numpy array) '
'following the data format convention "' + self.data_format + '" '
'(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
'However, it was passed an array with shape ' + str(x.shape) +
' (' + str(x.shape[self.channel_axis]) + ' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
self.principal_components = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + 10e-7))), u.T)
class Iterator(object):
"""Abstract base class for image data iterators.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_generator = self._flow_index(n, batch_size, shuffle, seed)
def reset(self):
self.batch_index = 0
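    # _flow_index yields (index_array, current_index, current_batch_size)
    # tuples. In val_mode the samples are visited in their original order
    # (no shuffling) and the loop breaks once a batch would run past the end
    # of the data; in training mode it cycles forever, drawing a new random
    # permutation at the start of each epoch when shuffle is enabled.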
def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
if self.val_mode:
while 1:
if self.batch_index == 0:
index_array = np.arange(n)
current_index = (self.batch_index * batch_size) % n
if n > current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = n - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
if n < current_index + batch_size:
break
else:
# Ensure self.batch_index is 0.
self.reset()
while 1:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
if self.batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (self.batch_index * batch_size) % n
if n > current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = n - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
        classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
"""
def __init__(self, directory, dataset,
image_data_generator,
data_split,
val_mode=False,
target_size=(360, 480),
target_size_mask=(45, 60),
color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='jpeg',
follow_links=False,
keep_classes=None,
random_split=False,
validation_data=False):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
self.target_size_mask = tuple(target_size_mask)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.val_mode = val_mode
self.dataset = dataset
self.validation_data = validation_data
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'}
if data_split not in ["train", "val", "test"]:
raise ValueError("Invalid data_split. Must be either ",
"train, val or test")
if dataset not in ["camvid", "vrinno"]:
raise ValueError("Invalid dataset")
if dataset == "camvid":
# first, count the number of samples and classes
if data_split == "train":
trainImgPath, trainGtPath, trainTotalImages = self.load_from_txt(directory + "/train.txt")
self.samples = trainTotalImages
tmp_imagenames = trainImgPath
tmp_masknames = trainGtPath
elif data_split == "val":
valImgPath, valGtPath, valTotalImages = self.load_from_txt(directory + "/val.txt")
self.samples = valTotalImages
tmp_imagenames = valImgPath
tmp_masknames = valGtPath
elif data_split == "test":
testImgPath, testGtPath, testTotalImages = self.load_from_txt(directory + "/test.txt")
self.samples = testTotalImages
tmp_imagenames = testImgPath
tmp_masknames = testGtPath
self.original_nb_classes = 12
elif dataset == "vrinno":
tmp_imagenames, tmp_masknames, total_images = self.load_from_folder(directory)
tot = len(tmp_imagenames)
# Split into train, validation and test
train_per, val_per, test_per = 0.8, 0.1, 0.1
if random_split:
x, x_test, y, y_test = train_test_split(tmp_imagenames,
tmp_masknames,
test_size=test_per,
train_size=train_per+val_per,
random_state=42)
x_train, x_cv, y_train, y_cv = train_test_split(x,
y,
test_size=val_per,
train_size=train_per,
random_state=42)
else:
train_idx = int(np.ceil(train_per * tot))
val_idx = int(np.ceil(val_per * tot) + train_idx)
test_idx = tot
x_train, y_train = tmp_imagenames[0:train_idx], tmp_masknames[0:train_idx]
x_cv, y_cv = tmp_imagenames[train_idx:val_idx], tmp_masknames[train_idx:val_idx]
x_test, y_test = tmp_imagenames[val_idx:tot], tmp_masknames[val_idx:tot]
if data_split == "train":
tmp_imagenames, tmp_masknames = x_train, y_train
elif data_split == "val":
tmp_imagenames, tmp_masknames = x_cv, y_cv
elif data_split == "test":
tmp_imagenames, tmp_masknames = x_test, y_test
self.samples = len(tmp_imagenames)
self.original_nb_classes = 4
        self.num_class = len(keep_classes) if keep_classes else self.original_nb_classes
self.classes = np.arange(self.num_class)
self.class_indices = dict(zip(self.classes.tolist(), range(len(self.classes.tolist()))))
root = os.path.dirname(os.path.realpath(__file__))
tmp_imagenames = [os.path.join(root, directory, f) for f in tmp_imagenames]
tmp_masknames = [os.path.join(root, directory, f) for f in tmp_masknames]
self.imagenames = tmp_imagenames
self.masknames = tmp_masknames
if not keep_classes:
print("Using normal")
self.keep_classes = np.arange(self.num_class)
else:
if len(keep_classes) == 1:
raise ValueError("You must have two or more classes")
else:
self.keep_classes = keep_classes
print('Found %d images belonging to %d classes in %s.' % (self.samples,
self.num_class,
data_split))
super(DirectoryIterator, self).__init__(self.samples, batch_size, shuffle, seed)
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
batch_x = np.zeros((current_batch_size,) + self.image_shape)
batch_y = np.zeros((current_batch_size,
self.target_size_mask[0],
self.target_size_mask[1],
self.num_class),
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
imagename = self.imagenames[j]
mskname = self.masknames[j]
img = load_img(os.path.join(self.directory, imagename),
grayscale=grayscale,
target_size=self.target_size)
x = img_to_array(img, data_format=self.data_format)
x = x.astype(np.uint8)
msk = load_mask(os.path.join(self.directory,
mskname))
y = img_to_array(msk, data_format=self.data_format)
# Augment images and masks
y = resize_mask(y, target_size=self.target_size_mask)
x, y = self.image_data_generator.random_transform(x.astype('float32'),
y.astype('float32'))
            if not self.validation_data:  # Then we are in training and we should normalize via standardize()
x = self.image_data_generator.standardize(x)
y = y.astype(np.uint8)
y = mask_to_onehot(y, self.original_nb_classes, self.target_size_mask)
y = y[:, :, :, self.keep_classes] # Only retain classes of interest
y = y.astype(np.uint8)
batch_x[i] = x
batch_y[i] = y
if self.validation_data: # Then we are in validation generator and we should normalize with training mean and std
batch_x = self.normalize(batch_x)
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i in range(current_batch_size):
hash_name = np.random.randint(1e4)
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=hash_name,
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
mask = onehot_to_mask(batch_y[i])
mask = np.expand_dims(mask, 2)
mask = array_to_img(mask, self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}_mask.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=hash_name,
format=self.save_format)
mask.save(os.path.join(self.save_to_dir, fname))
return batch_x, batch_y
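    # normalize() below applies the *training* statistics to validation data:
    # it expects per-dataset channel means/stds to have been saved earlier to
    # tmp/<dataset>_train_mean.npy and tmp/<dataset>_train_std.npy, and
    # broadcasts them over the channel axis.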
def normalize(self, x):
mean = np.load("tmp/" + self.dataset + "_train_mean.npy")
std = np.load("tmp/" + self.dataset + "_train_std.npy")
broadcast_shape = [1, 1, 1]
broadcast_shape[3 - 1] = x.shape[3]
mean = np.reshape(mean, broadcast_shape)
x -= mean
broadcast_shape = [1, 1, 1]
broadcast_shape[3 - 1] = x.shape[3]
std = np.reshape(std, broadcast_shape)
x /= (std + K.epsilon())
return x
@staticmethod
def load_from_folder(path):
print("Loading images and masks from {}".format(path))
allfiles = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
images = [img for img in allfiles if img.endswith(".png")]
masks = [msk for msk in allfiles if msk.endswith(".msk")]
# Check if we have a mask for each image
img_set = {os.path.splitext(img)[0] for img in images}
msk_set = {os.path.splitext(msk)[0] for msk in masks}
assert img_set-msk_set == set()
assert len(images) == len(masks)
total_images = images
images = sorted(images, key=lambda x: float(x.replace(".png", "")))
masks = sorted(masks, key=lambda x: float(x.replace(".msk", "")))
return images, masks, total_images
@staticmethod
def load_from_txt(path):
print("Extracting file names from {}".format(path))
lImages, lAnnos = [], []
with open(path) as f:
for i, line in enumerate(f):
parts = line.split()
if len(parts) > 1:
lImages.append(parts[0][1:])
lAnnos.append(parts[1][1:])
assert(len(lImages) == len(lAnnos))
total_images = len(lImages)
return lImages, lAnnos, total_images
if __name__ == "__main__":
from Parameters import Parameters
params = Parameters()
params.load_experiment("experiments/ENet_Camvid_test/experiment.json")
ctd = CAMVIDTestData("data/camvid/", data_split="test", batch_size=70)
IPython.embed()
| MarcoDalFarra/semseg | DataGenerators.py | Python | mit | 48,218 | 0.001307 |
# -*- coding: utf-8 -*-
class Charset:
common_name = u'Google Fonts: Extras'
native_name = u''
def glyphs(self):
glyphs = [0xe0ff] # PUA: Font logo
glyphs += [0xeffd] # PUA: Font version number
glyphs += [0xf000] # PUA: font ppem size indicator: run `ftview -f 1255 10 Ubuntu-Regular.ttf` to see it in action!
return glyphs
| davelab6/pyfontaine | fontaine/charsets/internals/google_extras.py | Python | gpl-3.0 | 372 | 0.016129 |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Philipp Wagner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys, math
from PIL import Image
import facedetect
def Distance(p1,p2):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
return math.sqrt(dx*dx+dy*dy)
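# Note on the helper below: PIL's Image.transform with Image.AFFINE expects the
# *output-to-input* mapping, so the six coefficients (a, b, c, d, e, f) built in
# ScaleRotateTranslate describe where each destination pixel samples from in the
# source image (rotation about `center`, optional scaling, and a shift of the
# centre to `new_center`).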
def ScaleRotateTranslate(image, angle, center = None, new_center = None, scale = None, resample=Image.BICUBIC):
if (scale is None) and (center is None):
return image.rotate(angle=angle, resample=resample)
nx,ny = x,y = center
sx=sy=1.0
if new_center:
(nx,ny) = new_center
if scale:
(sx,sy) = (scale, scale)
cosine = math.cos(angle)
sine = math.sin(angle)
a = cosine/sx
b = sine/sx
c = x-nx*a-ny*b
d = -sine/sy
e = cosine/sy
f = y-nx*d-ny*e
return image.transform(image.size, Image.AFFINE, (a,b,c,d,e,f), resample=resample)
def CropFace(image, eye_left=(0,0), eye_right=(0,0), offset_pct=(0.2,0.2), dest_sz = (70,70)):
# calculate offsets in original image
offset_h = math.floor(float(offset_pct[0])*dest_sz[0])
offset_v = math.floor(float(offset_pct[1])*dest_sz[1])
# get the direction
eye_direction = (eye_right[0] - eye_left[0], eye_right[1] - eye_left[1])
# calc rotation angle in radians
rotation = -math.atan2(float(eye_direction[1]),float(eye_direction[0]))
# distance between them
dist = Distance(eye_left, eye_right)
# calculate the reference eye-width
reference = dest_sz[0] - 2.0*offset_h
# scale factor
scale = float(dist)/float(reference)
# rotate original around the left eye
image = ScaleRotateTranslate(image, center=eye_left, angle=rotation)
# crop the rotated image
crop_xy = (eye_left[0] - scale*offset_h, eye_left[1] - scale*offset_v)
crop_size = (dest_sz[0]*scale, dest_sz[1]*scale)
image = image.crop((int(crop_xy[0]), int(crop_xy[1]), int(crop_xy[0]+crop_size[0]), int(crop_xy[1]+crop_size[1])))
# resize it
image = image.resize(dest_sz, Image.ANTIALIAS)
return image
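# A hedged usage sketch (hypothetical file name and eye coordinates; the batch
# driver below reads them from facedetect instead):
#
# CropFace(Image.open("face.jpg"), eye_left=(120, 150), eye_right=(210, 152),
# offset_pct=(0.2, 0.2), dest_sz=(128, 128)).save("face_aligned.jpg")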
if __name__ == "__main__":
f = open(sys.argv[1], 'r')
csv = open(sys.argv[2], "w")
for line in f:
lineArray = line.split(";")
fileName = lineArray[0]
label = lineArray[1]
print "aligning %s to aligned" % (fileName)
aligned_file_name = "aligned/%s" % fileName
face = facedetect.detect_faces(fileName)['face'][0]
print(face)
CropFace(Image.open(fileName), eye_left=(face[0],face[1]), eye_right=(face[2],face[1]), offset_pct=(0.08,0.08), dest_sz=(200,200)).save(aligned_file_name)
# CropFace(Image.open(fileName), eye_left=(252,364), eye_right=(420,366), offset_pct=(0.1,0.1), dest_sz=(200,200)).save(aligned_file_name)
csv.write("%s;%s" % (aligned_file_name, label))
f.close()
csv.close() | DiUS/Physiognomy | python/align_faces.py | Python | mit | 4,189 | 0.020769 |
# -*- coding: utf-8 -*-
import unittest
from config.context import Attribute, attr
class Data(object):
pass
class AttributeTestCase(unittest.TestCase):
def setUp(self):
self.data= Data()
self.data.int2= 1
self.integer= 3
self.int1= Attribute("int1", destObj= self.data, valueType=int)
self.int2= Attribute("int2", destObj= self.data)
self.int3= Attribute("integer", destObj= self)
self.flt1= Attribute("flt",
destObj= self.data,
destName="float",
valueType=float )
self.flt2= Attribute("value", valueType= float)
self.str = Attribute("string", destObj=self.data)
def test_construction(self):
self.assertEqual(self.int1.name, "int1")
self.assertEqual(self.int2.name, "int2")
self.assertEqual(self.int3.name, "integer")
self.assertEqual(self.flt1.name, "flt")
self.assertEqual(self.flt2.name, "value")
self.assertEqual(self.int1.data, self.data.int1)
self.assertEqual(self.int2.data, self.data.int2)
self.assertEqual(self.int3.data, self.integer)
self.assertEqual(self.flt1.data, self.data.float)
self.assertEqual(self.flt2.data, self.flt2.value)
self.assertEqual(self.str.data, self.data.string)
def test_fromString(self):
value="123"
x=int(value)
self.int1.fromString(value)
self.int2.fromString(value)
self.int3.fromString(value)
self.str.fromString(value)
self.assertEqual(self.int1.data, x)
self.assertEqual(self.int2.data, x)
self.assertEqual(self.int3.data, x)
self.assertEqual(self.data.int1, x)
self.assertEqual(self.data.int2, x)
self.assertEqual(self.integer, x)
self.assertEqual(self.data.string, value)
value="1.23"
self.flt1.fromString(value)
self.flt2.fromString(value)
x= float(value)
self.assertEqual(self.flt1.data, x)
self.assertEqual(self.data.float, x)
self.assertEqual(self.flt2.data, x)
self.assertEqual(self.flt2.value, x)
value="a1.23"
self.assertRaises(ValueError, self.int1.fromString, value)
def test_contextInterface(self):
self.assertIsNone(self.int1.parent)
self.assertIs(self.int1, self.int1.decorator)
self.assertEqual(self.flt1.about, "")
self.assertIsNone(self.flt2.default)
self.int1.open()
self.int1.close()
self.int1.clear()
self.assertRaises(NotImplementedError, self.int2.getContext, "xx")
self.assertRaises(NotImplementedError, self.int2.insert, self.int3)
def test_attr(self):
dmc= attr("int1", destObj= self.data, valueType=int)
self.assertIs(dmc._ctx.data, self.data.int1)
def suite():
"""Get Test suite object
"""
return unittest.TestLoader().loadTestsFromTestCase(AttributeTestCase)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run( suite() ) | claashk/python-config | tests/context/attribute.py | Python | gpl-3.0 | 3,198 | 0.014071 |
# -*- coding: utf-8 -*-
#
#
# OpenERP, Open Source Management Solution
# Authors: Raphaël Valyi, Renato Lima
# Copyright (C) 2011 Akretion LTDA.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{'name': 'Sale Exceptions',
'summary': 'Custom exceptions on sale order',
'version': '1.0',
'category': 'Generic Modules/Sale',
'description': """
Sale Exceptions
===============
This module allows you to attach several customizable exceptions to your
sale order so that you can filter orders by exception type and fix them.
This is especially useful in an order importation scenario such as with
the base_sale_multi_channels module, because it's likely a few orders have
errors when you import them (such as a product not found in Odoo, a wrong
line format, etc.)
Contributors
------------
* Raphaël Valyi <raphael.valyi@akretion.com>
* Renato Lima <renato.lima@akretion.com>
* Sébastien BEAU <sebastien.beau@akretion.com>
* Guewen Baconnier <guewen.baconnier@camptocamp.com>
* Yannick Vaucher <yannick.vaucher@camptocamp.com>
""",
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com',
'depends': ['sale'],
'data': ['sale_workflow.xml',
'sale_view.xml',
'sale_exceptions_data.xml',
'wizard/sale_exception_confirm_view.xml',
'security/ir.model.access.csv',
'settings/sale.exception.csv'],
'installable': True,
}
| luistorresm/sale-workflow | sale_exceptions/__openerp__.py | Python | agpl-3.0 | 2,062 | 0 |
import numpy as np
import openmc
###############################################################################
# Simulation Input File Parameters
###############################################################################
# OpenMC simulation parameters
batches = 15
inactive = 5
particles = 10000
###############################################################################
# Exporting to OpenMC materials.xml File
###############################################################################
# Instantiate some Materials and register the appropriate Nuclides
fuel1 = openmc.Material(material_id=1, name='fuel')
fuel1.set_density('g/cc', 4.5)
fuel1.add_nuclide('U235', 1.)
fuel2 = openmc.Material(material_id=2, name='depleted fuel')
fuel2.set_density('g/cc', 4.5)
fuel2.add_nuclide('U238', 1.)
moderator = openmc.Material(material_id=3, name='moderator')
moderator.set_density('g/cc', 1.0)
moderator.add_element('H', 2.)
moderator.add_element('O', 1.)
moderator.add_s_alpha_beta('c_H_in_H2O')
# Instantiate a Materials collection and export to XML
materials_file = openmc.Materials([fuel1, fuel2, moderator])
materials_file.export_to_xml()
###############################################################################
# Exporting to OpenMC geometry.xml file
###############################################################################
# Instantiate planar surfaces
x1 = openmc.XPlane(surface_id=1, x0=-10)
x2 = openmc.XPlane(surface_id=2, x0=-7)
x3 = openmc.XPlane(surface_id=3, x0=-4)
x4 = openmc.XPlane(surface_id=4, x0=4)
x5 = openmc.XPlane(surface_id=5, x0=7)
x6 = openmc.XPlane(surface_id=6, x0=10)
y1 = openmc.YPlane(surface_id=11, y0=-10)
y2 = openmc.YPlane(surface_id=12, y0=-7)
y3 = openmc.YPlane(surface_id=13, y0=-4)
y4 = openmc.YPlane(surface_id=14, y0=4)
y5 = openmc.YPlane(surface_id=15, y0=7)
y6 = openmc.YPlane(surface_id=16, y0=10)
z1 = openmc.ZPlane(surface_id=21, z0=-10)
z2 = openmc.ZPlane(surface_id=22, z0=-7)
z3 = openmc.ZPlane(surface_id=23, z0=-4)
z4 = openmc.ZPlane(surface_id=24, z0=4)
z5 = openmc.ZPlane(surface_id=25, z0=7)
z6 = openmc.ZPlane(surface_id=26, z0=10)
# Set vacuum boundary conditions on outside
for surface in [x1, x6, y1, y6, z1, z6]:
surface.boundary_type = 'vacuum'
# Instantiate Cells
inner_box = openmc.Cell(cell_id=1, name='inner box')
middle_box = openmc.Cell(cell_id=2, name='middle box')
outer_box = openmc.Cell(cell_id=3, name='outer box')
# Use each set of six planes to create solid cube regions. We can then use these
# to create cubic shells.
inner_cube = +x3 & -x4 & +y3 & -y4 & +z3 & -z4
middle_cube = +x2 & -x5 & +y2 & -y5 & +z2 & -z5
outer_cube = +x1 & -x6 & +y1 & -y6 & +z1 & -z6
outside_inner_cube = -x3 | +x4 | -y3 | +y4 | -z3 | +z4
# Use surface half-spaces to define regions
inner_box.region = inner_cube
middle_box.region = middle_cube & outside_inner_cube
outer_box.region = outer_cube & ~middle_cube
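# A hedged sanity check (optional; relies on openmc's point-in-region test):
# the origin should fall only in the inner box, while a point such as
# (8.5, 0, 0) should fall in the outer shell but not the middle one, e.g.
# assert (0., 0., 0.) in inner_box.region
# assert (8.5, 0., 0.) in outer_box.region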
# Register Materials with Cells
inner_box.fill = fuel1
middle_box.fill = fuel2
outer_box.fill = moderator
# Instantiate root universe
root = openmc.Universe(universe_id=0, name='root universe')
root.add_cells([inner_box, middle_box, outer_box])
# Instantiate a Geometry, register the root Universe, and export to XML
geometry = openmc.Geometry(root)
geometry.export_to_xml()
###############################################################################
# Exporting to OpenMC settings.xml File
###############################################################################
# Instantiate a Settings object, set all runtime parameters, and export to XML
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
# Create an initial uniform spatial source distribution over fissionable zones
uniform_dist = openmc.stats.Box(*outer_cube.bounding_box, only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
settings_file.export_to_xml()
###############################################################################
# Exporting to OpenMC plots.xml File
###############################################################################
plot = openmc.Plot(plot_id=1)
plot.origin = [0, 0, 0]
plot.width = [20, 20]
plot.pixels = [200, 200]
plot.color_by = 'cell'
# Instantiate a Plots collection and export to XML
plot_file = openmc.Plots([plot])
plot_file.export_to_xml()
| bhermanmit/openmc | examples/python/boxes/build-xml.py | Python | mit | 4,480 | 0.000446 |
"""
Copyright 2016, Paul Powell, All rights reserved.
"""
import team
import round
class Region:
def __init__(self, name, teams, algorithm):
self.initialize(name, teams)
self.name = name
self.rounds = []
self.algorithm = algorithm
self.final = None
def __call__(self, madness):
round1 = round.Round(self.name, 1, madness, self.algorithm, self.matchups)
round2 = round1.go()
round3 = round2.go()
round4 = round3.go()
self.rounds = [round1, round2, round3, round4]
# Special hacks for final round
self.final = self.algorithm(round4.games[0], madness)
round4.winner = self.final.winner
round4.results.append(self.final)
return self.final()[0]
def initialize(self, name, teams):
# Looks like [((1,16), (8,9)), ((5,12), (4,13)), ((6,11), (3,14)), ((7,10), (2,15))]
sregion = name
game1 = (team.Team(teams[1], sregion, 1), team.Team(teams[16], sregion, 16))
game2 = (team.Team(teams[8], sregion, 8), team.Team(teams[9], sregion, 9))
game3 = (team.Team(teams[5], sregion, 5), team.Team(teams[12], sregion, 12))
game4 = (team.Team(teams[4], sregion, 4), team.Team(teams[13], sregion, 13))
game5 = (team.Team(teams[6], sregion, 6), team.Team(teams[11], sregion, 11))
game6 = (team.Team(teams[3], sregion, 3), team.Team(teams[14], sregion, 14))
game7 = (team.Team(teams[7], sregion, 7), team.Team(teams[10], sregion, 10))
game8 = (team.Team(teams[2], sregion, 2), team.Team(teams[15], sregion, 15))
self.matchups = [(game1, game2), (game3, game4), (game5, game6), (game7, game8)]
def set_sf(self, winner, second):
for matchup in self.matchups:
for game in matchup:
for team in game:
if team.name == winner:
print "found winner"
team.sf = 3
if team.name == second:
print "found second"
team.sf = 2
| ez-p/madness | tournament/engine/region.py | Python | gpl-3.0 | 2,088 | 0.005747 |
#
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
import kivy
kivy.require('1.10.0')
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.app import Builder
from kivy.uix.screenmanager import Screen
from kivy.uix.treeview import TreeView, TreeViewLabel
from kivy.uix.label import Label
from kivy.properties import ObjectProperty, StringProperty, NumericProperty
from datetime import timedelta
from utils import *
from fieldlabel import AutoShrinkFieldLabel
from kivy.logger import LoggerHistory, Logger
from autosportlabs.racecapture.theme.color import ColorScheme
from autosportlabs.uix.toast.kivytoast import toast
from main import RaceCaptureApp
from autosportlabs.widgets.scrollcontainer import ScrollContainer
STATUS_KV_FILE = 'autosportlabs/racecapture/views/status/statusview.kv'
RAW_STATUS_BGCOLOR_1 = ColorScheme.get_background()
RAW_STATUS_BGCOLOR_2 = ColorScheme.get_dark_background()
class StatusLabel(AutoShrinkFieldLabel):
backgroundColor = ObjectProperty(RAW_STATUS_BGCOLOR_1)
class StatusTitle(StatusLabel):
pass
class StatusValue(StatusLabel):
def __init__(self, **kwargs):
        super(StatusValue, self).__init__(**kwargs)
self.shorten = False
# Simple extension of Kivy's TreeViewLabel so we can add on our own properties
# to it for easier view tracking
class LinkedTreeViewLabel(TreeViewLabel):
id = None
# Shows RCP's entire status, getting the values by polling RCP for its status
class StatusView(Screen):
_bg_current = RAW_STATUS_BGCOLOR_1
# Dict object that contains the status of RCP
status = ObjectProperty(None)
# Currently selected menu item
_selected_item = None
_menu_built = False
# Track manager for getting track name
track_manager = None
# Connection to status pump
_status_pump = None
# Used for building the left side menu
_menu_keys = {
"app": "Application",
"system": "Device",
"GPS": "GPS",
"cell": "Cellular",
"bt": "Bluetooth",
"logging": "Logging",
"track": "Track",
"telemetry": "Telemetry",
"wifi": "WiFi",
"imu": "Accel/Gyro",
}
# Dict for getting English text for status enums
_enum_keys = {
'GPS': {
'init': [
'Not initialized',
'Initialized',
'Error initializing'
],
'qual': [
'No fix',
'Weak',
'Acceptable',
'Strong'
]
},
'cell': {
'init': [
'Not initialized',
'Initialized',
'Searching',
'Denied',
'Registered'
],
'sig_str': [
'Unknown',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'OK',
'OK',
'OK',
'OK',
'OK',
'Good',
'Good',
'Good',
'Good',
'Good',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent'
]
},
'bt': {
'init': [
'Not initialized',
'Initialized',
'Error initializing'
]
},
'wifi': {
'init': [
'Not initialized',
'Initialized'
]
},
'logging': {
'status': [
'Not logging',
'Logging',
'Error logging'
]
},
'track': {
'status': [
'Searching',
'Fixed start/finish',
'Detected',
'Manually Set'
]
},
'telemetry': {
'status': [
'Idle',
'Connected',
'Connection terminated',
'Device ID rejected',
'Data connection failed. SIM card is valid, either no data plan is associated or the plan has expired.',
'Failed to connect to server',
'Data connection failed. APN settings possibly wrong.',
'Unable to join network'
]
}
}
_menu_node = None
menu_select_color = ColorScheme.get_primary()
def __init__(self, track_manager, status_pump, **kwargs):
Builder.load_file(STATUS_KV_FILE)
super(StatusView, self).__init__(**kwargs)
self.track_manager = track_manager
self.register_event_type('on_tracks_updated')
self._menu_node = self.ids.menu
self._menu_node.bind(selected_node=self._on_menu_select)
status_pump.add_listener(self.status_updated)
self._build_core_menu()
def _build_core_menu(self):
# build application status node
self._append_menu_node('Application', 'app')
# select the first node in the tree.
self._menu_node.select_node(self._menu_node.root.nodes[0])
def _build_menu(self):
if self._menu_built:
return
for item in self.status.iterkeys():
text = self._menu_keys[item] if item in self._menu_keys else item
self._append_menu_node(text, item)
self._menu_built = True
def _append_menu_node(self, text, item):
label = LinkedTreeViewLabel(text=text)
label.id = item
label.color_selected = self.menu_select_color
return self._menu_node.add_node(label)
def _on_menu_select(self, instance, value):
self._selected_item = value.id
self.update()
def status_updated(self, status):
self.status = status['status']
def update(self):
_bg_current = RAW_STATUS_BGCOLOR_1
if self._selected_item in self._menu_keys:
text = self._menu_keys[self._selected_item]
else:
text = self._selected_item
self.ids.name.text = text
self.ids.status_grid.clear_widgets()
function_name = ('render_' + self._selected_item).lower()
# Generic way of not having to create a long switch or if/else block
# to call each render function
if function_name in dir(self):
getattr(self, function_name)()
else:
self.render_generic(self._selected_item)
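    # For example, selecting the 'GPS' menu entry resolves to render_gps(),
    # while a section without a dedicated render_* method falls back to
    # render_generic() and simply lists its key/value pairs.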
def render_generic(self, section):
status = self.status[section]
for item, value in status.iteritems():
self._add_item(item, value)
def render_app(self):
label_widget = StatusTitle(text='Application Log')
self.ids.status_grid.add_widget(label_widget)
self.ids.status_grid.add_widget(ApplicationLogView())
self._add_item('Application Version', RaceCaptureApp.get_app_version())
def render_system(self):
if 'git_info' in self.status['system']:
version = self.status['system']['git_info']
else:
version = '.'.join(
[
str(self.status['system']['ver_major']),
str(self.status['system']['ver_minor']),
str(self.status['system']['ver_bugfix'])
]
)
self._add_item('Version', version)
self._add_item('Serial Number', self.status['system']['serial'])
uptime = timedelta(seconds=(self.status['system']['uptime'] / 1000))
self._add_item('Uptime', uptime)
def render_gps(self):
status = self.status['GPS']
init_status = self._get_enum_definition('GPS', 'init', status['init'])
quality = self._get_enum_definition('GPS', 'qual', status['qual'])
location = str(status['lat']) + ', ' + str(status['lon'])
satellites = status['sats']
dop = status['DOP']
self._add_item('Status', init_status)
self._add_item('GPS Quality', quality)
self._add_item('Location', location)
self._add_item('Satellites', satellites)
self._add_item('Dilution of precision', dop)
def render_cell(self):
status = self.status['cell']
init_status = self._get_enum_definition('cell', 'init', status['init'])
imei = status['IMEI']
signal_strength = self._get_enum_definition('cell', 'sig_str', status['sig_str'], 'Unknown')
number = status['number']
self._add_item('Status', init_status)
self._add_item('IMEI', imei)
self._add_item('Signal strength', signal_strength)
self._add_item('Phone Number', number)
self._add_item('Network Status', status.get('state', '').capitalize())
def render_bt(self):
status = self.status['bt']
init_status = self._get_enum_definition('bt', 'init', status['init'])
self._add_item('Status', init_status)
def render_wifi(self):
status = self.status['wifi']
initialized = status['initialized']
ap_enabled = status['ap']['active']
self._add_item('Status', self._get_enum_definition('wifi', 'init', int(status['initialized'])))
self._add_item('Access Point', 'Enabled' if ap_enabled else 'Disabled')
client_enabled = status['client']['active']
client_connected = status['client']['connected']
connected_msg = '' if not client_enabled else '({})'.format(
'Connected' if client_connected else 'Disconnected')
client_status_msg = '{} {}'.format('Enabled' if client_enabled else 'Disabled', connected_msg)
self._add_item('Client', client_status_msg)
def render_imu(self):
status = self.status['imu']
self._add_item('Status', 'Initialized' if status['init'] else 'Not initialized')
def render_logging(self):
status = self.status['logging']
init_status = self._get_enum_definition('logging', 'status', status['status'])
duration = timedelta(seconds=(status['dur'] / 1000))
self._add_item('Status', init_status)
self._add_item('Logging for', duration)
def render_telemetry(self):
status = self.status['telemetry']
init_status = self._get_enum_definition('telemetry', 'status', status['status'])
duration = timedelta(seconds=(status['dur'] / 1000))
self._add_item('Status', init_status)
self._add_item('Logging for', duration)
def render_track(self):
status = self.status['track']
init_status = self._get_enum_definition('track', 'status', status['status'])
if status['status'] == 1:
track_name = 'User defined'
else:
if status['trackId'] != 0:
track = self.track_manager.find_track_by_short_id(status['trackId'])
if track is None:
if status['status'] == 1:
track_name = 'Fixed'
else:
track_name = 'Track not found'
else:
track_name = track.name
configuration_name = track.configuration
if configuration_name and len(configuration_name):
track_name += ' (' + configuration_name + ')'
else:
track_name = 'No track detected'
in_lap = 'Yes' if status['inLap'] == 1 else 'No'
armed = 'Yes' if status['armed'] == 1 else 'No'
self._add_item('Status', init_status)
self._add_item('Track', track_name)
self._add_item('In lap', in_lap)
self._add_item('Armed', armed)
def _add_item(self, label, data):
label_widget = StatusTitle(text=label)
data_widget = StatusValue(text=str(data))
self.ids.status_grid.add_widget(label_widget)
self.ids.status_grid.add_widget(data_widget)
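        # alternate the row background: every other label/value pair uses the
        # darker color so long status lists stay readable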
if len(self.ids.status_grid.children) / 2 % 2 == 0:
bg_color = RAW_STATUS_BGCOLOR_2
else:
bg_color = RAW_STATUS_BGCOLOR_1
label_widget.backgroundColor = bg_color
data_widget.backgroundColor = bg_color
def on_status(self, instance, value):
self._build_menu()
self.update()
# Generalized function for getting an enum's English
# equivalent. If the value is not found, the enum is returned
def _get_enum_definition(self, section, subsection, value, default=None):
val = default if default is not None else value
if section in self._enum_keys and subsection in self._enum_keys[section]:
enum_data = self._enum_keys[section][subsection]
if len(enum_data) > value:
val = enum_data[value]
return val
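    # A minimal sketch of how _get_enum_definition resolves values (the
    # numbers below are illustrative only):
    #   _get_enum_definition('GPS', 'qual', 2)              -> 'Acceptable'
    #   _get_enum_definition('GPS', 'qual', 99)             -> 99 (echoed back)
    #   _get_enum_definition('cell', 'sig_str', 99, 'n/a')  -> 'n/a' (default)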
def on_tracks_updated(self, track_manager):
pass
class ApplicationLogView(BoxLayout):
def copy_app_log(self):
try:
recent_log = ''
for record in reversed(LoggerHistory.history):
recent_log += record.msg + '\r\n'
paste_clipboard(recent_log)
toast('Application log copied to clipboard')
except Exception as e:
Logger.error("ApplicationLogView: Error copying app log to clipboard: " + str(e))
toast('Unable to copy to clipboard\n' + str(e), True)
| autosportlabs/RaceCapture_App | autosportlabs/racecapture/views/status/statusview.py | Python | gpl-3.0 | 14,474 | 0.002418 |
import openpathsampling.pathmover_inout
import svgwrite as svg
from svgwrite.container import Group
import openpathsampling as paths
import os
import ujson
from collections import namedtuple, OrderedDict, Counter
# TODO: Move TreeRenderer and Builder to a different file ???
class TreeRenderer(svg.Drawing):
"""
Helper Class to render SVG Drawings
Main use is that it is difficult to scale coordinates in SVG
without distort the content. What we want is to move objects further
apart of close while maintaining their size.
"""
def __init__(self):
super(TreeRenderer, self).__init__()
self.scale_x = 20.0
self.scale_y = 20.0
self.horizontal_gap = 0.05
def add_css_file(self, css_file='vis'):
css_file_name = os.path.join(
paths.resources_directory, css_file + '.css')
with open(css_file_name) as content_file:
vis_css = content_file.read()
# Add the CSS Stylesheet
self.defs.add(self.style(
vis_css
))
def add_css(self, css_style):
self.defs.add(self.style(
css_style
))
@staticmethod
def css_class(css_class):
"""
Generate a string that can be passed to the SVG class attribute
Parameters
----------
css_class : list of str
the class names as a list
Returns
-------
str
the actual string
"""
return ' '.join(css_class)
def x(self, x):
return self.w(x)
def y(self, y):
return self.h(y)
def w(self, y):
return self.scale_x * y
def h(self, y):
return self.scale_y * y
def xy(self, x, y):
return self.x(x), self.y(y)
def wh(self, w, h):
return self.w(w), self.h(h)
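    # Illustrative sketch of the coordinate helpers (values assume the default
    # scale_x = scale_y = 20.0 set in __init__): a logical position (2, 3)
    # maps to pixel coordinates xy(2, 3) == (40.0, 60.0) and a logical size
    # (1, 0.6) maps to wh(1, 0.6) == (20.0, 12.0). Changing the scales moves
    # elements apart or together without resizing them.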
def connector(self, x, y, text="", css_class=None):
if css_class is None:
css_class = list()
css_class += ['connector']
return self.block(
x, y, text, False, False, css_class=css_class)
def block(self, x, y, text="",
extend_right=True, extend_left=True,
extend_top=False, extend_bottom=False,
w=1.0, color=None, css_class=None, data=None):
if css_class is None:
css_class = list()
css_class += ['block']
padding = self.horizontal_gap
group = self.g(
class_=self.css_class(css_class)
)
if color is not None:
adds = {'fill': color}
else:
adds = {}
if data is not None:
group.set_desc(desc=ujson.dumps(data))
group.add(self.rect(
insert=self.xy(x - 0.5 + padding, y - 0.3),
size=self.wh(1.0 * w - 2 * padding, 0.6),
**adds
))
if extend_left:
group.add(self.circle(
center=self.xy(x - 0.5, y),
r=self.w(padding)
))
if extend_right:
group.add(self.circle(
center=(self.xy(x + w - 0.5, y)),
r=self.w(padding)
))
if extend_top:
group.add(self.circle(
center=self.xy(x, y - 0.3),
r=self.w(padding)
))
if extend_bottom:
group.add(self.circle(
center=(self.xy(x + w - 1.0, y + 0.3)),
r=self.w(padding)
))
group.add(self.text(
text=str(text),
insert=self.xy(x + (w - 1.0) / 2.0, y)
))
return group
def horizontal_region(
self, x, y, w=1.0, text="",
extend_right=False, extend_left=False, css_class=None):
if css_class is None:
css_class = list()
css_class += ['h-region']
if w == 0:
return []
padding = self.horizontal_gap
group = Group(
class_=self.css_class(css_class)
)
group.add(self.line(
start=self.xy(x - 0.5 + padding, y),
end=self.xy(x - 0.5 + w - padding, y)
))
if extend_left:
group.add(self.circle(
center=self.xy(x - 0.5, y),
r=self.w(padding)
))
group.add(self.line(
start=self.xy(x - 0.5, y - 0.3),
end=self.xy(x - 0.5, y + 0.3)
))
if extend_right:
group.add(self.circle(
center=(self.xy(x + w - 0.5, y)),
r=self.w(padding)
))
group.add(self.line(
start=self.xy(x + w - 0.5, y - 0.3),
end=self.xy(x + w - 0.5, y + 0.3)
))
text = str(text)
if self.w(w) < len(text) * 5:
text = text[0]
if self.w(w) < 10:
text = ''
group.add(self.text(
text=str(text),
insert=self.xy(x + (w - 1.0) / 2.0, y),
class_='shadow'
))
group.add(self.text(
text=str(text),
insert=self.xy(x + (w - 1.0) / 2.0, y)
))
return group
def vertical_region(
self, x, y, w=1.0, text="",
extend_top=True, extend_bottom=True, css_class=None):
if css_class is None:
css_class = list()
css_class += ['v-region']
# padding = self.horizontal_gap
width = 0.2
gap = 0.0
radius = 0.07
group = Group(
class_=self.css_class(css_class)
)
group.add(self.line(
start=self.xy(x, y - 0.5 + gap),
end=self.xy(x, y + w - 1 + 0.5 - gap)
))
if extend_top:
group.add(self.circle(
center=self.xy(x, y - 0.5 + gap),
r=self.w(radius)
))
group.add(self.line(
start=self.xy(x - 1.0 * width, y - 0.5 + gap),
end=self.xy(x + width, y - 0.5 + gap)
))
if extend_bottom:
group.add(self.circle(
center=(self.xy(x, y + (w - 1.0) + 0.5 - gap)),
r=self.w(radius)
))
group.add(self.line(
start=self.xy(x - 1.0 * width, y + w - 1.0 + 0.5 - gap),
end=self.xy(x + width, y + w - 1.0 + 0.5 - gap)
))
group.add(self.text(
text=str(text),
insert=self.xy(x - width, y + (w - 1.0) / 2.0)
))
return group
def shade(self, x, y, w, css_class=None, color=None):
if css_class is None:
css_class = list()
css_class += ['shade']
adds = {}
if color is not None:
adds = {'fill': color}
group = self.g(
class_=self.css_class(css_class)
)
group.add(self.rect(
insert=self.xy(x - 0.6, y + 0.10),
size=self.wh(w + 0.2, 0.25),
fill='white'
))
group.add(self.rect(
insert=self.xy(x - 0.6, y - 0.35),
size=self.wh(w + 0.2, 0.25),
fill='white'
))
group.add(self.rect(
insert=self.xy(x - 0.5, y + 0.15),
size=self.wh(w, 0.15),
**adds
))
group.add(self.rect(
insert=self.xy(x - 0.5, y - 0.30),
size=self.wh(w, 0.15),
**adds
))
return group
def vertical_connector(self, x, y1, y2, css_class=None):
if css_class is None:
css_class = list()
css_class += ['v-connector']
padding = self.horizontal_gap
return self.line(
class_=self.css_class(css_class),
start=self.xy(x - 0.5, y1 + padding),
end=self.xy(x - 0.5, y2 - padding)
)
def vertical_hook(self, x1, y1, x2, y2, css_class=None):
if css_class is None:
css_class = list()
css_class += ['v-hook']
padding = self.horizontal_gap
return self.line(
class_=self.css_class(css_class),
start=self.xy(x1, y1 + padding + 0.3),
end=self.xy(x2, y2 - padding - 0.3)
)
def horizontal_connector(self, x1, x2, y, css_class=None):
if css_class is None:
css_class = list()
css_class += ['h-connector']
padding = self.horizontal_gap
return self.line(
class_=self.css_class(css_class),
start=self.xy(x1 + 0.5 + padding, y),
end=self.xy(x2 - 0.5 - 2 * padding, y)
)
def label(self, x, y, text, css_class=None):
if css_class is None:
css_class = list()
css_class += ['label']
group = self.g(
class_=self.css_class(css_class)
)
group.translate(self.x(x), self.y(y))
group2 = self.g(
class_='shift'
)
group2.add(
self.text(
text=str(text),
insert=(0, 0)
)
)
group.add(
group2
)
return group
def vertical_label(self, x, y, text, css_class=None):
if css_class is None:
css_class = list()
css_class += ['v-label']
group = self.g(
class_=self.css_class(css_class)
)
group.translate(x, y)
group.add(
self.text(
text=str(text),
insert=(0, 0),
)
)
return group
def rectangle(self, x, y, w, h, css_class=None):
if css_class is None:
css_class = list()
return self.rect(
class_=self.css_class(css_class),
insert=self.xy(x, y),
size=self.wh(w, h),
)
def to_svg(self):
return self.tostring()
def to_html(self):
svg_source = self.to_svg()
html = '<!DOCTYPE html>' \
'<html style="margin:0px; padding:0px; width:100%;">' + \
svg_source + \
'<body style="margin:0px; padding:0px;"></body></html>'
return html
def _height(self):
return self.h(self.height) + self.margin * 2
def _width(self):
return self.w(self.width) + self.margin * 2
class Builder(object):
"""
    Abstract base class for building SVG representations
"""
unique_id = 0
def __init__(self, additional_option_categories=None, base_css_style='vis'):
options = ['analysis', 'css', 'ui', 'format']
if additional_option_categories is not None:
options += additional_option_categories
option_tuple_class = namedtuple(
'optionstuple',
' '.join(options)
)
self.options = option_tuple_class(**{opt: {} for opt in options})
self.base_css_style = base_css_style
self._add_css = []
def add_css(self, css):
self._add_css.append(css)
def reset_css(self):
self._add_css = []
def svg(self):
svg = self.render()
self._finalize_svg(svg)
return svg.tostring()
def _finalize_svg(self, svg):
# add a unique ID
unique_id = 'pathtree-' + str(Builder.unique_id)
Builder.unique_id += 1
svg['id'] = unique_id
# add CSS
svg.add_css_file(self.base_css_style)
        if self._add_css:
for css in self._add_css:
svg.add_css(css.replace('#self', '#' + unique_id))
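    # A minimal sketch of how a Builder subclass is meant to be used (the
    # subclass name below is made up for illustration):
    #
    #   class MyBuilder(Builder):
    #       def render(self):
    #           doc = TreeRenderer()
    #           doc.add(doc.block(0, 0, 'hello'))
    #           return doc
    #
    #   svg_source = MyBuilder().svg()   # render(), then unique id and CSS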
def html(self):
return self.svg()
def render(self):
"""
Create the graphics object
Returns
-------
`class`:TreeRenderer
the rendering object that can return the final graphics
"""
        raise NotImplementedError('This is a stub class. Use a derived instance!')
class MoveTreeBuilder(Builder):
"""
Builder Class for creating MoveTree Visualisations
You need to specify a :obj:`openpathsampling.PathMover` and a list of
ensembles. Then it will display all possible steps in the pathmover and its
relation to the given list of ensembles.
    This is useful to get an idea which parts of the move scheme affect
    which of the given ensembles
"""
def __init__(self, pathmover=None, ensembles=None, initial=None):
super(MoveTreeBuilder, self).__init__()
self.p_x = dict()
self.p_y = dict()
self.obj = list()
self.ensembles = []
self.pathmover = None
self.initial = None
self.traj_ens_x = dict()
self.traj_ens_y = dict()
self.traj_repl_x = dict()
self.traj_repl_y = dict()
self.ens_x = list()
self.repl_x = list()
self.options.analysis['only_canonical'] = True
self.options.analysis['label_with'] = "name" # or "class"
self.doc = None
if pathmover is not None:
self.pathmover = pathmover
if ensembles is not None:
self.ensembles = ensembles
if initial is not None:
self.initial = initial
@staticmethod
def from_scheme(scheme, hidden_ensembles=True):
"""
        Initialize a new `MoveTreeBuilder` from the data in a `MoveScheme`
Parameters
----------
scheme : :obj:`openpathsampling.MoveScheme`
use the root mover of this scheme as the basis for visualization
hidden_ensembles : bool
whether to show the scheme's hidden ensembles as well (default
True)
Returns
-------
:obj:`MoveTreeBuilder`
"""
try:
# inp is a move scheme
input_ensembles = scheme.list_initial_ensembles()
except AttributeError:
# inp is a path mover
# ??? this is nonsense in from_scheme, isn't it? you would get
# error on the thing you return below ~~~DWHS
input_ensembles = scheme.input_ensembles
# using network.all_ensembles forces a correct ordering
ensembles = scheme.network.all_ensembles
if hidden_ensembles:
ensembles += list(scheme.find_hidden_ensembles())
return MoveTreeBuilder(
pathmover=scheme.root_mover,
ensembles=ensembles,
initial=input_ensembles
)
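    # Typical usage (assuming `scheme` is an existing
    # :obj:`openpathsampling.MoveScheme`):
    #
    #   builder = MoveTreeBuilder.from_scheme(scheme)
    #   svg_source = builder.svg()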
@staticmethod
def _get_sub_used(mover, replica_states, level):
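        # depth-first walk over the mover tree: record (mover, level,
        # replica_states) for this mover, then recurse into each submover
        # with the replica states it would receive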
l = [(mover, level, replica_states)]
subs = mover.sub_replica_state(replica_states)
map(
lambda x, y, z: l.extend(MoveTreeBuilder._get_sub_used(x, y, z)),
mover.submovers, subs, [1 + level] * len(mover.submovers)
)
return l
def render(self):
doc = TreeRenderer()
self.doc = doc
level_y = dict()
self.ens_x = [None] * len(self.ensembles)
self.repl_x = [None] * len(self.ensembles)
path = self.pathmover
group = doc.g(
class_='tree'
)
tree = path.depth_pre_order(
lambda this: this,
only_canonical=self.options.analysis['only_canonical'])
total = len(tree)
for yp, (level, sub_mp) in enumerate(tree):
x_pos = - level
sub_type = sub_mp.__class__
if self.options.analysis['label_with'] == "name":
try:
sub_name = sub_mp.name
except AttributeError:
sub_name = sub_type.__name__[:-5]
elif self.options.analysis['label_with'] == "class":
sub_name = sub_type.__name__[:-5]
else: # pragma: no cover (should never occur)
raise ValueError("Bad option for 'label_with': "
                             + str(self.options.analysis['label_with']))
if sub_type is paths.SampleMoveChange:
group.add(
doc.block(level, yp))
group.add(
doc.label(
x_pos,
yp,
sub_name,
css_class=['name'] + [sub_type.__name__]
)
)
else:
group.add(
doc.block(
x_pos,
yp,
)
)
group.add(
doc.label(
x_pos,
yp,
sub_name
)
)
if level - 1 in level_y \
and level_y[level - 1] == yp - 1:
group.add(
doc.vertical_connector(
x_pos + 1,
yp,
yp - 1
)
)
if level + 1 in level_y:
del level_y[level + 1]
if level in level_y and level_y[level]:
group.add(
doc.vertical_connector(
x_pos + 1,
yp,
level_y[level]
)
)
level_y[level] = yp
doc.add(group)
group = doc.g(
class_='ensembles'
)
for ens_idx, ens in enumerate(self.ensembles):
txt = chr(ens_idx + 65)
label = ens.name if hasattr(ens, 'name') else \
ens.__class__.__name__[:-8]
group.add(
doc.label(
ens_idx,
-1,
'[' + txt + '] ' + label,
css_class=['head']
)
)
group.add(
doc.vertical_hook(
ens_idx,
-1,
ens_idx,
total
)
)
max_level = 0
rset = openpathsampling.pathmover_inout.ReplicaStateSet
initial_rs = rset.from_ensembles(self.initial)
subs = MoveTreeBuilder._get_sub_used(self.pathmover, initial_rs, 0)
# this checks if the mover can actually be run without problems
# assert(
# Counter(dict(initial_rs)) >= self.pathmover.in_out_matrix.minimal)
for yp, (level, sub_mp) in enumerate(
path.depth_pre_order(
lambda this: this,
only_canonical=self.options.analysis['only_canonical'])):
sub = subs[yp]
if level > max_level:
max_level = level
possible_input_replica_states = [Counter(dict(s)) for s in sub[2]]
sub_io_set = sub_mp.in_out
# minimal_input_replica_states = sub_io_set.minimal
# in_ens = sub_mp.input_ensembles
# out_ens = sub_mp.output_ensembles
possible_ins = [
i.ins for i in sub_io_set
if any(s >= i.ins for s in possible_input_replica_states)]
possible_outs = [
i.outs for i in sub_io_set
if any(s >= i.ins for s in possible_input_replica_states)]
in_ens = reduce(lambda a, b: a | b, possible_ins, Counter())
out_ens = reduce(lambda a, b: a | b, possible_outs, Counter())
for ens_idx, ens in enumerate(self.ensembles):
txt = chr(ens_idx + 65)
show = False
if in_ens is None or None in in_ens or ens in in_ens:
group.add(
doc.connector(
ens_idx,
yp - 0.15,
css_class=['input']
)
)
show = True
if out_ens is None or None in out_ens or ens in out_ens:
group.add(
doc.connector(
ens_idx,
yp + 0.15,
css_class=['output'])
)
show = True
if show:
group.add(
doc.connector(
ens_idx,
yp,
txt,
css_class=['unknown']
)
)
group.translate(50, 0)
doc.add(group)
doc['class'] = 'movetree'
left_x = -max_level * doc.scale_x - 130
top_y = - 120
width = len(self.ensembles) * doc.scale_x - left_x + 50
height = (total + 1) * doc.scale_y - top_y
# adjust view box to fit full image
doc['viewBox'] = '%.2f %.2f %.2f %.2f' % (
left_x,
top_y,
width,
height
)
doc['width'] = width
return doc
class EnsembleMixBuilder(Builder):
"""
    Builder class for visualizing how a path mover mixes ensembles
    You need to specify a :obj:`openpathsampling.PathMover` and a list of
    ensembles. It will then display the mixing matrix between the given
    ensembles, i.e. from which ensembles a mover takes samples and into
    which ensembles it can place them.
    This is useful to get an idea which parts of the ensembles affect which
    other parts.
"""
def __init__(self, pathmover=None, ensembles=None, initial=None):
super(EnsembleMixBuilder, self).__init__()
self.p_x = dict()
self.p_y = dict()
self.obj = list()
self.ensembles = []
self.pathmover = None
self.initial = None
self.traj_ens_x = dict()
self.traj_ens_y = dict()
self.traj_repl_x = dict()
self.traj_repl_y = dict()
self.ens_x = list()
self.repl_x = list()
self.options.analysis['only_canonical'] = True
self.doc = None
if pathmover is not None:
self.pathmover = pathmover
if ensembles is not None:
self.ensembles = ensembles
if initial is not None:
self.initial = initial
@staticmethod
def from_scheme(scheme):
"""
        Initialize a new `EnsembleMixBuilder` from the data in a `MoveScheme`
Parameters
----------
scheme : :obj:`openpathsampling.MoveScheme`
Returns
-------
        :obj:`EnsembleMixBuilder`
"""
try:
# inp is a move scheme
input_ensembles = scheme.list_initial_ensembles()
except AttributeError:
# inp is a path mover
# ??? this is nonsense in from_scheme, isn't it? you would get
# error on the thing you return below ~~~DWHS
input_ensembles = scheme.input_ensembles
# using network.all_ensembles forces a correct ordering
return EnsembleMixBuilder(
pathmover=scheme.root_mover,
ensembles=scheme.network.all_ensembles,
initial=input_ensembles
)
@staticmethod
def _get_sub_used(mover, replica_states, level):
l = [(mover, level, replica_states)]
subs = mover.sub_replica_state(replica_states)
map(
lambda x, y, z: l.extend(MoveTreeBuilder._get_sub_used(x, y, z)),
mover.submovers, subs, [1 + level] * len(mover.submovers)
)
return l
def render(self):
doc = TreeRenderer()
self.doc = doc
self.ens_x = [None] * len(self.ensembles)
self.repl_x = [None] * len(self.ensembles)
path = self.pathmover
total = len(self.ensembles)
mat = path.in_out.mixing_matrix(self.ensembles)
group = doc.g(
class_='ensembles'
)
for yp, ens1 in enumerate(self.ensembles):
txt = chr(yp + 65)
label = ens1.name if hasattr(ens1, 'name') else \
ens1.__class__.__name__[:-8]
group.add(
doc.label(
-1,
yp,
label
)
)
group.add(
doc.label(
yp,
-1,
'[' + txt + '] ' + label,
css_class=['head']
)
)
group.add(
doc.vertical_hook(
yp,
-1,
yp,
total
)
)
group.add(
doc.horizontal_connector(
-1.35,
total + 0.35,
yp
)
)
for yp, ens1 in enumerate(self.ensembles):
for ens_idx, ens2 in enumerate(self.ensembles):
txt = ''
m = mat[ens1][ens2]
if 0 in m:
txt += 'A'
if 1 in m:
txt += 'O'
if -1 in m:
txt += 'R'
if 1 in m:
group.add(
doc.connector(
ens_idx,
yp - 0.15,
txt,
css_class=['input']
)
)
if -1 in m:
group.add(
doc.connector(
ens_idx,
yp + 0.15,
txt,
css_class=['output']
)
)
if 0 in m:
group.add(
doc.connector(
ens_idx,
yp,
txt,
css_class=['unknown']
)
)
group.translate(50, 0)
doc.add(group)
doc['class'] = 'movetree'
left_x = - 120
top_y = - 120
width = len(self.ensembles) * doc.scale_x - left_x + 50
height = (total + 1) * doc.scale_y - top_y
# adjust view box to fit full image
doc['viewBox'] = '%.2f %.2f %.2f %.2f' % (
left_x,
top_y,
width,
height
)
doc['width'] = width
return doc
def _create_simple_legend(title, fnc, width=1):
def _legend_fnc(self):
doc = self.doc
part = doc.g(class_='legend-' + title)
part.add(
doc.label(0, 0, title, css_class=['head'])
)
for pos_y, data in enumerate(self._plot_sample_list):
sample = data['sample']
part.add(
doc.label(0, 1 + pos_y, str(
fnc(sample)))
)
return part, width
return _legend_fnc
class PathTreeBuilder(Builder):
"""
Builder class to visualize the time evolution of a list of samples
This will basically create path trees as known from TIS and adding some
useful features.
The basic way to use it is to create a list of samples that should be
visualized first. Then create the `PathTreeBuilder` and
>>> tree = PathTreeBuilder.from_()
>>> tree.samples = my_samplelist
>>> SVG(tree.svg())
    There are a lot of options. For a full list see the tutorial on pathtree
visualization.
Attributes
----------
states : dict, 'svg_color': :obj:`openpathsampling.Volume`-like
        a dictionary mapping a color that fulfills the SVG specification,
        like `#888`, `gold` or `rgb(12,32,59)`, to a volume-like object that
        will return a bool when passed a snapshot. If true, the snapshot
        is highlighted using the given color
op : :obj:`openpathsampling.CollectiveVariable`-like
a function that returns a value when passed a snapshot. The value will
be put on single snapshots.
"""
def __init__(self):
super(PathTreeBuilder, self).__init__(['movers'])
self.obj = list()
self.doc = None
self.states = {}
self.op = None
self._generator = None
self._plot_sample_list = None
self.reset_options()
self.coloring = None
@property
def generator(self):
"""
:obj:`SampleList` : a `SampleList` object containing the list of samples
to be plotted
"""
return self._generator
@generator.setter
def generator(self, generator):
self._generator = generator
@property
def samples(self):
return iter(self._generator)
def render(self):
# make sure we are up-to-date
self.generator.analyze()
doc = TreeRenderer()
self.doc = doc
opts = self.options
doc.scale_x = opts.css['scale_x']
doc.scale_y = opts.css['scale_y']
# TODO: Might remove this option. Could be useful for teaching purposes
if type(opts.css['horizontal_gap']) is bool:
doc.horizontal_gap = 0.05 if opts.css['horizontal_gap'] else 0.0
else:
doc.horizontal_gap = opts.css['horizontal_gap']
matrix = self.generator.matrix
# Loops over samples first time to determine all necessary information
pos_y = -1
draw_pos_y = {}
self._plot_sample_list = []
for num, sample in enumerate(self.generator):
pos_y += 1
draw_pos_y[num] = pos_y
info = self.generator[sample]
mover_type = 'unknown'
mover = sample.mover
if mover is not None:
mover_type = mover.__class__.__name__
if hasattr(mover, '_visualization_class'):
mover_type = getattr(mover, '_visualization_class')
new_sample = info['new']
time_direction = info['time_direction']
level = info['level']
bw_css_class = 'bw'
fw_css_class = 'fw'
view_options = {}
view_options.update(opts.movers['default'])
if new_sample:
view_options_upd = opts.movers['new']
elif mover_type in opts.movers:
view_options_upd = opts.movers[mover_type]
else:
view_options_upd = opts.movers['unknown']
view_options.update(view_options_upd)
if view_options['hide']:
pos_y -= 1
draw_pos_y[num] = None
continue
label_position = view_options['label_position']
if time_direction == -1:
bw_css_class, fw_css_class = fw_css_class, bw_css_class
label_position = 'left' if \
view_options['label_position'] == 'right' else 'right'
css_class = [] + view_options['css_class']
step_accepted = True
move_accepted = True
if isinstance(self.generator, SampleListGenerator):
# we have steps available to figure out, if a step was rejected
step = self.generator.get_step(sample)
if step is not None:
step_accepted = step.change.accepted
if not step_accepted:
                        # the initial step uses an EmptyMove; technically an
                        # EmptyMove is rejected, but since it never even had
                        # an acceptance step we treat it as accepted for
                        # visual purposes
active_steps = self.generator.get_active_steps(sample)
                        # if this sample shows up in an active sampleset
                        # somewhere, it must have been accepted in the past;
                        # so even if none of the steps we know accepted it,
                        # we treat its first mention as an acceptance
if active_steps is not None:
step_accepted = True
change = self.generator.get_change(sample)
if change is not None:
move_accepted = change.accepted
if not step_accepted and opts.css['mark_transparent'] == 'rejected':
css_class += ['rejected']
if level > 0 and opts.css['mark_transparent'] == 'auxiliary':
css_class += ['rejected']
if not move_accepted and opts.css['mark_transparent'] == 'submove':
css_class += ['rejected']
data = {
'sample': sample,
'sample_idx': num,
'css_class': css_class,
'view_options': view_options,
'bw_css_class': bw_css_class,
'fw_css_class': fw_css_class,
'label_position': label_position,
'mover_type': mover_type,
'mover_accepted': move_accepted,
'step_accepted': step_accepted
}
self._plot_sample_list.append(data)
# start plotting all parts from here
tree_group = doc.g(
class_='tree'
)
_doc_parts = [
self.part_trajectory_label(),
self.part_shooting_hooks(),
self.part_snapshot_blocks()
]
# finish snapshot block on the right
min_x, max_x = min(matrix.matrix_x.keys()), max(matrix.matrix_x.keys())
min_y, max_y = 0, pos_y
tree_group.translate(32 + doc.w(1 - min_x), doc.h(1))
for part in _doc_parts:
tree_group.add(part)
# +--------------------------------------------------------------------
# + LEGEND
# +--------------------------------------------------------------------
legend_group = doc.g(
class_='legend'
)
# use different x-scaling for the legend
tree_scale = opts.css['scale_x']
doc.scale_x = 32
# collect all parts of the legend separately
legend_parts = []
for part in reversed(opts.ui['legends']):
if type(part) is str:
method_name = 'part_legend_' + part
if hasattr(self, method_name):
legend_parts.append(getattr(self, method_name)())
else:
legend_parts.append(part(self))
# add all the legend parts
pos_shift = 0
for part, width in legend_parts:
part.translate(- doc.scale_x * pos_shift)
legend_group.add(part)
pos_shift += width
# +--------------------------------------------------------------------
# + BUILD FINAL IMAGE
# +--------------------------------------------------------------------
left_x = (-0.5 - pos_shift) * doc.scale_x
width = 64 + tree_scale * (max_x - min_x + 2) - left_x
height = doc.scale_y * (max_y + 3.0)
top_y = -1.5 * doc.scale_y
# build the full figure
group_all = doc.g()
group_all.add(tree_group)
group_all.add(legend_group)
# INFO BOX PER SNAPSHOT (still experimental)
if opts.ui['info']:
group_all.add(self.part_info_box())
group_all.add(self.part_hovering_blocks(left_x, width))
zoom = opts.css['zoom']
group_all.scale(zoom)
doc.add(group_all)
# set the overall OPS tree class
doc['class'] = 'opstree'
# adjust view box to fit full image
doc['viewBox'] = '%.2f %.2f %.2f %.2f' % (
left_x * zoom,
top_y * zoom,
width * zoom,
height * zoom
)
# set width
w_opt = opts.css['width']
if w_opt == 'inherit':
# inherit will use the actual size in pixels
doc['width'] = width * zoom
else:
doc['width'] = w_opt
return doc
def part_hovering_blocks(self, left, width):
doc = self.doc
group = doc.g(class_='hovering-blocks')
# +--------------------------------------------------------------------
# + HOVERING TABLE LINE PLOT
# +--------------------------------------------------------------------
css_class = ['tableline']
for pos_y, data in enumerate(self._plot_sample_list):
group.add(
doc.rect(
class_=doc.css_class(css_class),
insert=(left, doc.y(1 + pos_y - 0.45)),
size=(width, doc.scale_y * 0.9)
)
)
return group
def part_trajectory_label(self):
doc = self.doc
group = doc.g(class_='trajectory-label')
trj_format = self._create_naming_fnc(
self.options.format['trajectory_label'])
for pos_y, data in enumerate(self._plot_sample_list):
sample = data['sample']
info = self.generator[sample]
shift = info['shift']
length = info['length']
view_options = data['view_options']
label_position = data['label_position']
css_class = data['css_class']
traj_str = \
str(trj_format(sample.trajectory)) + \
view_options['suffix'].upper()
if label_position == 'left':
group.add(
doc.label(shift, pos_y, traj_str,
css_class=css_class + ['left'])
)
elif label_position == 'right':
group.add(
doc.label(shift + length - 1, pos_y, traj_str,
css_class=css_class + ['right'])
)
return group
def part_shooting_hooks(self):
doc = self.doc
group = doc.g(class_='shooting-hooks')
draw_pos_y = {}
matrix = self.generator.matrix
for pos_y, data in enumerate(self._plot_sample_list):
num = data['sample_idx']
sample = data['sample']
draw_pos_y[num] = pos_y
info = self.generator[sample]
new_sample = info['new']
shift = info['shift']
length = info['length']
length_fw = info['length_fw']
length_bw = info['length_bw']
bw_css_class = data['bw_css_class']
fw_css_class = data['fw_css_class']
css_class = data['css_class']
# SHOOTING HOOKS
if not new_sample:
bw_x = shift + length_bw
fw_x = shift + length - 1 - length_fw
if 0 < length_bw:
root_y = draw_pos_y.get(matrix.root(num, bw_x))
if root_y is not None and root_y < pos_y:
group.add(
doc.vertical_connector(
bw_x, root_y, pos_y,
css_class=css_class + [bw_css_class, 'connection'])
)
if 0 < length_fw:
root_y = draw_pos_y.get(matrix.root(num, fw_x))
if root_y is not None and root_y < pos_y:
group.add(
doc.vertical_connector(
fw_x + 1, root_y, pos_y,
css_class=css_class + [fw_css_class, 'connection'])
)
return group
def part_snapshot_blocks(self):
doc = self.doc
group = doc.g(class_='snapshot-blocks')
matrix = self.generator.matrix
# TRAJECTORY PARTS
opts = self.options
trj_format = self._create_naming_fnc(opts.format['trajectory_label'])
smp_format = self._create_naming_fnc(opts.format['sample_label'])
snp_format = self._create_naming_fnc(opts.format['snapshot_label'])
vis_blocks = {}
for pos_y, data in enumerate(self._plot_sample_list):
num = data['sample_idx']
sample = data['sample']
info = self.generator[sample]
new_sample = info['new']
shift = info['shift']
length = info['length']
length_fw = info['length_fw']
length_bw = info['length_bw']
overlap_reversed = info['overlap_reversed']
bw_css_class = data['bw_css_class']
fw_css_class = data['fw_css_class']
view_options = data['view_options']
css_class = data['css_class']
# draw actual parts of the sample as
# single snapshots, a block of snapshots or a line
parts = []
regions = {
'bw': (0, length_bw),
'fw': (length - length_fw, length),
'full': (0, length),
'overlap': (length_bw, length - length_fw),
'reversed': (length_bw, length - length_fw),
'new': (0, length)
}
css_classs = {
'fw': [fw_css_class],
'bw': [bw_css_class],
'reversed': ['reversed'],
'full': ['full'],
'overlap': ['overlap'],
'new': ['new']
}
vis_types = {
'fw': 'new',
'bw': 'new',
'reversed': 'reversed',
'full': 'full',
'overlap': 'overlap',
'new': 'new'
}
if not new_sample:
if length_bw > 0:
parts.append('bw')
if length_fw > 0:
parts.append('fw')
if overlap_reversed:
parts.append('reversed')
else:
if length_bw == 0 and length_fw == 0:
# if all are new use a special vis
parts.append('full')
else:
parts.append('overlap')
else:
parts.append('new')
for part in parts:
hidden = False
vis_type = view_options[vis_types[part]]
add_css_class = css_classs[part]
region = regions[part]
if vis_type == 'line':
label = view_options['label'] or view_options['name']
group.add(
doc.horizontal_region(
shift + region[0], pos_y, region[1] - region[0],
label, css_class=css_class + add_css_class)
)
elif vis_type == 'block':
group.add(
doc.block(
shift + region[0],
pos_y,
view_options['label'],
w=region[1] - region[0],
extend_left=False,
css_class=css_class + add_css_class
))
elif vis_type == 'single':
for pos in range(region[0], region[1]):
pos_x = shift + pos
snapshot = matrix[num, pos_x]
if opts.ui['info']:
data = {
'smp': smp_format(sample),
'snp': snp_format(snapshot),
'trj': trj_format(sample.trajectory)
}
else:
data = {}
txt = ''
if self.op is not None and opts.ui['cv']:
txt = str(self.op(snapshot))
group.add(
doc.block(
pos_x,
pos_y,
txt,
extend_left=pos > 0,
extend_right=pos < length - 1,
css_class=css_class + add_css_class,
data=data,
color=self.coloring(snapshot)
if self.coloring else None
))
else:
hidden = True
if not hidden:
self._update_vis_block(vis_blocks, num, shift, region)
# STATE COLORING
if self.states is not None:
for color, op in self.states.items():
xp = None
for pos_y, data in enumerate(self._plot_sample_list):
num = data['sample_idx']
left = None
for xp in matrix.get_x_range(num):
if xp in vis_blocks[num] and bool(op(matrix[num, xp])):
if left is None:
left = xp
else:
if left is not None:
group.add(
doc.shade(
left, pos_y, xp - left, color=color)
)
left = None
if left is not None:
group.add(
doc.shade(left, pos_y, xp - left + 1, color=color)
)
return group
def part_info_box(self):
doc = self.doc
group = doc.g(class_='info-box')
group.add(
doc.label(0, -1, 'Information', css_class=['infobox'])
)
doc.defs.add(doc.script(
content='''
box = $('.opstree .infobox text')[0];
var kernel = IPython.notebook.kernel;
$('.opstree .block').each(
function() {
json = JSON.parse($(this)[0].firstChild.textContent);
$(this).data(json);
}
);
$('.opstree .block').hover(
function(){
box.textContent =
'Snapshot(' + $(this).data('snp') + ')' + ' ' +
                    'Trajectory(' + $(this).data('trj') + ')';
},
function(){
box.textContent = '';
});
'''))
return group
part_legend_ensemble = _create_simple_legend(
'ens', lambda sample: sample.ensemble.name)
part_legend_replica = _create_simple_legend(
'repl', lambda sample: sample.replica)
part_legend_bias = _create_simple_legend(
'bias', lambda sample: sample.bias)
def part_legend_sample(self):
doc = self.doc
smp_format = self._create_naming_fnc(
self.options.format['sample_label'])
part = doc.g(class_='legend-sample')
part.add(
doc.label(0, 0, 'smp', css_class=['head'])
)
for pos_y, data in enumerate(self._plot_sample_list):
sample = data['sample']
part.add(
doc.label(0, 1 + pos_y, str(
smp_format(sample)))
)
return part, 1
def part_legend_correlation(self):
doc = self.doc
time_symmetric = self.generator.time_symmetric
part = doc.g(class_='legend-correlation')
part.add(
doc.label(0, 0, 'cor', css_class=['head'])
)
old_tc = 1
prev = self._plot_sample_list[0]['sample']
for pos_y, data in enumerate(self._plot_sample_list):
sample = data['sample']
if pos_y > 0:
if not paths.Trajectory.is_correlated(
sample.trajectory,
prev,
time_reversal=time_symmetric
):
part.add(
doc.vertical_region(
0,
old_tc,
1 + pos_y - old_tc,
css_class=['correlation']
)
)
old_tc = 1 + pos_y
prev = sample.trajectory
part.add(
doc.vertical_region(
0,
old_tc,
1 + len(self._plot_sample_list) - old_tc,
extend_bottom=False,
css_class=['correlation']))
return part, 1
def part_legend_step(self):
doc = self.doc
part = doc.g(class_='legend-step')
part.add(
doc.label(0, 0, 'step', css_class=['head'])
)
for pos_y, data in enumerate(self._plot_sample_list):
sample = data['sample']
if isinstance(self.generator, SampleListGenerator):
step = self.generator.get_step(sample)
if step is None:
                    # apparently this sample was not generated by any known step
txt = '*'
else:
txt = str(step.mccycle)
else:
txt = '?'
part.add(
doc.label(0, 1 + pos_y, txt)
)
return part, 1
def part_legend_active(self):
doc = self.doc
part = doc.g(class_='legend-active')
part.add(
doc.label(0, 0, 'active', css_class=['head'])
)
for pos_y, data in enumerate(self._plot_sample_list):
sample = data['sample']
if isinstance(self.generator, SampleListGenerator):
mccycles = self.generator.get_active_mccycles(sample)
if mccycles is None:
txt = '*'
else:
txt = self._set_of_int_to_str(mccycles)
else:
txt = '?'
part.add(
doc.label(0, 1 + pos_y, txt)
)
return part, 2
@staticmethod
def _set_of_int_to_str(ints):
        sorted_ints = sorted(ints) + [-1]
        first = None
        last = None
        out = []
        for ii in sorted_ints:
            if first is None:
                first = ii
            elif ii != last + 1:
                out.append('%d-%d' % (first, last))
                first = ii
            last = ii
        return ', '.join(out)
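    # e.g. _set_of_int_to_str({1, 2, 3, 7, 8}) gives '1-3, 7-8' and a single
    # cycle such as {5} gives '5-5'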
def _create_naming_fnc(self, fnc):
opts = self.options
return fnc or opts.format['default_label'] or (lambda obj: '')
@staticmethod
def _update_vis_block(vis_block, pos_y, shift, region):
# necessary to remember where we actually drew something
if pos_y not in vis_block:
vis_block[pos_y] = set()
vis_block[pos_y].update(range(shift + region[0], shift + region[1] + 1))
def use_storage_indices(self, storage):
"""
Set the default_labelling to use indices in the given storage
Parameters
----------
storage : :obj:`openpathsampling.Storage`
the storage to be used for indices
"""
self.options.format['default_label'] = storage.idx
def reset_options(self):
"""
Return the options to default
"""
self.options.movers.update({
'ReplicaExchangeMover': {
'name': 'RepEx',
'suffix': 'x',
'css_class': ['repex'],
'hide': True
},
'BackwardShootMover': {
'name': 'Backward',
'suffix': 'b',
'css_class': ['shooting']
},
'ForwardShootMover': {
'name': 'Forward',
'suffix': 'f',
'label_position': 'right',
'css_class': ['shooting']
},
'BackwardExtendMover': {
'name': 'Extend',
'suffix': 'b',
'overlap': 'line',
'css_class': ['extend']
},
'ForwardExtendMover': {
'name': 'Extend',
'suffix': 'f',
'overlap': 'line',
'label_position': 'right',
'css_class': ['extend']
},
'FinalSubtrajectorySelectMover': {
'name': 'Truncate',
'suffix': 't',
'label_position': 'right',
'css_class': ['extend']
},
'FirstSubtrajectorySelectMover': {
'name': 'Truncate',
'suffix': 't',
'css_class': ['extend']
},
'EnsembleHopMover': {
'name': 'Hop',
'suffix': 'h',
'css_class': ['hop']
},
'PathReversalMover': {
'name': 'Reversal',
'suffix': 'r',
'css_class': ['reversal']
},
'new': {
'name': 'New',
'suffix': '+',
'css_class': ['unknown']
},
'unknown': {
'name': '???',
'suffix': '?',
'css_class': ['repex']
},
'default': {
'name': '---',
'overlap': 'none',
'new': 'block',
'reversed': 'block',
'full': 'line',
'label': '',
'suffix': '?',
'label_position': 'left',
'css_class': [],
'hide': False
}
})
self.options.ui.update({
'legends': ['sample', 'correlation'],
'cv': True,
'info': False,
})
self.options.css.update({
'scale_x': 5,
'scale_y': 15,
'zoom': 1.0,
'horizontal_gap': False,
'width': '100%',
'mark_transparent': 'rejected'
})
self.options.format.update({
'default_label': lambda x: hex(id(x))[-5:] + ' ',
# 'default_label': lambda x: '',
'trajectory_label': lambda x: '',
'sample_label': None,
'step_label': None,
'snapshot_label': None,
'display_repeated': True,
'new_snapshots': True,
'repeated_snapshots': True
})
if self.generator and self.generator.steps:
self.options.ui['legends'] = ['step', 'correlation']
def reset(self):
"""
        Revert to the default options and remove all other settings as well
"""
self.reset_options()
self.states = {}
self.op = None
self.coloring = None
if self._generator is not None:
self._generator.set_default_settings()
class PathTree(PathTreeBuilder):
def __init__(self, steps, generator=None):
super(PathTree, self).__init__()
self.steps = steps
self.generator = generator
self.reset_options()
@property
def generator(self):
return self._generator
@generator.setter
def generator(self, generator):
self._generator = generator
if generator is not None:
self._generator.steps = self.steps
self._generator.update_tree_options(self)
@property
def steps(self):
return self._steps
@steps.setter
def steps(self, steps):
self._steps = StepList(steps)
if self.generator is not None:
self.generator.steps = self.steps
class SnapshotMatrix(object):
def __init__(self, sample_list):
self.sample_list = sample_list
self.matrix_x = {}
self.matrix_y = {}
self.shift = [0] * len(sample_list)
@property
def time_symmetric(self):
return self.sample_list.time_symmetric
def __setitem__(self, key, value):
y_pos = key[0]
x_pos = key[1]
if x_pos not in self.matrix_x:
self.matrix_x[x_pos] = {}
if y_pos not in self.matrix_y:
self.matrix_y[y_pos] = {}
if isinstance(value, paths.BaseSnapshot):
self.matrix_x[x_pos][y_pos] = value
self.matrix_y[y_pos][x_pos] = value
elif type(value) is paths.Trajectory:
for pos, snapshot in enumerate(value.as_proxies()):
self[y_pos, x_pos + pos] = snapshot
self.shift[y_pos] = x_pos
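    # Illustrative example of the assignment semantics (names are made up):
    #   matrix[2, -3] = some_trajectory
    # places that trajectory's snapshots in row 2 starting at column -3, so
    # matrix[2, -3] afterwards returns its first snapshot and
    # matrix.shift[2] == -3.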
def __getitem__(self, item):
y_pos = item[0]
x_pos = item[1]
if x_pos in self.matrix_x:
return self.matrix_x[x_pos][y_pos]
else:
raise KeyError(x_pos)
def get_x_range(self, y_pos):
xs = set(self.matrix_y[y_pos])
return range(min(xs), max(xs) + 1)
def get(self, y_pos, x_pos):
if x_pos in self.matrix_x:
return self.matrix_x[x_pos].get(y_pos)
else:
return None
def is_new(self, y_pos, x_pos):
snapshot = self[y_pos, x_pos]
x = self.matrix_x[x_pos]
pos = y_pos
while pos > 0:
new_y_pos = self.sample_list.parent(pos)
if not new_y_pos or new_y_pos > pos:
return True
pos = new_y_pos
if snapshot == x[pos]:
return False
return True
def _snapshot_is(self, snap1, snap2):
if not self.time_symmetric:
return snap1 == snap2
else:
if snap1 == snap2:
return True
else:
return snap1.reversed == snap2
def root(self, y_pos, x_pos):
snapshot = self[y_pos, x_pos]
x = self.matrix_x[x_pos]
pos = y_pos
while pos > 0:
new_y_pos = self.sample_list.parent(pos)
if new_y_pos is None or new_y_pos > pos:
return pos
if new_y_pos not in x or \
not self._snapshot_is(snapshot, x[new_y_pos]):
return pos
pos = new_y_pos
return pos
def parent(self, y_pos, x_pos):
snapshot = self[y_pos, x_pos]
x = self.matrix_x[x_pos]
if y_pos == 0:
return None
new_y_pos = self.sample_list.parent(y_pos)
if new_y_pos is None or new_y_pos > y_pos:
return None
if not self._snapshot_is(snapshot, x[new_y_pos]):
return None
return new_y_pos
class SampleList(OrderedDict):
"""
    A time-ordered series of `Sample` objects.
    This is effectively a list object enhanced with a few additional functions
    that simplify analysis. Although this can hold an arbitrary list of samples
    it is meant to represent a time evolution of samples and thus samples that
    have a causal relation.
    Examples would be the history of samples that lead to a specific sample
    (heritage) or the history of samples in a specific ensemble or of a given
    replica.
    It provides some useful filters that make sense for samples. And you can
    add a list of steps as context, i.e. the steps the samples were generated
    in. For analyzing the evolution of a path you do not need the context. It
    is mostly for error checking and inspecting moves, while analyzing in the
    step context allows you to analyze decorrelation of paths.
Attributes
----------
time_symmetric : bool, default: `True`
        if `True` a snapshot and its reversed counterpart will be treated
        alike.
flip_time_direction : bool, default: `False`
        if `True` the sample list detects if a reversal happens between two
        successive samples and will reverse the time direction to counter
        the flip. This results in a much clearer picture and shows the
        redundancy of snapshots when reversing trajectories. Use with care:
        it will distort the sense of time from left to right in the
        generated picture
trace_missing : bool, default: `False`
        if `True` the alignment between trajectories will be traced using the
        `.parent` property even if a sample is not contained in the sample
        list itself. Imagine you are looking only at the evolution of a
        particular replica after a complete MC step. Such a step might
        involve several shooting moves that completely decorrelate a sample
        from its listed predecessor. Usually the closest parent is used as a
        reference and overlapping parts are aligned. If the closest parent
        has no overlap (because it is completely decorrelated) we cannot
        simply align. In that case a new hidden sample list tracing the
        parents up to the closest parent can be created to determine the
        relative shift. This is done if `trace_missing` is `True`. If
        `False`, two such samples are treated as unrelated and the new one
        is placed at position zero, just like the very first sample in the
        list.
Notes
-----
This is a special `OrderedDict` of the form
    `{ samp1: information, samp2: information }`. So, if you index by integer
    you will get the sample at that position, while indexing by a sample
    directly acts like a regular dict lookup. So the following will actually
    work and return the information of the sample at index 3 in the list.
>>> sl = SampleList()
>>> print sl[sl[3]]
It seemed to make sense to provide a possibility to access a specific
index in an OrderedDict, which is not possible in the base
implementation.
"""
def __init__(
self,
samples,
time_symmetric=True,
flip_time_direction=False,
trace_missing=False
):
OrderedDict.__init__(self)
self._time_symmetric = time_symmetric
self._flip_time_direction = flip_time_direction
self._trace_missing = trace_missing
self._matrix = []
self._steps = None
if hasattr(samples, '__iter__'):
for s in samples:
self[s] = {}
else:
self[samples] = {}
self.analyze()
def set_default_settings(self):
self._time_symmetric = True
self._flip_time_direction = False
self._trace_missing = False
self.analyze()
def filter(self, filter_func):
"""
Keep only samples where the filter function returns True
Parameters
----------
filter_func : callable
a function that is called on all sample, data pairs. If `True` is
returned the sample is kept, otherwise the sample will be removed
from the list. The function can be called with either
            `filter_func(sample, data_dict)` or `filter_func(sample)`,
            depending on how many parameters the function accepts. The data
            dict is the information contained in `sample_list[sample]`
"""
try:
# see, if the filter function accepts two parameters
self.set_samples([
samp for samp, data in self.items() if filter_func(samp, data)
])
except:
self.set_samples([
samp for samp in self if filter_func(samp)
])
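    # Example filters (assumed usage):
    #   sample_list.filter(lambda s: s.replica == 0)            # one-argument form
    #   sample_list.filter(SampleList.filter_redundant_moves)   # two-argument form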
@property
def steps(self):
"""
list of `openpathsampling.MCStep` : The list of steps giving the context
for the samples. Currently samples do no contain information about
the context / step they were generated in.
"""
return self._steps
@steps.setter
def steps(self, value):
self._steps = value
@staticmethod
def filter_redundant_moves(samp, data):
"""
        A filter that keeps only samples which are not identical to the
        previous one
"""
return not data['length'] == data['length_shared']
@property
def matrix(self):
"""
:obj:`SnapshotMatrix`
a generated sparse matrix of snapshots. Mostly used for plotting
purposes
"""
return self._matrix
def set_samples(self, samples):
"""
Parameters
----------
samples : list of :obj:`openpathsampling.Sample`
the list of samples to be inspected. This will trigger reevaluation
of the current list of samples
"""
self.clear()
for s in samples:
self[s] = {}
self.analyze()
@staticmethod
def from_ancestors(sample):
"""
Generate a :obj:`SampleList` from the ancestors of a given sample
Parameters
----------
sample : :obj:`openpathsampling.Sample`
            the sample from which the ancestry is traced. It will follow the
`.parent` property until no parent is found
Returns
-------
:obj:`SampleList`
the generated list of samples
"""
l = []
while sample is not None:
l.append(sample)
sample = sample.parent
return SampleList(reversed(l))
@staticmethod
def from_steps(steps, replica, accepted):
"""
Generate a :obj:`SampleList` from a list of step and a replica ID
Parameters
----------
steps : list of :obj:`openpathsampling.MCStep`
the list of simulation steps to be inspected and turned into a
list of samples
replica : int
the replica ID to be traced
accepted : bool
if `True` only accepted samples will be included in the list.
Otherwise it will also contain trial samples
Returns
-------
:obj:`SampleList`
the generated list of samples
"""
sl = SampleList(SampleList._get_samples_from_steps(
steps, replica, accepted))
sl.steps = steps
return sl
@staticmethod
def _get_samples_from_steps(steps, replica, accepted, intermediates=True):
if accepted:
samples = []
for step in steps:
if step.active and replica in step.active:
next_sample = step.active[replica]
if intermediates:
# add the intermediate samples to completely trace
# where we came from and allow only samples that
# happened in this step
samp = next_sample.parent
add_samples = []
while samp is not None and steps.get_step(samp) == step and samp is not samples[-1]:
add_samples.append(samp)
samp = samp.parent
samples.extend(list(reversed(add_samples)))
samples.append(next_sample)
return samples
else:
samp = steps[0].active[replica]
samples = [samp]
for step in steps:
rep_trials = [s for s in step.change.trials
if s.replica == replica]
if len(rep_trials) > 0:
samples.append(rep_trials[-1])
return samples
def without_redundant(self):
"""
Remove all redundant samples and return a new object
Redundant samples are samples where the overlap with the previous
sample is effectively all samples. This depends on the analysis settings
like `time_symmetric` and `flip_time_direction`
Returns
-------
:obj:`SampleList`
the generated list of samples
"""
l = SampleList([
samp for samp, data in self.items()
if data['length_shared'] < data['length']])
l.flip_time_direction = self.flip_time_direction
l.time_symmetric = self.time_symmetric
return l
def remove_redundant(self):
"""
Remove all redundant samples from the current object.
Redundant samples are samples where the overlap with the previous
sample is effectively all samples. This depends on the analysis
settings like `time_symmetric` and `flip_time_direction`
"""
l = [
samp for samp, data in self.items()
if data['length_shared'] < data['length']]
self.set_samples(l)
def flatten_to_main(self):
"""
Remove all redundant samples from the current object.
Redundant samples are samples where the overlap with the previous
sample is effectively all samples. This depends on the analysis settings
like `time_symmetric` and `flip_time_direction`
"""
l = [samp for samp, data in self.items() if data['level'] == 0]
self.set_samples(l)
@property
def time_symmetric(self):
return self._time_symmetric
@time_symmetric.setter
def time_symmetric(self, value):
self._time_symmetric = value
self.analyze()
@property
def flip_time_direction(self):
return self._flip_time_direction
@flip_time_direction.setter
def flip_time_direction(self, value):
self._flip_time_direction = value
self.analyze()
@property
def trace_missing(self):
return self._trace_missing
@trace_missing.setter
def trace_missing(self, value):
self._trace_missing = value
self.analyze()
def __getitem__(self, item):
if type(item) is slice:
return SampleList(self.keys()[item])
elif isinstance(item, list):
return [self[s] for s in item]
elif type(item) is int:
return self.keys()[item]
else:
return OrderedDict.__getitem__(self, item)
def index(self, value):
"""
Return the index of a sample in the list
Parameters
----------
value : :obj:`openpathsampling.Sample`
Returns
-------
int
            the index if present in the list; raises a ValueError otherwise
"""
return self.keys().index(value)
def parent(self, idx):
"""
Return the index of the next present parent of an index or sample
        Next present parent means that, starting from the given sample, we
        check whether the direct parent is in the list. If so, its index is
        returned. If not, we recursively try the parent of the parent and so
        on until we find a sample that is present, or return None.
Parameters
----------
idx : :obj:`openpathsampling.Sample` or int
If an `int` is given the Sample at the index in the list is used,
            otherwise the sample is used for finding the parent
Returns
-------
int or None
the index of the parent in the list if present. None otherwise.
"""
try:
if type(idx) is int:
samp = self[idx]
else:
samp = idx
parent = samp.parent
while parent not in self and parent is not None:
parent = parent.parent
return self.keys().index(parent)
except ValueError:
return None
def _trajectory_index(self, trajectory, snapshot):
if self.time_symmetric:
return trajectory.index_symmetric(snapshot)
else:
return trajectory.index(snapshot)
def _trajectory_contains(self, trajectory, snapshot):
if self.time_symmetric:
return trajectory.contains_symmetric(snapshot)
else:
return snapshot in trajectory
def analyze(self):
"""
Perform the analysis of the samples.
        It will loop through the list of samples and determine the overlap of each
        sample with its parent. Note that at this point there is no move that can
        create a sample from more than one initial sample, so it is enough to assume
        that a sample has a single parent, its origin as determined by the mover.
        Since the parent is unique, the alignment is based upon the position of the
        parent or (if samples are missing) on the closest ancestor.
        The alignment is chosen such that parts that exist in both trajectories are
        placed on top of each other. If `time_symmetric` is set, this also holds when
        the trajectories are reversed.
        If you set `flip_time_direction = True`, samples might be displayed in reverse
        order to perfectly align reversed ones. This means that in a plot the direction
        of time, but not of correlation, will change. Imagine having samples between
        states A and B: if you start with A -> B, the plot keeps the initial direction
        even though a time reversal move goes from B -> A while being perfectly reversed.
Should be called automatically when relevant changes are detected.
"""
matrix = SnapshotMatrix(self)
flip_time_direction = self.flip_time_direction
parent = None
time_direction = +1
for y_pos, sample in enumerate(self):
traj = sample.trajectory
length = len(traj)
parent_shift = 0
parent_traj = None
overlap = None
if sample.parent is not None:
parent = sample.parent
if parent not in self:
while parent not in self and parent is not None:
parent = parent.parent
if parent is None:
time_direction = +1
if parent is not None:
parent_shift = self[parent]['shift']
time_direction = self[parent]['time_direction']
parent_traj = parent.trajectory
if time_direction == -1:
traj = paths.Trajectory(list(reversed(traj.as_proxies())))
parent_traj = paths.Trajectory(list(reversed(parent_traj.as_proxies())))
overlap = parent_traj.shared_subtrajectory(traj, time_reversal=self.time_symmetric)
overlap_length = len(overlap)
if overlap is None or len(overlap) == 0:
# no overlap so we need to start new
if not self.trace_missing:
traj_shift = 0
elif parent is not None:
# if a parent is present but no overlap we could trace the missing chain
# and use this shift. This is "expensive" so by default it is switched off
current = paths.Sample(
replica=sample.replica,
trajectory=traj,
ensemble=sample.ensemble,
bias=sample.bias,
# details=sample.details,
parent=sample.parent,
mover=sample.mover
)
parent_list = [current]
while current is not parent and current is not None:
current = current.parent
parent_list.append(current)
if current is None:
                        # cannot trace to the actual parent. That should not be possible
                        # since we found a parent before, so this is just a safeguard
traj_shift = 0
else:
missing_sl = SampleList(
reversed(parent_list),
time_symmetric=self.time_symmetric,
flip_time_direction=self.flip_time_direction,
trace_missing=False
)
traj_shift = parent_shift + missing_sl[missing_sl.last]['shift']
else:
traj_shift = 0
self[sample] = {
'shift': traj_shift,
'new': True,
'time_direction': time_direction,
'correlation': 0.0,
'length': len(traj),
'level': 0,
'length_shared': 0,
'length_fw': 0,
'length_bw': 0,
'overlap_reversed': False
}
else:
new_fw = self._trajectory_index(traj, overlap.get_as_proxy(-1))
new_bw = self._trajectory_index(traj, overlap.get_as_proxy(0))
overlap_reversed = False
if new_bw > new_fw:
overlap_reversed = True
new_fw, new_bw = new_bw, new_fw
if flip_time_direction:
# reverse the time and adjust the shifting
traj = paths.Trajectory(list(reversed(traj.as_proxies())))
time_direction *= -1
overlap_reversed = False
new_fw, new_bw = length - 1 - new_bw, length - 1 - new_fw
else:
                        # reversed overlap without a time flip: do not count it as shared
overlap_length = 0
traj_shift = parent_shift + self._trajectory_index(parent_traj, overlap.get_as_proxy(0)) - new_bw
self[sample] = {
'shift': traj_shift,
'length_fw': length - 1 - new_fw,
'length_bw': new_bw,
'length_shared': overlap_length,
'length': length,
'overlap_reversed': overlap_reversed,
'new': False,
'time_direction': time_direction,
'correlation': (1.0 * overlap_length) / len(traj),
'parent_y': self.parent(sample),
'level': 0
}
matrix[y_pos, traj_shift] = traj
parent = sample
self._matrix = matrix
for sample in reversed(self):
pos_y = self.index(sample)
pos_parent = self.parent(sample)
if pos_parent is not None and pos_parent < pos_y - 1:
for pos in range(pos_parent + 1, pos_y):
self[self[pos]]['level'] += 1
@property
def correlation(self):
"""
Return a list of correlation between neighboring samples in the list
The correlation is the fraction of shared snapshots. If `time_symmetric` is set
then this is taken into account and reversing of snapshots is ignored.
Returns
-------
list of float
the list of correlations
"""
return [s['correlation'] for s in self.values()]
@property
def decorrelated_trajectories(self):
"""List of decorrelated trajectories from the internal samples.
In path sampling, two trajectories are said to be "decorrelated" if
they share no frames in common. This is particularly important in
one-way shooting. This function returns the list of trajectories,
making the number (i.e., the length of the list) also easily
accessible.
        Note that this only traces the main path of samples, so rejected
        parts are not taken into account.
Returns
-------
        list of :obj:`openpathsampling.Trajectory`
"""
return [samp.trajectory for samp in self.decorrelated]
@property
def decorrelated(self):
"""List of decorrelated samples from the internal samples.
In path sampling, two trajectories are said to be "decorrelated" if
they share no frames in common. This is particularly important in
        one-way shooting. This function returns the list of samples,
        making the number (i.e., the length of the list) also easily
        accessible.
        Note that this only traces the main path of samples, so rejected
        parts are not taken into account.
        Returns
        -------
        list of :obj:`openpathsampling.Sample`
"""
prev = self[0].trajectory
decorrelated = [self[0]]
for s in self:
# check if we are on the main path of evolution and not
# something that is rejected at some point
if self[s]['level'] == 0:
if not s.trajectory.is_correlated(prev, self.time_symmetric):
decorrelated.append(s)
prev = s.trajectory
return decorrelated
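    # Hedged example (``sl`` is a hypothetical, populated SampleList; not part
    # of this module):
    #
    #     n_decorrelated = len(sl.decorrelated_trajectories)
    #     print('number of decorrelated trajectories:', n_decorrelated)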
@property
def first(self):
"""
:obj:`openpathsampling.Sample`
Returns the first sample in the list
"""
return self[0]
@property
def last(self):
"""
:obj:`openpathsampling.Sample`
Returns the last sample in the list
"""
return self[-1]
class StepList(list):
def __init__(self, steps):
list.__init__(self, steps)
self._create_step_sample_list()
def _create_step_sample_list(self):
# TODO: This will someday be replaced by a `sample.step` property
self._sample_created_step_list = dict()
self._sample_active_step_list = dict()
self._sample_active_step_list_mccycle = dict()
self._sample_change_list = dict()
for step in self:
# TODO: This is a fix for the use of EmptyMoveChange for
# the initial step. We should use a special step that introduces
# the initial samples to the mccycle instead.
for s in step.active.samples:
if s not in self._sample_active_step_list:
self._sample_active_step_list[s] = [step]
self._sample_active_step_list_mccycle[s] = [step.mccycle]
else:
self._sample_active_step_list[s].append(step)
self._sample_active_step_list_mccycle[s].append(step.mccycle)
if s not in self._sample_created_step_list:
self._sample_created_step_list[s] = step
for ch in step.change:
for s in ch.samples:
self._sample_created_step_list[s] = step
self._sample_change_list[s] = ch
def get_step(self, sample):
"""
Return the step in which a sample was generated
Parameters
----------
sample : :obj:`Sample`
the sample to find the generating `MCStep` from
Returns
-------
:obj:`MCStep`
the step in which the sample was generated
Notes
-----
A sample can appear in other moves as well, but it is uniquely generated in
one move and thus during one step
"""
return self._sample_created_step_list.get(sample)
def get_active_steps(self, sample):
"""
Return the steps in which a sample was in the active sampleset
Parameters
----------
sample : :obj:`Sample`
the sample to find the appearing `MCStep` from
Returns
-------
list of :obj:`MCStep`
the steps in which the sample was in the active sampleset
Notes
-----
A sample can appear in other moves as well, but it is uniquely
generated in one move and thus during one step. This will list all
        steps where the sample is in the _final_ active sampleset. This is
        usually a range of steps from where it was first generated to the
step before it is replaced.
"""
return self._sample_active_step_list.get(sample)
def get_active_mccycles(self, sample):
"""
Return the mccycles in which a sample was in the active sampleset
Parameters
----------
sample : :obj:`Sample`
the sample to find the mccycles where it was in an active sampleset
Returns
-------
list of int
the mccycles in which the sample was in the active sampleset
Notes
-----
A sample can appear in other moves as well, but it is uniquely
generated in one move and thus during one step. This will list all
        steps where the sample is in the _final_ active sampleset. This is
        usually a range of steps from where it was first generated to the
step before it is replaced.
"""
return self._sample_active_step_list_mccycle.get(sample)
def get_mccycle(self, sample):
"""
Return the MC cycle in which a sample was generated
Parameters
----------
sample : :obj:`Sample`
the sample to find the generating `MCStep` from
Returns
-------
int
the cycle number in which the sample was generated
"""
return self._sample_created_step_list.get(sample).mccycle
def get_change(self, sample):
"""
Return the (sub-)change in which a sample was generated
Parameters
----------
sample : :obj:`Sample`
the sample to find the generating `MCStep` from
Returns
-------
:obj:`MoveChange`
the move change in which the sample was generated
"""
return self._sample_change_list.get(sample)
@property
def samples(self):
return self._sample_created_step_list.keys()
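    # A minimal usage sketch for StepList (``storage`` and ``sample`` are
    # hypothetical objects, not defined in this module):
    #
    #     steps = StepList(list(storage.steps))
    #     step = steps.get_step(sample)       # step that generated the sample
    #     cycle = steps.get_mccycle(sample)   # MC cycle of that step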
class SampleListGenerator(SampleList):
"""
An ordered list of `Sample`s analyzed in the context of a list of `MCStep`s
    You often want to analyze the evolution of replicas during a simulation. This object
    will mimic a list of Samples generated from steps to your liking.
"""
class UpdateSampleProperty(object):
def __init__(self, var):
if var[0] != '_':
var = '_' + var
self.var = var
def __get__(self, instance, owner):
return getattr(instance, self.var)
def __set__(self, instance, value):
setattr(instance, self.var, value)
if hasattr(instance, '_update_sample'):
instance._update_sample()
steps = UpdateSampleProperty('steps')
def __init__(self):
super(SampleListGenerator, self).__init__([])
self._steps = None
def _update_sample(self):
pass
def update_tree_options(self, tree):
pass
# Delegate functions to access methods in self.steps
def get_mccycle(self, sample):
"""
Return the MC cycle in which a sample was generated
Parameters
----------
sample : :obj:`Sample`
the sample to find the generating `MCStep` from
Returns
-------
int
the cycle number in which the sample was generated
"""
return self.steps.get_mccycle(sample)
def get_step(self, sample):
"""
Return the step in which a sample was generated
Parameters
----------
sample : :obj:`Sample`
the sample to find the generating `MCStep` from
Returns
-------
:obj:`MCStep`
the step in which the sample was generated
Notes
-----
A sample can appear in other moves as well, but it is uniquely generated in
one move and thus during one step
"""
return self.steps.get_step(sample)
def get_change(self, sample):
"""
Return the (sub-)change in which a sample was generated
Parameters
----------
sample : :obj:`Sample`
the sample to find the generating `MCStep` from
Returns
-------
:obj:`MoveChange`
the move change in which the sample was generated
"""
return self.steps.get_change(sample)
def get_active_steps(self, sample):
"""
Return the steps in which a sample was in the active sampleset
Parameters
----------
sample : :obj:`Sample`
the sample to find the appearing `MCStep` from
Returns
-------
list of :obj:`MCStep`
the steps in which the sample was in the active sampleset
Notes
-----
A sample can appear in other moves as well, but it is uniquely
generated in one move and thus during one step. This will list all
        steps where the sample is in the _final_ active sampleset. This is
        usually a range of steps from where it was first generated to the
step before it is replaced.
"""
return self.steps.get_active_steps(sample)
def get_active_mccycles(self, sample):
"""
Return the mccycles in which a sample was in the active sampleset
Parameters
----------
sample : :obj:`Sample`
the sample to find the mccycles where it was in an active sampleset
Returns
-------
list of int
the mccycles in which the sample was in the active sampleset
Notes
-----
A sample can appear in other moves as well, but it is uniquely
generated in one move and thus during one step. This will list all
        steps where the sample is in the _final_ active sampleset. This is
        usually a range of steps from where it was first generated to the
step before it is replaced.
"""
return self.steps.get_active_mccycles(sample)
class ReplicaEvolution(SampleListGenerator):
"""
An ordered list of `Sample`s analyzed in the context of a list of `MCStep`s
    You often want to analyze the evolution of replicas during a simulation. This object
    will mimic a list of Samples generated from steps to your liking.
"""
replica = SampleListGenerator.UpdateSampleProperty('replica')
accepted = SampleListGenerator.UpdateSampleProperty('accepted')
intermediates = SampleListGenerator.UpdateSampleProperty('intermediates')
def __init__(self, replica, accepted=True, intermediates=True):
super(ReplicaEvolution, self).__init__()
self._replica = replica
self._accepted = accepted
self._intermediates = intermediates
self._update_sample()
def _update_sample(self):
if self.steps:
self.set_samples(SampleList._get_samples_from_steps(
self.steps,
self._replica,
self._accepted,
self._intermediates
))
self.analyze()
def update_tree_options(self, tree):
tree.options.css['mark_transparent'] = 'rejected'
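    # Hedged usage sketch (``storage`` is hypothetical; ``steps`` must be a
    # StepList so the delegated lookups work):
    #
    #     evo = ReplicaEvolution(replica=0, accepted=True)
    #     evo.steps = StepList(list(storage.steps))   # triggers _update_sample()
    #     print(evo.correlation)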
class SampleAncestors(SampleListGenerator):
def __init__(self, sample):
super(SampleAncestors, self).__init__()
self._sample = sample
sample = SampleListGenerator.UpdateSampleProperty('sample')
def _update_sample(self):
sample = self.sample
l = []
while sample is not None and (not self.steps or sample in self.steps.samples):
l.append(sample)
sample = sample.parent
self.set_samples(SampleList(reversed(l)))
def update_tree_options(self, tree):
tree.options.css['mark_transparent'] = 'auxiliary'
class EnsembleEvolution(SampleListGenerator):
"""
An ordered list of `Sample`s analyzed in the context of a list of `MCStep`s
    You often want to analyze the evolution of an ensemble during a simulation. This object
    will mimic a list of Samples generated from steps to your liking.
"""
ensemble = SampleListGenerator.UpdateSampleProperty('ensemble')
accepted = SampleListGenerator.UpdateSampleProperty('accepted')
def __init__(self, ensemble, accepted=True):
super(EnsembleEvolution, self).__init__()
self._ensemble = ensemble
self._accepted = accepted
def _update_sample(self):
self.set_samples([
step.active[self.ensemble] for step in self.steps
if not self.accepted or step.change.accepted
])
def update_tree_options(self, tree):
tree.options.css['mark_transparent'] = 'rejected'
| jhprinz/openpathsampling | openpathsampling/visualize.py | Python | lgpl-2.1 | 92,760 | 0.000571 |
"""
Boolean Operations
~~~~~~~~~~~~~~~~~~
Perform boolean operations with closed surfaces (intersect, cut, etc.).
Boolean/topological operations (intersect, cut, etc.) methods are implemented
for :class:`pyvista.PolyData` mesh types only and are accessible directly from
any :class:`pyvista.PolyData` mesh. Check out :class:`pyvista.PolyDataFilters`
and take a look at the following filters:
* :func:`pyvista.PolyDataFilters.boolean_add`
* :func:`pyvista.PolyDataFilters.boolean_cut`
* :func:`pyvista.PolyDataFilters.boolean_difference`
* :func:`pyvista.PolyDataFilters.boolean_union`
For merging, the ``+`` operator can be used between any two meshes in PyVista
which simply calls the ``.merge()`` filter to combine any two meshes.
Similarly, the ``-`` operator can be used between any two :class:`pyvista.PolyData`
meshes in PyVista to cut the first mesh by the second.
"""
# sphinx_gallery_thumbnail_number = 6
import pyvista as pv
import numpy as np
def make_cube():
x = np.linspace(-0.5, 0.5, 25)
grid = pv.StructuredGrid(*np.meshgrid(x, x, x))
return grid.extract_surface().triangulate()
# Create two example PolyData meshes for boolean operations
sphere = pv.Sphere(radius=0.65, center=(0, 0, 0))
cube = make_cube()
p = pv.Plotter()
p.add_mesh(sphere, color="yellow", opacity=0.5, show_edges=True)
p.add_mesh(cube, color="royalblue", opacity=0.5, show_edges=True)
p.show()
###############################################################################
# Boolean Add
# +++++++++++
#
# Add all of the two meshes together using the
# :func:`pyvista.PolyDataFilters.boolean_add` filter or the ``+`` operator.
#
# Order of operations does not matter for boolean add as the entirety of both
# meshes is appended together.
add = sphere + cube
add.plot(opacity=0.5, color=True, show_edges=True)
###############################################################################
# Boolean Cut
# +++++++++++
#
# Perform a boolean cut of ``a`` using ``b`` with the
# :func:`pyvista.PolyDataFilters.boolean_cut` filter or the ``-`` operator
# since both meshes are :class:`pyvista.PolyData`.
#
# Order of operations does not matter for boolean cut.
cut = cube - sphere
p = pv.Plotter()
p.add_mesh(cut, opacity=0.5, show_edges=True, color=True)
p.show()
###############################################################################
# Boolean Difference
# ++++++++++++++++++
#
# Combine two meshes and retain only the volume in common between the meshes
# using the :func:`pyvista.PolyDataFilters.boolean_difference` method.
#
# Note that the order of operations for a boolean difference will affect the
# results.
diff = sphere.boolean_difference(cube)
p = pv.Plotter()
p.add_mesh(diff, opacity=0.5, show_edges=True, color=True)
p.show()
###############################################################################
diff = cube.boolean_difference(sphere)
p = pv.Plotter()
p.add_mesh(diff, opacity=0.5, show_edges=True, color=True)
p.show()
###############################################################################
# Boolean Union
# +++++++++++++
#
# Combine two meshes and attempt to create a manifold mesh using the
# :func:`pyvista.PolyDataFilters.boolean_union` method.
#
# Order of operations does not matter for boolean union.
union = sphere.boolean_union(cube)
p = pv.Plotter()
p.add_mesh(union, opacity=0.5, show_edges=True, color=True)
p.show()
| akaszynski/vtkInterface | examples/01-filter/boolean-operations.py | Python | mit | 3,395 | 0.000884 |
#!/usr/bin/env conda-execute
# conda execute
# env:
# - python 2.7.*
# - conda-smithy
# - pygithub 1.*
# - six
# - conda-build
# channels:
# - conda-forge
# run_with: python
from __future__ import print_function
import argparse
import collections
import os
import six
from github import Github
import github
import yaml
from conda_build.metadata import MetaData
parser = argparse.ArgumentParser(description='Update conda-forge team memberships from feedstock maintainer lists.')
parser.add_argument('feedstocks_clone', help="The location of the feedstocks directory within the conda-forge/feedstocks clone.")
args = parser.parse_args()
from conda_smithy.github import gh_token
token = gh_token()
gh = Github(token)
conda_forge = gh.get_organization('conda-forge')
teams = {team.name: team for team in conda_forge.get_teams()}
feedstocks_path = args.feedstocks_clone
packages_visited = set()
all_members = set()
from random import choice
superlative = ['awesome', 'slick', 'formidable', 'awe-inspiring', 'breathtaking',
'magnificent', 'wonderous', 'stunning', 'astonishing', 'superb',
'splendid', 'impressive', 'unbeatable', 'excellent', 'top', 'outstanding',
'exalted', 'standout', 'smashing']
# Go through each of the feedstocks and ensure that the team is up to date and that
# there is nobody in the team which doesn't belong (i.e. isn't in the maintainers list).
for package_name in os.listdir(feedstocks_path):
print("Checking {}".format(package_name))
packages_visited.add(package_name)
feedstock = os.path.join(feedstocks_path, package_name)
recipe = os.path.join(feedstock, 'recipe', 'meta.yaml')
if not os.path.exists(recipe):
print("The {} feedstock is recipe less".format(package_name))
continue
meta = MetaData(os.path.dirname(recipe))
contributors = meta.meta.get('extra', {}).get('recipe-maintainers', [])
if not isinstance(contributors, list):
# Deal with a contribution list which has dashes but no spaces
# (e.g. https://github.com/conda-forge/pandoc-feedstock/issues/1)
contributors = [contributors.lstrip('-')]
contributors = set(handle.lower() for handle in contributors)
all_members.update(contributors)
# If the team already exists, get hold of it.
team = teams.get(package_name)
if not team:
print("Team {} does not exist in conda-forge organization".format(package_name))
continue
current_members = team.get_members()
member_handles = set([member.login.lower() for member in current_members])
for new_member in contributors - member_handles:
headers, data = team._requester.requestJsonAndCheck(
"PUT",
team.url + "/memberships/" + new_member)
for old_member in member_handles - contributors:
print("AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}".format(old_member, package_name))
# The following works, it is just a bit scary!
# headers, data = team._requester.requestJsonAndCheck(
# "DELETE",
# team.url + "/memberships/" + old_member)
# Create and administer the all-members team.
team = teams.get('all-members')
if not team:
raise RuntimeError("Team all-members does not exist in conda-forge organization")
current_members = team.get_members()
member_handles = set([member.login.lower() for member in current_members])
for new_member in all_members - member_handles:
headers, data = team._requester.requestJsonAndCheck(
"PUT",
team.url + "/memberships/" + new_member)
for old_member in member_handles - all_members:
print("AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM all-members".format(old_member))
# Remove any teams which don't belong any more (because there is no longer a feedstock).
for team_to_remove in set(teams.keys()) - set(packages_visited):
if team_to_remove in ['Core',
'conda-forge.github.io',
'all-members',
'conda-forge-anvil',
'conda-forge-webservices',
'staged-recipes']:
print('Keeping ', team_to_remove)
continue
print("THE {} TEAM NEEDS TO BE REMOVED.".format(team_to_remove))
# The following works, it is just a bit scary!
# teams[team_to_remove].delete()
| conda-forge/conda-forge.github.io | scripts/update_teams.py | Python | bsd-3-clause | 4,482 | 0.00357 |
from django import forms
from django.apps import apps
from django.contrib.auth import get_permission_codename, get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.template.defaultfilters import slugify
from django.utils.encoding import force_str
from django.utils.translation import gettext, gettext_lazy as _
from cms import api
from cms.apphook_pool import apphook_pool
from cms.cache.permissions import clear_permission_cache
from cms.constants import PAGE_TYPES_ID, PUBLISHER_STATE_DIRTY, ROOT_USER_LEVEL
from cms.exceptions import PluginLimitReached
from cms.extensions import extension_pool
from cms.forms.validators import (
validate_overwrite_url, validate_relative_url, validate_url_uniqueness,
)
from cms.forms.widgets import (
AppHookSelect, ApplicationConfigSelect, UserSelectAdminWidget,
)
from cms.models import (
CMSPlugin, GlobalPagePermission, Page, PagePermission, PageType, PageUser,
PageUserGroup, Placeholder, Title, TreeNode,
)
from cms.models.permissionmodels import User
from cms.plugin_pool import plugin_pool
from cms.signals.apphook import set_restart_trigger
from cms.utils.compat.forms import UserChangeForm
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import get_language_list, get_language_object
from cms.utils.permissions import (
get_current_user, get_subordinate_groups, get_subordinate_users,
get_user_permission_level,
)
from menus.menu_pool import menu_pool
def get_permission_accessor(obj):
User = get_user_model()
if isinstance(obj, (PageUser, User,)):
rel_name = 'user_permissions'
else:
rel_name = 'permissions'
return getattr(obj, rel_name)
def get_page_changed_by_filter_choices():
# This is not site-aware
# Been like this forever
# Would be nice for it to filter out by site
values = (
Page
.objects
.filter(publisher_is_draft=True)
.distinct()
.order_by('changed_by')
.values_list('changed_by', flat=True)
)
yield ('', _('All'))
for value in values:
yield (value, value)
def get_page_template_filter_choices():
yield ('', _('All'))
for value, name in get_cms_setting('TEMPLATES'):
yield (value, name)
def save_permissions(data, obj):
models = (
(Page, 'page'),
(PageUser, 'pageuser'),
(PageUserGroup, 'pageuser'),
(PagePermission, 'pagepermission'),
)
if not obj.pk:
# save obj, otherwise we can't assign permissions to him
obj.save()
permission_accessor = get_permission_accessor(obj)
for model, name in models:
content_type = ContentType.objects.get_for_model(model)
for key in ('add', 'change', 'delete'):
# add permission `key` for model `model`
codename = get_permission_codename(key, model._meta)
permission = Permission.objects.get(content_type=content_type, codename=codename)
field = 'can_%s_%s' % (key, name)
if data.get(field):
permission_accessor.add(permission)
elif field in data:
permission_accessor.remove(permission)
class CopyPermissionForm(forms.Form):
"""
Holds the specific field for permissions
"""
copy_permissions = forms.BooleanField(
label=_('Copy permissions'),
required=False,
initial=True,
)
class BasePageForm(forms.ModelForm):
_user = None
_site = None
_language = None
title = forms.CharField(label=_("Title"), max_length=255, widget=forms.TextInput(),
help_text=_('The default title'))
slug = forms.CharField(label=_("Slug"), max_length=255, widget=forms.TextInput(),
help_text=_('The part of the title that is used in the URL'))
menu_title = forms.CharField(label=_("Menu Title"), widget=forms.TextInput(),
help_text=_('Overwrite what is displayed in the menu'), required=False)
page_title = forms.CharField(label=_("Page Title"), widget=forms.TextInput(),
help_text=_('Overwrites what is displayed at the top of your browser or in bookmarks'),
required=False)
meta_description = forms.CharField(label=_('Description meta tag'), required=False,
widget=forms.Textarea(attrs={'maxlength': '320', 'rows': '4'}),
help_text=_('A description of the page used by search engines.'),
max_length=320)
class Meta:
model = Page
fields = []
def clean_slug(self):
slug = slugify(self.cleaned_data['slug'])
if not slug:
raise ValidationError(_("Slug must not be empty."))
return slug
class AddPageForm(BasePageForm):
source = forms.ModelChoiceField(
label=_(u'Page type'),
queryset=Page.objects.filter(
is_page_type=True,
publisher_is_draft=True,
),
required=False,
)
parent_node = forms.ModelChoiceField(
queryset=TreeNode.objects.all(),
required=False,
widget=forms.HiddenInput(),
)
class Meta:
model = Page
fields = ['source']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
source_field = self.fields.get('source')
if not source_field or source_field.widget.is_hidden:
return
root_page = PageType.get_root_page(site=self._site)
if root_page:
# Set the choicefield's choices to the various page_types
descendants = root_page.get_descendant_pages().filter(is_page_type=True)
titles = Title.objects.filter(page__in=descendants, language=self._language)
choices = [('', '---------')]
choices.extend((title.page_id, title.title) for title in titles)
source_field.choices = choices
else:
choices = []
if len(choices) < 2:
source_field.widget = forms.HiddenInput()
def clean(self):
data = self.cleaned_data
if self._errors:
# Form already has errors, best to let those be
# addressed first.
return data
parent_node = data.get('parent_node')
if parent_node:
slug = data['slug']
parent_path = parent_node.item.get_path(self._language)
path = u'%s/%s' % (parent_path, slug) if parent_path else slug
else:
path = data['slug']
try:
# Validate the url
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
)
except ValidationError as error:
self.add_error('slug', error)
else:
data['path'] = path
return data
def clean_parent_node(self):
parent_node = self.cleaned_data.get('parent_node')
if parent_node and parent_node.site_id != self._site.pk:
raise ValidationError("Site doesn't match the parent's page site")
return parent_node
def create_translation(self, page):
data = self.cleaned_data
title_kwargs = {
'page': page,
'language': self._language,
'slug': data['slug'],
'path': data['path'],
'title': data['title'],
}
if 'menu_title' in data:
title_kwargs['menu_title'] = data['menu_title']
if 'page_title' in data:
title_kwargs['page_title'] = data['page_title']
if 'meta_description' in data:
title_kwargs['meta_description'] = data['meta_description']
return api.create_title(**title_kwargs)
def from_source(self, source, parent=None):
new_page = source.copy(
site=self._site,
parent_node=parent,
language=self._language,
translations=False,
permissions=False,
extensions=False,
)
new_page.update(is_page_type=False, in_navigation=True)
return new_page
def get_template(self):
return Page.TEMPLATE_DEFAULT
def save(self, *args, **kwargs):
source = self.cleaned_data.get('source')
parent = self.cleaned_data.get('parent_node')
if source:
new_page = self.from_source(source, parent=parent)
for lang in source.get_languages():
source._copy_contents(new_page, lang)
else:
new_page = super().save(commit=False)
new_page.template = self.get_template()
new_page.set_tree_node(self._site, target=parent, position='last-child')
new_page.save()
translation = self.create_translation(new_page)
if source:
extension_pool.copy_extensions(
source_page=source,
target_page=new_page,
languages=[translation.language],
)
is_first = not (
TreeNode
.objects
.get_for_site(self._site)
.exclude(pk=new_page.node_id)
.exists()
)
new_page.rescan_placeholders()
if is_first and not new_page.is_page_type:
# its the first page. publish it right away
new_page.publish(translation.language)
new_page.set_as_homepage(self._user)
new_page.clear_cache(menu=True)
return new_page
class AddPageTypeForm(AddPageForm):
menu_title = None
meta_description = None
page_title = None
source = forms.ModelChoiceField(
queryset=Page.objects.drafts(),
required=False,
widget=forms.HiddenInput(),
)
def get_or_create_root(self):
"""
Creates the root node used to store all page types
for the current site if it doesn't exist.
"""
root_page = PageType.get_root_page(site=self._site)
if not root_page:
root_page = Page(
publisher_is_draft=True,
in_navigation=False,
is_page_type=True,
)
root_page.set_tree_node(self._site)
root_page.save()
if not root_page.has_translation(self._language):
api.create_title(
language=self._language,
title=gettext('Page Types'),
page=root_page,
slug=PAGE_TYPES_ID,
path=PAGE_TYPES_ID,
)
return root_page.node
def clean_parent_node(self):
parent_node = super().clean_parent_node()
if parent_node and not parent_node.item.is_page_type:
raise ValidationError("Parent has to be a page type.")
if not parent_node:
# parent was not explicitly selected.
# fallback to the page types root
parent_node = self.get_or_create_root()
return parent_node
def from_source(self, source, parent=None):
new_page = source.copy(
site=self._site,
parent_node=parent,
language=self._language,
translations=False,
permissions=False,
extensions=False,
)
new_page.update(is_page_type=True, in_navigation=False)
return new_page
def save(self, *args, **kwargs):
new_page = super().save(*args, **kwargs)
if not self.cleaned_data.get('source'):
# User has created a page-type via "Add page"
# instead of from another page.
new_page.update(
draft_only=True,
is_page_type=True,
in_navigation=False,
)
return new_page
class DuplicatePageForm(AddPageForm):
source = forms.ModelChoiceField(
queryset=Page.objects.drafts(),
required=True,
widget=forms.HiddenInput(),
)
class ChangePageForm(BasePageForm):
translation_fields = (
'slug',
'title',
'meta_description',
'menu_title',
'page_title',
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.title_obj = self.instance.get_title_obj(
language=self._language,
fallback=False,
force_reload=True,
)
for field in self.translation_fields:
if field in self.fields:
self.fields[field].initial = getattr(self.title_obj, field)
def clean(self):
data = super().clean()
if self._errors:
# Form already has errors, best to let those be
# addressed first.
return data
page = self.instance
if page.is_home:
data['path'] = ''
return data
if self.title_obj.has_url_overwrite:
data['path'] = self.title_obj.path
return data
if 'slug' not in self.fields:
# the {% edit_title_fields %} template tag
# allows users to edit specific fields for a translation.
# as a result, slug might not always be there.
return data
if page.parent_page:
slug = data['slug']
parent_path = page.parent_page.get_path(self._language)
path = u'%s/%s' % (parent_path, slug) if parent_path else slug
else:
path = data['slug']
try:
# Validate the url
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
exclude_page=page,
)
except ValidationError as error:
self.add_error('slug', error)
else:
data['path'] = path
return data
def save(self, commit=True):
data = self.cleaned_data
cms_page = super().save(commit=False)
translation_data = {field: data[field]
for field in self.translation_fields if field in data}
if 'path' in data:
# The path key is set if
# the slug field is present in the form,
# or if the page being edited is the home page,
# or if the translation has a url override.
translation_data['path'] = data['path']
update_count = cms_page.update_translations(
self._language,
publisher_state=PUBLISHER_STATE_DIRTY,
**translation_data
)
if self._language in cms_page.title_cache:
del cms_page.title_cache[self._language]
if update_count == 0:
api.create_title(language=self._language, page=cms_page, **translation_data)
# _update_title_path_recursive should be called if the new page is the parent
# of already created children in multilingual sites.
cms_page._update_title_path_recursive(self._language, slug=self.data['slug'])
cms_page.clear_cache(menu=True)
return cms_page
class PublicationDatesForm(forms.ModelForm):
class Meta:
model = Page
fields = ['publication_date', 'publication_end_date']
def save(self, *args, **kwargs):
page = super().save(*args, **kwargs)
page.clear_cache(menu=True)
return page
class AdvancedSettingsForm(forms.ModelForm):
from cms.forms.fields import PageSmartLinkField
_user = None
_site = None
_language = None
application_urls = forms.ChoiceField(label=_('Application'),
choices=(), required=False,
help_text=_('Hook application to this page.'))
overwrite_url = forms.CharField(label=_('Overwrite URL'), max_length=255, required=False,
help_text=_('Keep this field empty if standard path should be used.'))
xframe_options = forms.ChoiceField(
choices=Page._meta.get_field('xframe_options').choices,
label=_('X Frame Options'),
help_text=_('Whether this page can be embedded in other pages or websites'),
initial=Page._meta.get_field('xframe_options').default,
required=False
)
redirect = PageSmartLinkField(label=_('Redirect'), required=False,
help_text=_('Redirects to this URL.'),
placeholder_text=_('Start typing...'),
ajax_view='admin:cms_page_get_published_pagelist',
)
# This is really a 'fake' field which does not correspond to any Page attribute
    # But creates a stub field to be populated by js
application_configs = forms.CharField(
label=_('Application configurations'),
required=False,
widget=ApplicationConfigSelect,
)
fieldsets = (
(None, {
'fields': ('overwrite_url', 'redirect'),
}),
(_('Language independent options'), {
'fields': ('template', 'reverse_id', 'soft_root', 'navigation_extenders',
'application_urls', 'application_namespace', 'application_configs',
'xframe_options',)
})
)
class Meta:
model = Page
fields = [
'template', 'reverse_id', 'overwrite_url', 'redirect', 'soft_root', 'navigation_extenders',
'application_urls', 'application_namespace', "xframe_options",
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.title_obj = self.instance.get_title_obj(
language=self._language,
fallback=False,
force_reload=True,
)
if 'navigation_extenders' in self.fields:
navigation_extenders = self.get_navigation_extenders()
self.fields['navigation_extenders'].widget = forms.Select(
{}, [('', "---------")] + navigation_extenders)
if 'application_urls' in self.fields:
# Prepare a dict mapping the apps by class name ('PollApp') to
# their app_name attribute ('polls'), if any.
app_namespaces = {}
app_configs = {}
for hook in apphook_pool.get_apphooks():
app = apphook_pool.get_apphook(hook[0])
if app.app_name:
app_namespaces[hook[0]] = app.app_name
if app.app_config:
app_configs[hook[0]] = app
self.fields['application_urls'].widget = AppHookSelect(
attrs={'id': 'application_urls'},
app_namespaces=app_namespaces
)
self.fields['application_urls'].choices = [('', "---------")] + apphook_pool.get_apphooks()
page_data = self.data if self.data else self.initial
if app_configs:
self.fields['application_configs'].widget = ApplicationConfigSelect(
attrs={'id': 'application_configs'},
app_configs=app_configs,
)
if page_data.get('application_urls', False) and page_data['application_urls'] in app_configs:
configs = app_configs[page_data['application_urls']].get_configs()
                self.fields['application_configs'].widget.choices = [
                    (config.pk, force_str(config)) for config in configs
                ]
try:
config = configs.get(namespace=self.initial['application_namespace'])
self.fields['application_configs'].initial = config.pk
except ObjectDoesNotExist:
# Provided apphook configuration doesn't exist (anymore),
# just skip it
# The user will choose another value anyway
pass
if 'redirect' in self.fields:
self.fields['redirect'].widget.language = self._language
self.fields['redirect'].initial = self.title_obj.redirect
if 'overwrite_url' in self.fields and self.title_obj.has_url_overwrite:
self.fields['overwrite_url'].initial = self.title_obj.path
def get_apphooks(self):
for hook in apphook_pool.get_apphooks():
yield (hook[0], apphook_pool.get_apphook(hook[0]))
def get_apphooks_with_config(self):
return {key: app for key, app in self.get_apphooks() if app.app_config}
def get_navigation_extenders(self):
return menu_pool.get_menus_by_attribute("cms_enabled", True)
def _check_unique_namespace_instance(self, namespace):
return Page.objects.drafts().on_site(self._site).filter(
application_namespace=namespace
).exclude(pk=self.instance.pk).exists()
def clean(self):
cleaned_data = super().clean()
if cleaned_data.get("overwrite_url"):
# Assuming that the user enters a full URL in the overwrite_url input.
# Here we validate it before publishing the page and if it contains
# reserved characters (e.g. $?:#), we add error in the form.
# issue 6934
url = cleaned_data.get("overwrite_url")
if url and not validate_overwrite_url(value=url):
self._errors['overwrite_url'] = self.error_class([_('You entered an invalid URL.')])
if self._errors:
# Fail fast if there's errors in the form
return cleaned_data
# Language has been validated already
# so we know it exists.
language_name = get_language_object(
self._language,
site_id=self._site.pk,
)['name']
if not self.title_obj.slug:
# This covers all cases where users try to edit
# page advanced settings without setting a title slug
# for page titles that already exist.
message = _("Please set the %(language)s slug "
"before editing its advanced settings.")
raise ValidationError(message % {'language': language_name})
if 'reverse_id' in self.fields:
reverse_id = cleaned_data['reverse_id']
if reverse_id:
lookup = Page.objects.drafts().on_site(self._site).filter(reverse_id=reverse_id)
if lookup.exclude(pk=self.instance.pk).exists():
self._errors['reverse_id'] = self.error_class(
[_('A page with this reverse URL id exists already.')])
apphook = cleaned_data.get('application_urls', None)
# The field 'application_namespace' is a misnomer. It should be
# 'instance_namespace'.
instance_namespace = cleaned_data.get('application_namespace', None)
application_config = cleaned_data.get('application_configs', None)
if apphook:
apphooks_with_config = self.get_apphooks_with_config()
# application_config wins over application_namespace
if apphook in apphooks_with_config and application_config:
# the value of the application config namespace is saved in
# the 'usual' namespace field to be backward compatible
# with existing apphooks
try:
appconfig_pk = forms.IntegerField(required=True).to_python(application_config)
except ValidationError:
self._errors['application_configs'] = ErrorList([
_('Invalid application config value')
])
return self.cleaned_data
try:
config = apphooks_with_config[apphook].get_configs().get(pk=appconfig_pk)
except ObjectDoesNotExist:
self._errors['application_configs'] = ErrorList([
_('Invalid application config value')
])
return self.cleaned_data
if self._check_unique_namespace_instance(config.namespace):
# Looks like there's already one with the default instance
# namespace defined.
self._errors['application_configs'] = ErrorList([
_('An application instance using this configuration already exists.')
])
else:
self.cleaned_data['application_namespace'] = config.namespace
else:
if instance_namespace:
if self._check_unique_namespace_instance(instance_namespace):
self._errors['application_namespace'] = ErrorList([
_('An application instance with this name already exists.')
])
else:
# The attribute on the apps 'app_name' is a misnomer, it should be
# 'application_namespace'.
application_namespace = apphook_pool.get_apphook(apphook).app_name
if application_namespace and not instance_namespace:
if self._check_unique_namespace_instance(application_namespace):
# Looks like there's already one with the default instance
# namespace defined.
self._errors['application_namespace'] = ErrorList([
_('An application instance with this name already exists.')
])
else:
# OK, there are zero instances of THIS app that use the
# default instance namespace, so, since the user didn't
# provide one, we'll use the default. NOTE: The following
# line is really setting the "instance namespace" of the
# new app to the app’s "application namespace", which is
# the default instance namespace.
self.cleaned_data['application_namespace'] = application_namespace
if instance_namespace and not apphook:
self.cleaned_data['application_namespace'] = None
if application_config and not apphook:
self.cleaned_data['application_configs'] = None
return self.cleaned_data
def clean_xframe_options(self):
if 'xframe_options' not in self.fields:
return # nothing to do, field isn't present
xframe_options = self.cleaned_data['xframe_options']
if xframe_options == '':
return Page._meta.get_field('xframe_options').default
return xframe_options
def clean_overwrite_url(self):
path_override = self.cleaned_data.get('overwrite_url')
if path_override:
path = path_override.strip('/')
else:
path = self.instance.get_path_for_slug(self.title_obj.slug, self._language)
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
exclude_page=self.instance,
)
self.cleaned_data['path'] = path
return path_override
def has_changed_apphooks(self):
changed_data = self.changed_data
if 'application_urls' in changed_data:
return True
return 'application_namespace' in changed_data
def update_apphooks(self):
# User has changed the apphooks on the page.
# Update the public version of the page to reflect this change immediately.
public_id = self.instance.publisher_public_id
self._meta.model.objects.filter(pk=public_id).update(
application_urls=self.instance.application_urls,
application_namespace=(self.instance.application_namespace or None),
)
# Connects the apphook restart handler to the request finished signal
set_restart_trigger()
def save(self, *args, **kwargs):
data = self.cleaned_data
page = super().save(*args, **kwargs)
page.update_translations(
self._language,
path=data['path'],
redirect=(data.get('redirect') or None),
publisher_state=PUBLISHER_STATE_DIRTY,
has_url_overwrite=bool(data.get('overwrite_url')),
)
is_draft_and_has_public = page.publisher_is_draft and page.publisher_public_id
if is_draft_and_has_public and self.has_changed_apphooks():
self.update_apphooks()
page.clear_cache(menu=True)
return page
class PagePermissionForm(forms.ModelForm):
class Meta:
model = Page
fields = ['login_required', 'limit_visibility_in_menu']
def save(self, *args, **kwargs):
page = super().save(*args, **kwargs)
page.clear_cache(menu=True)
clear_permission_cache()
return page
class PageTreeForm(forms.Form):
position = forms.IntegerField(initial=0, required=True)
target = forms.ModelChoiceField(queryset=Page.objects.none(), required=False)
def __init__(self, *args, **kwargs):
self.page = kwargs.pop('page')
self._site = kwargs.pop('site', Site.objects.get_current())
super().__init__(*args, **kwargs)
self.fields['target'].queryset = Page.objects.drafts().filter(
node__site=self._site,
is_page_type=self.page.is_page_type,
)
def get_root_nodes(self):
# TODO: this needs to avoid using the pages accessor directly
nodes = TreeNode.get_root_nodes()
return nodes.exclude(cms_pages__is_page_type=not(self.page.is_page_type))
def get_tree_options(self):
position = self.cleaned_data['position']
target_page = self.cleaned_data.get('target')
parent_node = target_page.node if target_page else None
if parent_node:
return self._get_tree_options_for_parent(parent_node, position)
return self._get_tree_options_for_root(position)
def _get_tree_options_for_root(self, position):
siblings = self.get_root_nodes().filter(site=self._site)
try:
target_node = siblings[position]
except IndexError:
# The position requested is not occupied.
# Add the node as the last root node,
# relative to the current site.
return (siblings.reverse()[0], 'right')
return (target_node, 'left')
def _get_tree_options_for_parent(self, parent_node, position):
if position == 0:
return (parent_node, 'first-child')
siblings = parent_node.get_children().filter(site=self._site)
try:
target_node = siblings[position]
except IndexError:
# The position requested is not occupied.
# Add the node to be the parent's first child
return (parent_node, 'last-child')
return (target_node, 'left')
class MovePageForm(PageTreeForm):
def clean(self):
cleaned_data = super().clean()
if self.page.is_home and cleaned_data.get('target'):
self.add_error('target', force_str(_('You can\'t move the home page inside another page')))
return cleaned_data
def get_tree_options(self):
options = super().get_tree_options()
target_node, target_node_position = options
if target_node_position != 'left':
return (target_node, target_node_position)
node = self.page.node
node_is_first = node.path < target_node.path
if node_is_first and node.is_sibling_of(target_node):
# The node being moved appears before the target node
# and is a sibling of the target node.
# The user is moving from left to right.
target_node_position = 'right'
elif node_is_first:
# The node being moved appears before the target node
# but is not a sibling of the target node.
# The user is moving from right to left.
target_node_position = 'left'
else:
# The node being moved appears after the target node.
# The user is moving from right to left.
target_node_position = 'left'
return (target_node, target_node_position)
def move_page(self):
self.page.move_page(*self.get_tree_options())
class CopyPageForm(PageTreeForm):
source_site = forms.ModelChoiceField(queryset=Site.objects.all(), required=True)
copy_permissions = forms.BooleanField(initial=False, required=False)
def copy_page(self):
target, position = self.get_tree_options()
copy_permissions = self.cleaned_data.get('copy_permissions', False)
new_page = self.page.copy_with_descendants(
target_node=target,
position=position,
copy_permissions=copy_permissions,
target_site=self._site,
)
new_page.clear_cache(menu=True)
return new_page
def _get_tree_options_for_root(self, position):
try:
return super()._get_tree_options_for_root(position)
except IndexError:
# The user is copying a page to a site with no pages
# Add the node as the last root node.
siblings = self.get_root_nodes().reverse()
return (siblings[0], 'right')
class ChangeListForm(forms.Form):
BOOLEAN_CHOICES = (
('', _('All')),
('1', _('Yes')),
('0', _('No')),
)
q = forms.CharField(required=False, widget=forms.HiddenInput())
in_navigation = forms.ChoiceField(required=False, choices=BOOLEAN_CHOICES)
template = forms.ChoiceField(required=False)
changed_by = forms.ChoiceField(required=False)
soft_root = forms.ChoiceField(required=False, choices=BOOLEAN_CHOICES)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['changed_by'].choices = get_page_changed_by_filter_choices()
self.fields['template'].choices = get_page_template_filter_choices()
def is_filtered(self):
data = self.cleaned_data
if self.cleaned_data.get('q'):
return True
return any(bool(data.get(field.name)) for field in self.visible_fields())
def get_filter_items(self):
for field in self.visible_fields():
value = self.cleaned_data.get(field.name)
if value:
yield (field.name, value)
def run_filters(self, queryset):
for field, value in self.get_filter_items():
query = {'{}__exact'.format(field): value}
queryset = queryset.filter(**query)
return queryset
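    # Hedged example of combining the filter helpers (field values are
    # illustrative only, not part of the original module):
    #
    #     form = ChangeListForm(data={'in_navigation': '1'})
    #     if form.is_valid():
    #         queryset = form.run_filters(Page.objects.drafts())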
class BasePermissionAdminForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
permission_fields = self._meta.model.get_all_permissions()
for field in permission_fields:
if field not in self.base_fields:
setattr(self.instance, field, False)
class PagePermissionInlineAdminForm(BasePermissionAdminForm):
"""
    Page permission inline admin form used in the inline admin. Required because
    the user and group querysets must be restricted: a user can only see users on
    the same level or below in the chosen page tree, as well as users they created
    themselves, as long as those are not assigned to a higher page level than the
    current user.
"""
page = forms.ModelChoiceField(
queryset=Page.objects.all(),
label=_('user'),
widget=HiddenInput(),
required=True,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
user = get_current_user() # current user from threadlocals
site = Site.objects.get_current()
sub_users = get_subordinate_users(user, site)
limit_choices = True
use_raw_id = False
# Unfortunately, if there are > 500 users in the system, non-superusers
# won't see any benefit here because if we ask Django to put all the
# user PKs in limit_choices_to in the query string of the popup we're
# in danger of causing 414 errors so we fall back to the normal input
# widget.
if get_cms_setting('RAW_ID_USERS'):
if sub_users.count() < 500:
# If there aren't too many users, proceed as normal and use a
# raw id field with limit_choices_to
limit_choices = True
use_raw_id = True
elif get_user_permission_level(user, site) == ROOT_USER_LEVEL:
# If there are enough choices to possibly cause a 414 request
# URI too large error, we only proceed with the raw id field if
# the user is a superuser & thus can legitimately circumvent
# the limit_choices_to condition.
limit_choices = False
use_raw_id = True
# We don't use the fancy custom widget if the admin form wants to use a
# raw id field for the user
if use_raw_id:
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
# This check will be False if the number of users in the system
# is less than the threshold set by the RAW_ID_USERS setting.
if isinstance(self.fields['user'].widget, ForeignKeyRawIdWidget):
# We can't set a queryset on a raw id lookup, but we can use
# the fact that it respects the limit_choices_to parameter.
if limit_choices:
self.fields['user'].widget.rel.limit_choices_to = dict(
id__in=list(sub_users.values_list('pk', flat=True))
)
else:
self.fields['user'].widget = UserSelectAdminWidget()
self.fields['user'].queryset = sub_users
self.fields['user'].widget.user = user # assign current user
self.fields['group'].queryset = get_subordinate_groups(user, site)
class Meta:
fields = [
'user',
'group',
'can_add',
'can_change',
'can_delete',
'can_publish',
'can_change_advanced_settings',
'can_change_permissions',
'can_move_page',
'grant_on',
]
model = PagePermission
class ViewRestrictionInlineAdminForm(BasePermissionAdminForm):
page = forms.ModelChoiceField(
queryset=Page.objects.all(),
label=_('user'),
widget=HiddenInput(),
required=True,
)
can_view = forms.BooleanField(
label=_('can_view'),
widget=HiddenInput(),
initial=True,
)
class Meta:
fields = [
'user',
'group',
'grant_on',
'can_view',
]
model = PagePermission
def clean_can_view(self):
return True
class GlobalPagePermissionAdminForm(BasePermissionAdminForm):
class Meta:
fields = [
'user',
'group',
'can_add',
'can_change',
'can_delete',
'can_publish',
'can_change_advanced_settings',
'can_change_permissions',
'can_move_page',
'can_view',
'sites',
]
model = GlobalPagePermission
class GenericCmsPermissionForm(forms.ModelForm):
"""Generic form for User & Grup permissions in cms
"""
_current_user = None
can_add_page = forms.BooleanField(label=_('Add'), required=False, initial=True)
can_change_page = forms.BooleanField(label=_('Change'), required=False, initial=True)
can_delete_page = forms.BooleanField(label=_('Delete'), required=False)
# pageuser is for pageuser & group - they are combined together,
# and read out from PageUser model
can_add_pageuser = forms.BooleanField(label=_('Add'), required=False)
can_change_pageuser = forms.BooleanField(label=_('Change'), required=False)
can_delete_pageuser = forms.BooleanField(label=_('Delete'), required=False)
can_add_pagepermission = forms.BooleanField(label=_('Add'), required=False)
can_change_pagepermission = forms.BooleanField(label=_('Change'), required=False)
can_delete_pagepermission = forms.BooleanField(label=_('Delete'), required=False)
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance')
initial = kwargs.get('initial') or {}
if instance:
initial = initial or {}
initial.update(self.populate_initials(instance))
kwargs['initial'] = initial
super().__init__(*args, **kwargs)
def clean(self):
data = super().clean()
# Validate Page options
if not data.get('can_change_page'):
if data.get('can_add_page'):
message = _("Users can't create a page without permissions "
"to change the created page. Edit permissions required.")
raise ValidationError(message)
if data.get('can_delete_page'):
message = _("Users can't delete a page without permissions "
"to change the page. Edit permissions required.")
raise ValidationError(message)
if data.get('can_add_pagepermission'):
message = _("Users can't set page permissions without permissions "
"to change a page. Edit permissions required.")
raise ValidationError(message)
if data.get('can_delete_pagepermission'):
message = _("Users can't delete page permissions without permissions "
"to change a page. Edit permissions required.")
raise ValidationError(message)
# Validate PagePermission options
if not data.get('can_change_pagepermission'):
if data.get('can_add_pagepermission'):
message = _("Users can't create page permissions without permissions "
"to change the created permission. Edit permissions required.")
raise ValidationError(message)
if data.get('can_delete_pagepermission'):
message = _("Users can't delete page permissions without permissions "
"to change permissions. Edit permissions required.")
raise ValidationError(message)
def populate_initials(self, obj):
"""Read out permissions from permission system.
"""
initials = {}
permission_accessor = get_permission_accessor(obj)
for model in (Page, PageUser, PagePermission):
name = model.__name__.lower()
content_type = ContentType.objects.get_for_model(model)
permissions = permission_accessor.filter(content_type=content_type).values_list('codename', flat=True)
for key in ('add', 'change', 'delete'):
codename = get_permission_codename(key, model._meta)
initials['can_%s_%s' % (key, name)] = codename in permissions
return initials
def save(self, commit=True):
instance = super().save(commit=False)
instance.save()
save_permissions(self.cleaned_data, instance)
return instance
class PageUserAddForm(forms.ModelForm):
_current_user = None
user = forms.ModelChoiceField(queryset=User.objects.none())
class Meta:
fields = ['user']
model = PageUser
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['user'].queryset = self.get_subordinates()
def get_subordinates(self):
subordinates = get_subordinate_users(self._current_user, self._current_site)
return subordinates.filter(pageuser__isnull=True)
def save(self, commit=True):
user = self.cleaned_data['user']
instance = super().save(commit=False)
instance.created_by = self._current_user
for field in user._meta.fields:
# assign all the fields - we can do this, because object is
# subclassing User (one to one relation)
value = getattr(user, field.name)
setattr(instance, field.name, value)
if commit:
instance.save()
return instance
class PageUserChangeForm(UserChangeForm):
_current_user = None
class Meta:
fields = '__all__'
model = PageUser
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self._current_user.is_superuser:
# Limit permissions to include only
# the permissions available to the manager.
permissions = self.get_available_permissions()
self.fields['user_permissions'].queryset = permissions
# Limit groups to include only those where
# the manager is a member.
self.fields['groups'].queryset = self.get_available_groups()
def get_available_permissions(self):
permissions = self._current_user.get_all_permissions()
permission_codes = (perm.rpartition('.')[-1] for perm in permissions)
return Permission.objects.filter(codename__in=permission_codes)
def get_available_groups(self):
return self._current_user.groups.all()
class PageUserGroupForm(GenericCmsPermissionForm):
class Meta:
model = PageUserGroup
fields = ('name', )
def save(self, commit=True):
if not self.instance.pk:
self.instance.created_by = self._current_user
return super().save(commit=commit)
class PluginAddValidationForm(forms.Form):
placeholder_id = forms.ModelChoiceField(
queryset=Placeholder.objects.all(),
required=True,
)
plugin_language = forms.CharField(required=True)
plugin_parent = forms.ModelChoiceField(
CMSPlugin.objects.all(),
required=False,
)
plugin_type = forms.CharField(required=True)
def clean_plugin_type(self):
plugin_type = self.cleaned_data['plugin_type']
try:
plugin_pool.get_plugin(plugin_type)
except KeyError:
message = gettext("Invalid plugin type '%s'") % plugin_type
raise ValidationError(message)
return plugin_type
def clean(self):
from cms.utils.plugins import has_reached_plugin_limit
data = self.cleaned_data
if self.errors:
return data
language = data['plugin_language']
placeholder = data['placeholder_id']
parent_plugin = data.get('plugin_parent')
if language not in get_language_list():
message = gettext("Language must be set to a supported language!")
self.add_error('plugin_language', message)
return self.cleaned_data
if parent_plugin:
if parent_plugin.language != language:
message = gettext("Parent plugin language must be same as language!")
self.add_error('plugin_language', message)
return self.cleaned_data
if parent_plugin.placeholder_id != placeholder.pk:
message = gettext("Parent plugin placeholder must be same as placeholder!")
self.add_error('placeholder_id', message)
return self.cleaned_data
page = placeholder.page
template = page.get_template() if page else None
try:
has_reached_plugin_limit(
placeholder,
data['plugin_type'],
language,
template=template,
parent_plugin=parent_plugin
)
except PluginLimitReached as error:
self.add_error(None, force_str(error))
return self.cleaned_data
class RequestToolbarForm(forms.Form):
obj_id = forms.CharField(required=False)
obj_type = forms.CharField(required=False)
cms_path = forms.CharField(required=False)
def clean(self):
data = self.cleaned_data
obj_id = data.get('obj_id')
obj_type = data.get('obj_type')
if not bool(obj_id or obj_type):
return data
if (obj_id and not obj_type) or (obj_type and not obj_id):
message = 'Invalid object lookup. Both obj_id and obj_type are required'
raise forms.ValidationError(message)
app, sep, model = obj_type.rpartition('.')
try:
model_class = apps.get_model(app_label=app, model_name=model)
except LookupError:
message = 'Invalid object lookup. Both obj_id and obj_type are required'
raise forms.ValidationError(message)
try:
generic_obj = model_class.objects.get(pk=obj_id)
except model_class.DoesNotExist:
message = 'Invalid object lookup. Both obj_id and obj_type are required'
raise forms.ValidationError(message)
else:
data['attached_obj'] = generic_obj
return data
def clean_cms_path(self):
path = self.cleaned_data.get('cms_path')
if path:
validate_relative_url(path)
return path
| rsalmaso/django-cms | cms/admin/forms.py | Python | bsd-3-clause | 49,683 | 0.001751 |
from blinker import Namespace
import logging
import json
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MySignal:
def __init__(self):
self.signals = {}
self.signal = Namespace()
def init_app(self, app):
pass
def addSignal(self, classname, option):
logger.debug('add signal {}.{}'.format(classname, option))
if '{}.{}'.format(classname, option) not in self.signals.keys():
self.signals['{}.{}'.format(classname, option)] = self.signal.signal('{}.{}'.format(classname, option))
def send(self, classname, option, **extra):
logger.debug('send signal {}.{} with: {}'.format(classname, option, extra))
logger.info('send signal {}.{}'.format(classname, option))
if '{}.{}'.format(classname, option) in self.signals.keys():
payload = '{}.{}'.format(classname, option)
if extra:
extra['sender'] = payload
payload = json.dumps(extra)
self.signals['{}.{}'.format(classname, option)].send(str(payload))
def connect(self, classname, option, func):
logger.debug('connect signal {}.{} with func: {}()'.format(classname, option, func.__name__))
if not '{}.{}'.format(classname, option) in self.signals.keys():
self.signals['{}.{}'.format(classname, option)] = self.signal.signal('{}.{}'.format(classname, option))
self.signals['{}.{}'.format(classname, option)].connect(func)
def disconnect(self, classname, option, func):
if '{}.{}'.format(classname, option) in self.signals.keys():
self.signals['{}.{}'.format(classname, option)].disconnect(func)
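# --- Usage sketch (added for illustration; not part of the original emonitor
# module). The 'incident.update' signal name and the handler below are
# assumptions chosen only for this demo.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    def on_incident_update(sender):
        # blinker delivers the JSON payload string built in send()
        logger.info('received payload: %s', sender)
    demo = MySignal()
    demo.addSignal('incident', 'update')
    demo.connect('incident', 'update', on_incident_update)
    demo.send('incident', 'update', id=42)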
| digifant/eMonitor | emonitor/signals.py | Python | bsd-3-clause | 1,692 | 0.002364 |
# -*- coding: utf-8 -*-
import re
import string
import random
import pbkdf2
HASHING_ITERATIONS = 400
ALLOWED_IN_SALT = string.ascii_letters + string.digits + './'
ALLOWED_PASSWORD_PATTERN = r'[A-Za-z0-9@#$%^&+=]{8,}'
def generate_random_string(len=12, allowed_chars=string.ascii_letters+string.digits):
return ''.join(random.choice(allowed_chars) for i in range(len))
def make_password(password=None):
if password is None:
raise ValueError('password is required')
salt = generate_random_string(len=32, allowed_chars=ALLOWED_IN_SALT)
return pbkdf2.crypt(password, salt=salt, iterations=HASHING_ITERATIONS)
def check_password(password, hashed_password):
return hashed_password == pbkdf2.crypt(password, hashed_password)
def validate_password(password=None):
"""
ALLOWED_PASSWORD_PATTERN = r'[A-Za-z0-9@#$%^&+=]{8,}'
"""
if password is None:
raise ValueError('password is required')
    if re.match(ALLOWED_PASSWORD_PATTERN, password):
return True
return False
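# Usage sketch (added for illustration; not part of the original module):
# round-trips a password through make_password/check_password and shows
# validate_password rejecting a password that is too short.
if __name__ == '__main__':
    hashed = make_password('S3cretPass')        # pbkdf2 hash with a random salt
    assert check_password('S3cretPass', hashed)
    assert not check_password('wrong-pass', hashed)
    assert validate_password('S3cretPass')      # matches the allowed pattern
    assert not validate_password('short')       # fewer than 8 characters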
| takearest118/coconut | common/hashers.py | Python | gpl-3.0 | 1,030 | 0.000971 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .entity_health import EntityHealth
class PartitionHealth(EntityHealth):
"""Information about the health of a Service Fabric partition.
:param aggregated_health_state: Possible values include: 'Invalid', 'Ok',
'Warning', 'Error', 'Unknown'
:type aggregated_health_state: str or :class:`enum
<azure.servicefabric.models.enum>`
:param health_events: The list of health events reported on the entity.
:type health_events: list of :class:`HealthEvent
<azure.servicefabric.models.HealthEvent>`
:param unhealthy_evaluations:
:type unhealthy_evaluations: list of :class:`HealthEvaluationWrapper
<azure.servicefabric.models.HealthEvaluationWrapper>`
:param health_statistics:
:type health_statistics: :class:`HealthStatistics
<azure.servicefabric.models.HealthStatistics>`
:param partition_id:
:type partition_id: str
:param replica_health_states: The list of replica health states associated
with the partition.
:type replica_health_states: list of :class:`ReplicaHealthState
<azure.servicefabric.models.ReplicaHealthState>`
"""
_attribute_map = {
'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'},
'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'},
'partition_id': {'key': 'PartitionId', 'type': 'str'},
'replica_health_states': {'key': 'ReplicaHealthStates', 'type': '[ReplicaHealthState]'},
}
def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id=None, replica_health_states=None):
super(PartitionHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics)
self.partition_id = partition_id
self.replica_health_states = replica_health_states
| AutorestCI/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/partition_health.py | Python | mit | 2,612 | 0.002297 |
"""
Script that trains graph-conv models on ChEMBL dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
from chembl_datasets import load_chembl
# Load ChEMBL dataset
chembl_tasks, datasets, transformers = load_chembl(
shard_size=2000, featurizer="GraphConv", set="5thresh", split="random")
train_dataset, valid_dataset, test_dataset = datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
# Do setup required for tf/keras models
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 128
graph_model = dc.nn.SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(128, n_feat, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
graph_model.add(dc.nn.GraphConv(128, 128, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
# Gather Projection
graph_model.add(dc.nn.Dense(256, 128, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
model = dc.models.MultitaskGraphRegressor(
graph_model,
len(chembl_tasks),
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(train_dataset, nb_epoch=20)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
print("Test scores")
print(test_scores)
| joegomes/deepchem | examples/chembl/chembl_graph_conv.py | Python | mit | 1,974 | 0.00152 |
import numpy as np
from typing import Any, List, Tuple
from ray.rllib.models.torch.misc import Reshape
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.framework import TensorType
torch, nn = try_import_torch()
if torch:
from torch import distributions as td
from ray.rllib.agents.dreamer.utils import Linear, Conv2d, \
ConvTranspose2d, GRUCell, TanhBijector
ActFunc = Any
# Encoder, part of PlaNET
class ConvEncoder(nn.Module):
"""Standard Convolutional Encoder for Dreamer. This encoder is used
to encode images frm an enviornment into a latent state for the
RSSM model in PlaNET.
"""
def __init__(self,
depth: int = 32,
act: ActFunc = None,
shape: Tuple[int] = (3, 64, 64)):
"""Initializes Conv Encoder
Args:
depth (int): Number of channels in the first conv layer
act (Any): Activation for Encoder, default ReLU
shape (List): Shape of observation input
"""
super().__init__()
self.act = act
if not act:
self.act = nn.ReLU
self.depth = depth
self.shape = shape
init_channels = self.shape[0]
self.layers = [
Conv2d(init_channels, self.depth, 4, stride=2),
self.act(),
Conv2d(self.depth, 2 * self.depth, 4, stride=2),
self.act(),
Conv2d(2 * self.depth, 4 * self.depth, 4, stride=2),
self.act(),
Conv2d(4 * self.depth, 8 * self.depth, 4, stride=2),
self.act(),
]
self.model = nn.Sequential(*self.layers)
def forward(self, x):
# Flatten to [batch*horizon, 3, 64, 64] in loss function
orig_shape = list(x.size())
x = x.view(-1, *(orig_shape[-3:]))
x = self.model(x)
new_shape = orig_shape[:-3] + [32 * self.depth]
x = x.view(*new_shape)
return x
# Decoder, part of PlaNET
class ConvDecoder(nn.Module):
"""Standard Convolutional Decoder for Dreamer.
This decoder is used to decode images from the latent state generated
by the transition dynamics model. This is used in calculating loss and
logging gifs for imagined trajectories.
"""
def __init__(self,
input_size: int,
depth: int = 32,
act: ActFunc = None,
shape: Tuple[int] = (3, 64, 64)):
"""Initializes a ConvDecoder instance.
Args:
input_size (int): Input size, usually feature size output from
RSSM.
depth (int): Number of channels in the first conv layer
act (Any): Activation for Encoder, default ReLU
shape (List): Shape of observation input
"""
super().__init__()
self.act = act
if not act:
self.act = nn.ReLU
self.depth = depth
self.shape = shape
self.layers = [
Linear(input_size, 32 * self.depth),
Reshape([-1, 32 * self.depth, 1, 1]),
ConvTranspose2d(32 * self.depth, 4 * self.depth, 5, stride=2),
self.act(),
ConvTranspose2d(4 * self.depth, 2 * self.depth, 5, stride=2),
self.act(),
ConvTranspose2d(2 * self.depth, self.depth, 6, stride=2),
self.act(),
ConvTranspose2d(self.depth, self.shape[0], 6, stride=2),
]
self.model = nn.Sequential(*self.layers)
def forward(self, x):
# x is [batch, hor_length, input_size]
orig_shape = list(x.size())
x = self.model(x)
reshape_size = orig_shape[:-1] + self.shape
mean = x.view(*reshape_size)
# Equivalent to making a multivariate diag
return td.Independent(td.Normal(mean, 1), len(self.shape))
# Reward Model (PlaNET), and Value Function
class DenseDecoder(nn.Module):
"""FC network that outputs a distribution for calculating log_prob.
Used later in DreamerLoss.
"""
def __init__(self,
input_size: int,
output_size: int,
layers: int,
units: int,
dist: str = "normal",
act: ActFunc = None):
"""Initializes FC network
Args:
input_size (int): Input size to network
output_size (int): Output size to network
layers (int): Number of layers in network
units (int): Size of the hidden layers
dist (str): Output distribution, parameterized by FC output
logits.
act (Any): Activation function
"""
super().__init__()
self.layrs = layers
self.units = units
self.act = act
if not act:
self.act = nn.ELU
self.dist = dist
self.input_size = input_size
self.output_size = output_size
self.layers = []
cur_size = input_size
for _ in range(self.layrs):
self.layers.extend([Linear(cur_size, self.units), self.act()])
cur_size = units
self.layers.append(Linear(cur_size, output_size))
self.model = nn.Sequential(*self.layers)
def forward(self, x):
x = self.model(x)
if self.output_size == 1:
x = torch.squeeze(x)
if self.dist == "normal":
output_dist = td.Normal(x, 1)
elif self.dist == "binary":
output_dist = td.Bernoulli(logits=x)
else:
raise NotImplementedError("Distribution type not implemented!")
return td.Independent(output_dist, 0)
# Represents dreamer policy
class ActionDecoder(nn.Module):
"""ActionDecoder is the policy module in Dreamer.
It outputs a distribution parameterized by mean and std, later to be
transformed by a custom TanhBijector in utils.py for Dreamer.
"""
def __init__(self,
input_size: int,
action_size: int,
layers: int,
units: int,
dist: str = "tanh_normal",
act: ActFunc = None,
min_std: float = 1e-4,
init_std: float = 5.0,
mean_scale: float = 5.0):
"""Initializes Policy
Args:
input_size (int): Input size to network
action_size (int): Action space size
layers (int): Number of layers in network
units (int): Size of the hidden layers
dist (str): Output distribution, with tanh_normal implemented
act (Any): Activation function
min_std (float): Minimum std for output distribution
            init_std (float): Initial std
mean_scale (float): Augmenting mean output from FC network
"""
super().__init__()
self.layrs = layers
self.units = units
self.dist = dist
self.act = act
if not act:
self.act = nn.ReLU
self.min_std = min_std
self.init_std = init_std
self.mean_scale = mean_scale
self.action_size = action_size
self.layers = []
self.softplus = nn.Softplus()
# MLP Construction
cur_size = input_size
for _ in range(self.layrs):
self.layers.extend([Linear(cur_size, self.units), self.act()])
cur_size = self.units
if self.dist == "tanh_normal":
self.layers.append(Linear(cur_size, 2 * action_size))
elif self.dist == "onehot":
self.layers.append(Linear(cur_size, action_size))
self.model = nn.Sequential(*self.layers)
# Returns distribution
def forward(self, x):
raw_init_std = np.log(np.exp(self.init_std) - 1)
x = self.model(x)
if self.dist == "tanh_normal":
mean, std = torch.chunk(x, 2, dim=-1)
mean = self.mean_scale * torch.tanh(mean / self.mean_scale)
std = self.softplus(std + raw_init_std) + self.min_std
dist = td.Normal(mean, std)
transforms = [TanhBijector()]
dist = td.transformed_distribution.TransformedDistribution(
dist, transforms)
dist = td.Independent(dist, 1)
elif self.dist == "onehot":
dist = td.OneHotCategorical(logits=x)
raise NotImplementedError("Atari not implemented yet!")
return dist
# Represents TD model in PlaNET
class RSSM(nn.Module):
"""RSSM is the core recurrent part of the PlaNET module. It consists of
two networks, one (obs) to calculate posterior beliefs and states and
the second (img) to calculate prior beliefs and states. The prior network
takes in the previous state and action, while the posterior network takes
in the previous state, action, and a latent embedding of the most recent
observation.
"""
def __init__(self,
action_size: int,
embed_size: int,
stoch: int = 30,
deter: int = 200,
hidden: int = 200,
act: ActFunc = None):
"""Initializes RSSM
Args:
action_size (int): Action space size
embed_size (int): Size of ConvEncoder embedding
stoch (int): Size of the distributional hidden state
deter (int): Size of the deterministic hidden state
hidden (int): General size of hidden layers
act (Any): Activation function
"""
super().__init__()
self.stoch_size = stoch
self.deter_size = deter
self.hidden_size = hidden
self.act = act
if act is None:
self.act = nn.ELU
self.obs1 = Linear(embed_size + deter, hidden)
self.obs2 = Linear(hidden, 2 * stoch)
self.cell = GRUCell(self.hidden_size, hidden_size=self.deter_size)
self.img1 = Linear(stoch + action_size, hidden)
self.img2 = Linear(deter, hidden)
self.img3 = Linear(hidden, 2 * stoch)
self.softplus = nn.Softplus
self.device = (torch.device("cuda")
if torch.cuda.is_available() else torch.device("cpu"))
def get_initial_state(self, batch_size: int) -> List[TensorType]:
"""Returns the inital state for the RSSM, which consists of mean,
std for the stochastic state, the sampled stochastic hidden state
(from mean, std), and the deterministic hidden state, which is
pushed through the GRUCell.
Args:
batch_size (int): Batch size for initial state
Returns:
List of tensors
"""
return [
torch.zeros(batch_size, self.stoch_size).to(self.device),
torch.zeros(batch_size, self.stoch_size).to(self.device),
torch.zeros(batch_size, self.stoch_size).to(self.device),
torch.zeros(batch_size, self.deter_size).to(self.device),
]
def observe(self,
embed: TensorType,
action: TensorType,
state: List[TensorType] = None
) -> Tuple[List[TensorType], List[TensorType]]:
"""Returns the corresponding states from the embedding from ConvEncoder
and actions. This is accomplished by rolling out the RNN from the
        starting state through each index of embed and action, saving all
        intermediate states in between.
Args:
embed (TensorType): ConvEncoder embedding
action (TensorType): Actions
state (List[TensorType]): Initial state before rollout
Returns:
Posterior states and prior states (both List[TensorType])
"""
if state is None:
state = self.get_initial_state(action.size()[0])
embed = embed.permute(1, 0, 2)
action = action.permute(1, 0, 2)
priors = [[] for i in range(len(state))]
posts = [[] for i in range(len(state))]
last = (state, state)
for index in range(len(action)):
# Tuple of post and prior
last = self.obs_step(last[0], action[index], embed[index])
[o.append(s) for s, o in zip(last[0], posts)]
[o.append(s) for s, o in zip(last[1], priors)]
prior = [torch.stack(x, dim=0) for x in priors]
post = [torch.stack(x, dim=0) for x in posts]
prior = [e.permute(1, 0, 2) for e in prior]
post = [e.permute(1, 0, 2) for e in post]
return post, prior
def imagine(self, action: TensorType,
state: List[TensorType] = None) -> List[TensorType]:
"""Imagines the trajectory starting from state through a list of actions.
Similar to observe(), requires rolling out the RNN for each timestep.
Args:
action (TensorType): Actions
state (List[TensorType]): Starting state before rollout
Returns:
Prior states
"""
if state is None:
state = self.get_initial_state(action.size()[0])
action = action.permute(1, 0, 2)
indices = range(len(action))
priors = [[] for _ in range(len(state))]
last = state
for index in indices:
last = self.img_step(last, action[index])
[o.append(s) for s, o in zip(last, priors)]
prior = [torch.stack(x, dim=0) for x in priors]
prior = [e.permute(1, 0, 2) for e in prior]
return prior
def obs_step(
self, prev_state: TensorType, prev_action: TensorType,
embed: TensorType) -> Tuple[List[TensorType], List[TensorType]]:
"""Runs through the posterior model and returns the posterior state
Args:
prev_state (TensorType): The previous state
prev_action (TensorType): The previous action
embed (TensorType): Embedding from ConvEncoder
Returns:
Post and Prior state
"""
prior = self.img_step(prev_state, prev_action)
x = torch.cat([prior[3], embed], dim=-1)
x = self.obs1(x)
x = self.act()(x)
x = self.obs2(x)
mean, std = torch.chunk(x, 2, dim=-1)
std = self.softplus()(std) + 0.1
stoch = self.get_dist(mean, std).rsample()
post = [mean, std, stoch, prior[3]]
return post, prior
def img_step(self, prev_state: TensorType,
prev_action: TensorType) -> List[TensorType]:
"""Runs through the prior model and returns the prior state
Args:
prev_state (TensorType): The previous state
prev_action (TensorType): The previous action
Returns:
Prior state
"""
x = torch.cat([prev_state[2], prev_action], dim=-1)
x = self.img1(x)
x = self.act()(x)
deter = self.cell(x, prev_state[3])
x = deter
x = self.img2(x)
x = self.act()(x)
x = self.img3(x)
mean, std = torch.chunk(x, 2, dim=-1)
std = self.softplus()(std) + 0.1
stoch = self.get_dist(mean, std).rsample()
return [mean, std, stoch, deter]
def get_feature(self, state: List[TensorType]) -> TensorType:
# Constructs feature for input to reward, decoder, actor, critic
return torch.cat([state[2], state[3]], dim=-1)
def get_dist(self, mean: TensorType, std: TensorType) -> TensorType:
return td.Normal(mean, std)
# Represents all models in Dreamer, unifies them all into a single interface
class DreamerModel(TorchModelV2, nn.Module):
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
super().__init__(obs_space, action_space, num_outputs, model_config,
name)
nn.Module.__init__(self)
self.depth = model_config["depth_size"]
self.deter_size = model_config["deter_size"]
self.stoch_size = model_config["stoch_size"]
self.hidden_size = model_config["hidden_size"]
self.action_size = action_space.shape[0]
self.encoder = ConvEncoder(self.depth)
self.decoder = ConvDecoder(
self.stoch_size + self.deter_size, depth=self.depth)
self.reward = DenseDecoder(self.stoch_size + self.deter_size, 1, 2,
self.hidden_size)
self.dynamics = RSSM(
self.action_size,
32 * self.depth,
stoch=self.stoch_size,
deter=self.deter_size)
self.actor = ActionDecoder(self.stoch_size + self.deter_size,
self.action_size, 4, self.hidden_size)
self.value = DenseDecoder(self.stoch_size + self.deter_size, 1, 3,
self.hidden_size)
self.state = None
self.device = (torch.device("cuda")
if torch.cuda.is_available() else torch.device("cpu"))
def policy(self, obs: TensorType, state: List[TensorType], explore=True
) -> Tuple[TensorType, List[float], List[TensorType]]:
"""Returns the action. Runs through the encoder, recurrent model,
and policy to obtain action.
"""
if state is None:
            self.get_initial_state()
else:
self.state = state
post = self.state[:4]
action = self.state[4]
embed = self.encoder(obs)
post, _ = self.dynamics.obs_step(post, action, embed)
feat = self.dynamics.get_feature(post)
action_dist = self.actor(feat)
if explore:
action = action_dist.sample()
else:
action = action_dist.mean
logp = action_dist.log_prob(action)
self.state = post + [action]
return action, logp, self.state
def imagine_ahead(self, state: List[TensorType],
horizon: int) -> TensorType:
"""Given a batch of states, rolls out more state of length horizon.
"""
start = []
for s in state:
s = s.contiguous().detach()
shpe = [-1] + list(s.size())[2:]
start.append(s.view(*shpe))
def next_state(state):
feature = self.dynamics.get_feature(state).detach()
action = self.actor(feature).rsample()
next_state = self.dynamics.img_step(state, action)
return next_state
last = start
outputs = [[] for i in range(len(start))]
for _ in range(horizon):
last = next_state(last)
[o.append(s) for s, o in zip(last, outputs)]
outputs = [torch.stack(x, dim=0) for x in outputs]
imag_feat = self.dynamics.get_feature(outputs)
return imag_feat
def get_initial_state(self) -> List[TensorType]:
self.state = self.dynamics.get_initial_state(1) + [
torch.zeros(1, self.action_space.shape[0]).to(self.device)
]
return self.state
def value_function(self) -> TensorType:
return None
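# Shape-check sketch (added for illustration; not part of the RLlib module).
# It assumes torch is importable and a CPU-only run (RSSM pins its initial
# state to CUDA when available, so mixed devices would need explicit moves).
if __name__ == "__main__":
    B, T, A = 4, 10, 6                          # batch, horizon, action size
    encoder = ConvEncoder(depth=32)
    rssm = RSSM(action_size=A, embed_size=32 * 32)  # embed size is 32 * depth
    obs = torch.zeros(B, T, 3, 64, 64)          # dummy image observations
    actions = torch.zeros(B, T, A)
    embed = encoder(obs)                        # -> [B, T, 32 * depth]
    post, prior = rssm.observe(embed, actions)
    feat = rssm.get_feature(post)               # -> [B, T, stoch + deter]
    print(feat.shape)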
| pcmoritz/ray-1 | rllib/agents/dreamer/dreamer_model.py | Python | apache-2.0 | 19,097 | 0.000052 |
# The MIT License (MIT)
#
# Copyright (c) 2014 Muratahan Aykol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
import numpy as np
xdatcar = open('XDATCAR', 'r')
xyz = open('XDATCAR.xyz', 'w')
xyz_fract = open('XDATCAR_fract.xyz', 'w')
system = xdatcar.readline()
scale = float(xdatcar.readline().rstrip('\n'))
print scale
#get lattice vectors
a1 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
a2 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
a3 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
print a1
print a2
print a3
#Save scaled lattice vectors
lat_rec = open('lattice.vectors', 'w')
lat_rec.write(str(a1[0])+' '+str(a1[1])+' '+str(a1[2])+'\n')
lat_rec.write(str(a2[0])+' '+str(a2[1])+' '+str(a2[2])+'\n')
lat_rec.write(str(a3[0])+' '+str(a3[1])+' '+str(a3[2]))
lat_rec.close()
#Read xdatcar
element_names = xdatcar.readline().rstrip('\n').split()
element_dict = {}
element_numbers = xdatcar.readline().rstrip('\n').split()
i = 0
N = 0
for t in range(len(element_names)):
element_dict[element_names[t]] = int(element_numbers[i])
N += int(element_numbers[i])
i += 1
print element_dict
while True:
line = xdatcar.readline()
if len(line) == 0:
break
xyz.write(str(N) + "\ncomment\n")
xyz_fract.write(str(N)+"\ncomment\n")
for el in element_names:
for i in range(element_dict[el]):
p = xdatcar.readline().rstrip('\n').split()
coords = np.array([ float(s) for s in p ])
# print coords
cartesian_coords = coords[0]*a1+coords[1]*a2+coords[2]*a3
xyz.write(el+ " " + str(cartesian_coords[0])+ " " + str(cartesian_coords[1]) + " " + str(cartesian_coords[2]) +"\n")
xyz_fract.write(el+ " " + str(coords[0])+ " " + str(coords[1]) + " " + str(coords[2]) +"\n")
xdatcar.close()
xyz.close()
xyz_fract.close()
| aykol/mean-square-displacement | xdatcar2xyz.1.04.py | Python | mit | 2,939 | 0.008166 |
__all__ = ['jazPrint', 'jazShow']
class jazPrint:
    def __init__(self):
        self.command = "print"
    def call(self, interpreter, arg):
        return interpreter.GetScope().GetStackTop()
class jazShow:
    def __init__(self):
        self.command = "show"
    def call(self, interpreter, arg):
        return arg
# A dictionary of the classes in this file
# used to autoload the functions
Functions = {'jazShow': jazShow, 'jazPrint': jazPrint}
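# Autoload sketch (added for illustration; the real jazzy interpreter wiring
# is assumed, not shown): instantiate every exported command class and index
# it by its command name.
if __name__ == '__main__':
    commands = {}
    for cls in Functions.values():
        instance = cls()
        commands[instance.command] = instance
    # 'show' simply echoes its argument; 'print' would need a real interpreter.
    print(commands['show'].call(None, 'hello'))  # -> hello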
| joewashear007/jazzy | jazzy/functions/OutputFunc.py | Python | mit | 462 | 0.008658 |
import abc
import errno
import os
import platform
import socket
import time
import traceback
import mozprocess
__all__ = ["SeleniumServer", "ChromeDriverServer", "EdgeChromiumDriverServer", "OperaDriverServer",
"GeckoDriverServer", "InternetExplorerDriverServer", "EdgeDriverServer",
"ServoDriverServer", "WebKitDriverServer", "WebDriverServer"]
class WebDriverServer(object):
__metaclass__ = abc.ABCMeta
default_base_path = "/"
def __init__(self, logger, binary, host="127.0.0.1", port=None,
base_path="", env=None, args=None):
if binary is None:
raise ValueError("WebDriver server binary must be given "
"to --webdriver-binary argument")
self.logger = logger
self.binary = binary
self.host = host
if base_path == "":
self.base_path = self.default_base_path
else:
self.base_path = base_path
self.env = os.environ.copy() if env is None else env
self._port = port
self._cmd = None
self._args = args if args is not None else []
self._proc = None
@abc.abstractmethod
def make_command(self):
"""Returns the full command for starting the server process as a list."""
def start(self, block=False):
try:
self._run(block)
except KeyboardInterrupt:
self.stop()
def _run(self, block):
self._cmd = self.make_command()
self._proc = mozprocess.ProcessHandler(
self._cmd,
processOutputLine=self.on_output,
env=self.env,
storeOutput=False)
self.logger.debug("Starting WebDriver: %s" % ' '.join(self._cmd))
try:
self._proc.run()
except OSError as e:
if e.errno == errno.ENOENT:
raise IOError(
"WebDriver executable not found: %s" % self.binary)
raise
self.logger.debug(
"Waiting for WebDriver to become accessible: %s" % self.url)
try:
wait_for_service((self.host, self.port))
except Exception:
self.logger.error(
"WebDriver was not accessible "
"within the timeout:\n%s" % traceback.format_exc())
raise
if block:
self._proc.wait()
def stop(self, force=False):
if self.is_alive:
return self._proc.kill()
return not self.is_alive
@property
def is_alive(self):
return hasattr(self._proc, "proc") and self._proc.poll() is None
def on_output(self, line):
self.logger.process_output(self.pid,
line.decode("utf8", "replace"),
command=" ".join(self._cmd))
@property
def pid(self):
if self._proc is not None:
return self._proc.pid
@property
def url(self):
return "http://%s:%i%s" % (self.host, self.port, self.base_path)
@property
def port(self):
if self._port is None:
self._port = get_free_port()
return self._port
class SeleniumServer(WebDriverServer):
default_base_path = "/wd/hub"
def make_command(self):
return ["java", "-jar", self.binary, "-port", str(self.port)] + self._args
class ChromeDriverServer(WebDriverServer):
def __init__(self, logger, binary="chromedriver", port=None,
base_path="", args=None):
WebDriverServer.__init__(
self, logger, binary, port=port, base_path=base_path, args=args)
def make_command(self):
return [self.binary,
cmd_arg("port", str(self.port)),
cmd_arg("url-base", self.base_path) if self.base_path else ""] + self._args
class EdgeChromiumDriverServer(WebDriverServer):
def __init__(self, logger, binary="msedgedriver", port=None,
base_path="", args=None):
WebDriverServer.__init__(
self, logger, binary, port=port, base_path=base_path, args=args)
def make_command(self):
return [self.binary,
cmd_arg("port", str(self.port)),
cmd_arg("url-base", self.base_path) if self.base_path else ""] + self._args
class EdgeDriverServer(WebDriverServer):
def __init__(self, logger, binary="microsoftwebdriver.exe", port=None,
base_path="", host="localhost", args=None):
WebDriverServer.__init__(
self, logger, binary, host=host, port=port, args=args)
def make_command(self):
return [self.binary,
"--port=%s" % str(self.port)] + self._args
class OperaDriverServer(ChromeDriverServer):
def __init__(self, logger, binary="operadriver", port=None,
base_path="", args=None):
ChromeDriverServer.__init__(
self, logger, binary, port=port, base_path=base_path, args=args)
class InternetExplorerDriverServer(WebDriverServer):
def __init__(self, logger, binary="IEDriverServer.exe", port=None,
base_path="", host="localhost", args=None):
WebDriverServer.__init__(
self, logger, binary, host=host, port=port, args=args)
def make_command(self):
return [self.binary,
"--port=%s" % str(self.port)] + self._args
class GeckoDriverServer(WebDriverServer):
def __init__(self, logger, marionette_port=2828, binary="geckodriver",
host="127.0.0.1", port=None, args=None):
env = os.environ.copy()
env["RUST_BACKTRACE"] = "1"
WebDriverServer.__init__(self, logger, binary, host=host, port=port, env=env, args=args)
self.marionette_port = marionette_port
def make_command(self):
return [self.binary,
"--marionette-port", str(self.marionette_port),
"--host", self.host,
"--port", str(self.port)] + self._args
class SafariDriverServer(WebDriverServer):
def __init__(self, logger, binary="safaridriver", port=None, args=None):
WebDriverServer.__init__(
self, logger, binary, port=port, args=args)
def make_command(self):
return [self.binary,
"--port=%s" % str(self.port)] + self._args
class ServoDriverServer(WebDriverServer):
def __init__(self, logger, binary="servo", binary_args=None, host="127.0.0.1",
port=None, args=None):
env = os.environ.copy()
env["RUST_BACKTRACE"] = "1"
WebDriverServer.__init__(self, logger, binary, host=host, port=port, env=env, args=args)
self.binary_args = binary_args
def make_command(self):
command = [self.binary,
"--webdriver=%s" % self.port,
"--hard-fail",
"--headless"] + self._args
if self.binary_args:
command += self.binary_args
return command
class WebKitDriverServer(WebDriverServer):
def __init__(self, logger, binary=None, port=None, args=None):
WebDriverServer.__init__(self, logger, binary, port=port, args=args)
def make_command(self):
return [self.binary, "--port=%s" % str(self.port)] + self._args
def cmd_arg(name, value=None):
prefix = "-" if platform.system() == "Windows" else "--"
rv = prefix + name
if value is not None:
rv += "=" + value
return rv
def get_free_port():
"""Get a random unbound port"""
while True:
s = socket.socket()
try:
s.bind(("127.0.0.1", 0))
except socket.error:
continue
else:
return s.getsockname()[1]
finally:
s.close()
def wait_for_service(addr, timeout=15):
"""Waits until network service given as a tuple of (host, port) becomes
available or the `timeout` duration is reached, at which point
``socket.error`` is raised."""
end = time.time() + timeout
while end > time.time():
so = socket.socket()
try:
so.connect(addr)
except socket.timeout:
pass
except socket.error as e:
if e[0] != errno.ECONNREFUSED:
raise
else:
return True
finally:
so.close()
time.sleep(0.5)
raise socket.error("Service is unavailable: %s:%i" % addr)
| pyfisch/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/webdriver_server.py | Python | mpl-2.0 | 8,378 | 0.001432 |
'''
Check the performance counters from SQL Server
See http://blogs.msdn.com/b/psssql/archive/2013/09/23/interpreting-the-counter-values-from-sys-dm-os-performance-counters.aspx
for information on how to report the metrics available in the sys.dm_os_performance_counters table
'''
# stdlib
import traceback
# 3rd party
import adodbapi
# project
from checks import AgentCheck
ALL_INSTANCES = 'ALL'
VALID_METRIC_TYPES = ('gauge', 'rate', 'histogram')
# Constant for SQLServer cntr_type
PERF_LARGE_RAW_BASE = 1073939712
PERF_RAW_LARGE_FRACTION = 537003264
PERF_AVERAGE_BULK = 1073874176
PERF_COUNTER_BULK_COUNT = 272696576
PERF_COUNTER_LARGE_RAWCOUNT = 65792
# Queries
COUNTER_TYPE_QUERY = '''select distinct cntr_type
from sys.dm_os_performance_counters
where counter_name = ?;'''
BASE_NAME_QUERY = '''select distinct counter_name
from sys.dm_os_performance_counters
where (counter_name=? or counter_name=?
or counter_name=?) and cntr_type=%s;''' % PERF_LARGE_RAW_BASE
INSTANCES_QUERY = '''select instance_name
from sys.dm_os_performance_counters
where counter_name=? and instance_name!='_Total';'''
VALUE_AND_BASE_QUERY = '''select cntr_value
from sys.dm_os_performance_counters
where (counter_name=? or counter_name=?)
and instance_name=?
order by cntr_type;'''
class SQLConnectionError(Exception):
"""
Exception raised for SQL instance connection issues
"""
pass
class SQLServer(AgentCheck):
SOURCE_TYPE_NAME = 'sql server'
SERVICE_CHECK_NAME = 'sqlserver.can_connect'
# FIXME: 6.x, set default to 5s (like every check)
DEFAULT_COMMAND_TIMEOUT = 30
METRICS = [
('sqlserver.buffer.cache_hit_ratio', 'Buffer cache hit ratio', ''), # RAW_LARGE_FRACTION
('sqlserver.buffer.page_life_expectancy', 'Page life expectancy', ''), # LARGE_RAWCOUNT
('sqlserver.stats.batch_requests', 'Batch Requests/sec', ''), # BULK_COUNT
('sqlserver.stats.sql_compilations', 'SQL Compilations/sec', ''), # BULK_COUNT
('sqlserver.stats.sql_recompilations', 'SQL Re-Compilations/sec', ''), # BULK_COUNT
('sqlserver.stats.connections', 'User Connections', ''), # LARGE_RAWCOUNT
('sqlserver.stats.lock_waits', 'Lock Waits/sec', '_Total'), # BULK_COUNT
('sqlserver.access.page_splits', 'Page Splits/sec', ''), # BULK_COUNT
('sqlserver.stats.procs_blocked', 'Processes blocked', ''), # LARGE_RAWCOUNT
('sqlserver.buffer.checkpoint_pages', 'Checkpoint pages/sec', '') # BULK_COUNT
]
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Cache connections
self.connections = {}
self.failed_connections = {}
self.instances_metrics = {}
# Pre-process the list of metrics to collect
custom_metrics = init_config.get('custom_metrics', [])
for instance in instances:
try:
self._make_metric_list_to_collect(instance, custom_metrics)
except SQLConnectionError:
self.log.exception("Skipping SQL Server instance")
continue
def _make_metric_list_to_collect(self, instance, custom_metrics):
"""
Store the list of metrics to collect by instance_key.
Will also create and cache cursors to query the db.
"""
metrics_to_collect = []
for name, counter_name, instance_name in self.METRICS:
try:
sql_type, base_name = self.get_sql_type(instance, counter_name)
metrics_to_collect.append(self.typed_metric(name,
counter_name,
base_name,
None,
sql_type,
instance_name,
None))
except SQLConnectionError:
raise
except Exception:
self.log.warning("Can't load the metric %s, ignoring", name, exc_info=True)
continue
# Load any custom metrics from conf.d/sqlserver.yaml
for row in custom_metrics:
user_type = row.get('type')
if user_type is not None and user_type not in VALID_METRIC_TYPES:
self.log.error('%s has an invalid metric type: %s', row['name'], user_type)
sql_type = None
try:
if user_type is None:
sql_type, base_name = self.get_sql_type(instance, row['counter_name'])
except Exception:
self.log.warning("Can't load the metric %s, ignoring", row['name'], exc_info=True)
continue
metrics_to_collect.append(self.typed_metric(row['name'],
row['counter_name'],
base_name,
user_type,
sql_type,
row.get('instance_name', ''),
row.get('tag_by', None)))
instance_key = self._conn_key(instance)
self.instances_metrics[instance_key] = metrics_to_collect
def typed_metric(self, dd_name, sql_name, base_name, user_type, sql_type, instance_name, tag_by):
'''
Create the appropriate SqlServerMetric object, each implementing its method to
fetch the metrics properly.
If a `type` was specified in the config, it is used to report the value
directly fetched from SQLServer. Otherwise, it is decided based on the
sql_type, according to microsoft's documentation.
'''
metric_type_mapping = {
PERF_COUNTER_BULK_COUNT: (self.rate, SqlSimpleMetric),
PERF_COUNTER_LARGE_RAWCOUNT: (self.gauge, SqlSimpleMetric),
PERF_LARGE_RAW_BASE: (self.gauge, SqlSimpleMetric),
PERF_RAW_LARGE_FRACTION: (self.gauge, SqlFractionMetric),
PERF_AVERAGE_BULK: (self.gauge, SqlIncrFractionMetric)
}
if user_type is not None:
# user type overrides any other value
metric_type = getattr(self, user_type)
cls = SqlSimpleMetric
else:
metric_type, cls = metric_type_mapping[sql_type]
return cls(dd_name, sql_name, base_name,
metric_type, instance_name, tag_by, self.log)
def _get_access_info(self, instance):
''' Convenience method to extract info from instance
'''
host = instance.get('host', '127.0.0.1,1433')
username = instance.get('username')
password = instance.get('password')
database = instance.get('database', 'master')
return host, username, password, database
def _conn_key(self, instance):
''' Return a key to use for the connection cache
'''
host, username, password, database = self._get_access_info(instance)
return '%s:%s:%s:%s' % (host, username, password, database)
def _conn_string(self, instance):
''' Return a connection string to use with adodbapi
'''
host, username, password, database = self._get_access_info(instance)
conn_str = 'Provider=SQLOLEDB;Data Source=%s;Initial Catalog=%s;' \
% (host, database)
if username:
conn_str += 'User ID=%s;' % (username)
if password:
conn_str += 'Password=%s;' % (password)
if not username and not password:
conn_str += 'Integrated Security=SSPI;'
return conn_str
def get_cursor(self, instance, cache_failure=False):
'''
Return a cursor to execute query against the db
Cursor are cached in the self.connections dict
'''
conn_key = self._conn_key(instance)
host = instance.get('host')
database = instance.get('database')
service_check_tags = [
'host:%s' % host,
'db:%s' % database
]
if conn_key in self.failed_connections:
raise self.failed_connections[conn_key]
if conn_key not in self.connections:
try:
conn = adodbapi.connect(
self._conn_string(instance),
timeout=int(instance.get('command_timeout',
self.DEFAULT_COMMAND_TIMEOUT))
)
self.connections[conn_key] = conn
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)
except Exception:
cx = "%s - %s" % (host, database)
message = "Unable to connect to SQL Server for instance %s." % cx
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=message)
password = instance.get('password')
tracebk = traceback.format_exc()
if password is not None:
tracebk = tracebk.replace(password, "*" * 6)
# Avoid multiple connection timeouts (too slow):
# save the exception, re-raise it when needed
cxn_failure_exp = SQLConnectionError("%s \n %s" % (message, tracebk))
if cache_failure:
self.failed_connections[conn_key] = cxn_failure_exp
raise cxn_failure_exp
conn = self.connections[conn_key]
cursor = conn.cursor()
return cursor
def get_sql_type(self, instance, counter_name):
'''
Return the type of the performance counter so that we can report it to
Datadog correctly
If the sql_type is one that needs a base (PERF_RAW_LARGE_FRACTION and
PERF_AVERAGE_BULK), the name of the base counter will also be returned
'''
cursor = self.get_cursor(instance, cache_failure=True)
cursor.execute(COUNTER_TYPE_QUERY, (counter_name,))
(sql_type,) = cursor.fetchone()
if sql_type == PERF_LARGE_RAW_BASE:
self.log.warning("Metric %s is of type Base and shouldn't be reported this way",
counter_name)
base_name = None
if sql_type in [PERF_AVERAGE_BULK, PERF_RAW_LARGE_FRACTION]:
# This is an ugly hack. For certains type of metric (PERF_RAW_LARGE_FRACTION
# and PERF_AVERAGE_BULK), we need two metrics: the metrics specified and
# a base metrics to get the ratio. There is no unique schema so we generate
# the possible candidates and we look at which ones exist in the db.
candidates = (counter_name + " base",
counter_name.replace("(ms)", "base"),
counter_name.replace("Avg ", "") + " base"
)
try:
cursor.execute(BASE_NAME_QUERY, candidates)
base_name = cursor.fetchone().counter_name.strip()
self.log.debug("Got base metric: %s for metric: %s", base_name, counter_name)
except Exception, e:
self.log.warning("Could not get counter_name of base for metric: %s", e)
self.close_cursor(cursor)
return sql_type, base_name
def check(self, instance):
"""
Fetch the metrics from the sys.dm_os_performance_counters table
"""
cursor = self.get_cursor(instance)
custom_tags = instance.get('tags', [])
instance_key = self._conn_key(instance)
metrics_to_collect = self.instances_metrics[instance_key]
for metric in metrics_to_collect:
try:
metric.fetch_metric(cursor, custom_tags)
except Exception, e:
self.log.warning("Could not fetch metric %s: %s" % (metric.datadog_name, e))
self.close_cursor(cursor)
def close_cursor(self, cursor):
"""
We close the cursor explicitly b/c we had proven memory leaks
We handle any exception from closing, although according to the doc:
"in adodbapi, it is NOT an error to re-close a closed cursor"
"""
try:
cursor.close()
except Exception as e:
self.log.warning("Could not close adodbapi cursor\n{0}".format(e))
class SqlServerMetric(object):
'''General class for common methods, should never be instantiated directly
'''
def __init__(self, datadog_name, sql_name, base_name,
report_function, instance, tag_by, logger):
self.datadog_name = datadog_name
self.sql_name = sql_name
self.base_name = base_name
self.report_function = report_function
self.instance = instance
self.tag_by = tag_by
self.instances = None
self.past_values = {}
self.log = logger
    def fetch_metric(self, cursor, tags):
        raise NotImplementedError
class SqlSimpleMetric(SqlServerMetric):
def fetch_metric(self, cursor, tags):
query_base = '''
select instance_name, cntr_value
from sys.dm_os_performance_counters
where counter_name = ?
'''
if self.instance == ALL_INSTANCES:
query = query_base + "and instance_name!= '_Total'"
query_content = (self.sql_name,)
else:
query = query_base + "and instance_name=?"
query_content = (self.sql_name, self.instance)
cursor.execute(query, query_content)
rows = cursor.fetchall()
for instance_name, cntr_value in rows:
metric_tags = tags
if self.instance == ALL_INSTANCES:
metric_tags = metric_tags + ['%s:%s' % (self.tag_by, instance_name.strip())]
self.report_function(self.datadog_name, cntr_value,
tags=metric_tags)
class SqlFractionMetric(SqlServerMetric):
def set_instances(self, cursor):
if self.instance == ALL_INSTANCES:
cursor.execute(INSTANCES_QUERY, (self.sql_name,))
self.instances = [row.instance_name for row in cursor.fetchall()]
else:
self.instances = [self.instance]
def fetch_metric(self, cursor, tags):
'''
Because we need to query the metrics by matching pairs, we can't query
all of them together without having to perform some matching based on
the name afterwards so instead we query instance by instance.
We cache the list of instance so that we don't have to look it up every time
'''
if self.instances is None:
self.set_instances(cursor)
for instance in self.instances:
cursor.execute(VALUE_AND_BASE_QUERY, (self.sql_name, self.base_name, instance))
rows = cursor.fetchall()
if len(rows) != 2:
self.log.warning("Missing counter to compute fraction for "
"metric %s instance %s, skipping", self.sql_name, instance)
continue
value = rows[0, "cntr_value"]
base = rows[1, "cntr_value"]
metric_tags = tags
if self.instance == ALL_INSTANCES:
metric_tags = metric_tags + ['%s:%s' % (self.tag_by, instance.strip())]
self.report_fraction(value, base, metric_tags)
def report_fraction(self, value, base, metric_tags):
try:
result = value / float(base)
self.report_function(self.datadog_name, result, tags=metric_tags)
except ZeroDivisionError:
self.log.debug("Base value is 0, won't report metric %s for tags %s",
self.datadog_name, metric_tags)
class SqlIncrFractionMetric(SqlFractionMetric):
def report_fraction(self, value, base, metric_tags):
key = "key:" + "".join(metric_tags)
if key in self.past_values:
old_value, old_base = self.past_values[key]
diff_value = value - old_value
diff_base = base - old_base
try:
result = diff_value / float(diff_base)
self.report_function(self.datadog_name, result, tags=metric_tags)
except ZeroDivisionError:
self.log.debug("Base value is 0, won't report metric %s for tags %s",
self.datadog_name, metric_tags)
self.past_values[key] = (value, base)
| oneandoneis2/dd-agent | checks.d/sqlserver.py | Python | bsd-3-clause | 17,085 | 0.002341 |
"""
Support for interface with an LG webOS Smart TV.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.webostv/
"""
import logging
import asyncio
from datetime import timedelta
from urllib.parse import urlparse
import voluptuous as vol
import homeassistant.util as util
from homeassistant.components.media_player import (
SUPPORT_TURN_ON, SUPPORT_TURN_OFF, SUPPORT_PLAY,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP,
SUPPORT_SELECT_SOURCE, SUPPORT_PLAY_MEDIA, MEDIA_TYPE_CHANNEL,
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_MAC, CONF_CUSTOMIZE, STATE_OFF,
STATE_PLAYING, STATE_PAUSED,
STATE_UNKNOWN, CONF_NAME, CONF_FILENAME)
from homeassistant.loader import get_component
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pylgtv==0.1.7',
'websockets==3.2',
'wakeonlan==0.2.2']
_CONFIGURING = {} # type: Dict[str, str]
_LOGGER = logging.getLogger(__name__)
CONF_SOURCES = 'sources'
DEFAULT_NAME = 'LG webOS Smart TV'
WEBOSTV_CONFIG_FILE = 'webostv.conf'
SUPPORT_WEBOSTV = SUPPORT_TURN_OFF | \
SUPPORT_NEXT_TRACK | SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_STEP | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY_MEDIA | SUPPORT_PLAY
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
CUSTOMIZE_SCHEMA = vol.Schema({
vol.Optional(CONF_SOURCES):
vol.All(cv.ensure_list, [cv.string]),
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_MAC): cv.string,
vol.Optional(CONF_CUSTOMIZE, default={}): CUSTOMIZE_SCHEMA,
vol.Optional(CONF_FILENAME, default=WEBOSTV_CONFIG_FILE): cv.string
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the LG WebOS TV platform."""
if discovery_info is not None:
host = urlparse(discovery_info[1]).hostname
else:
host = config.get(CONF_HOST)
if host is None:
_LOGGER.error("No TV found in configuration file or with discovery")
return False
# Only act if we are not already configuring this host
if host in _CONFIGURING:
return
mac = config.get(CONF_MAC)
name = config.get(CONF_NAME)
customize = config.get(CONF_CUSTOMIZE)
config = hass.config.path(config.get(CONF_FILENAME))
setup_tv(host, mac, name, customize, config, hass, add_devices)
def setup_tv(host, mac, name, customize, config, hass, add_devices):
"""Set up a LG WebOS TV based on host parameter."""
from pylgtv import WebOsClient
from pylgtv import PyLGTVPairException
from websockets.exceptions import ConnectionClosed
client = WebOsClient(host, config)
if not client.is_registered():
if host in _CONFIGURING:
# Try to pair.
try:
client.register()
except PyLGTVPairException:
_LOGGER.warning(
"Connected to LG webOS TV %s but not paired", host)
return
except (OSError, ConnectionClosed, TypeError,
asyncio.TimeoutError):
_LOGGER.error("Unable to connect to host %s", host)
return
else:
# Not registered, request configuration.
_LOGGER.warning("LG webOS TV %s needs to be paired", host)
request_configuration(
host, mac, name, customize, config, hass, add_devices)
return
# If we came here and configuring this host, mark as done.
if client.is_registered() and host in _CONFIGURING:
request_id = _CONFIGURING.pop(host)
configurator = get_component('configurator')
configurator.request_done(request_id)
add_devices([LgWebOSDevice(host, mac, name, customize, config)], True)
def request_configuration(
host, mac, name, customize, config, hass, add_devices):
"""Request configuration steps from the user."""
configurator = get_component('configurator')
# We got an error if this method is called while we are configuring
if host in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING[host], 'Failed to pair, please try again.')
return
# pylint: disable=unused-argument
def lgtv_configuration_callback(data):
"""Handle configuration changes."""
setup_tv(host, mac, name, customize, config, hass, add_devices)
_CONFIGURING[host] = configurator.request_config(
hass, name, lgtv_configuration_callback,
description='Click start and accept the pairing request on your TV.',
description_image='/static/images/config_webos.png',
submit_caption='Start pairing request'
)
class LgWebOSDevice(MediaPlayerDevice):
"""Representation of a LG WebOS TV."""
def __init__(self, host, mac, name, customize, config):
"""Initialize the webos device."""
from pylgtv import WebOsClient
from wakeonlan import wol
self._client = WebOsClient(host, config)
self._wol = wol
self._mac = mac
self._customize = customize
self._name = name
# Assume that the TV is not muted
self._muted = False
# Assume that the TV is in Play mode
self._playing = True
self._volume = 0
self._current_source = None
self._current_source_id = None
self._state = STATE_UNKNOWN
self._source_list = {}
self._app_list = {}
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update(self):
"""Retrieve the latest data."""
from websockets.exceptions import ConnectionClosed
try:
current_input = self._client.get_input()
if current_input is not None:
self._current_source_id = current_input
if self._state in (STATE_UNKNOWN, STATE_OFF):
self._state = STATE_PLAYING
else:
self._state = STATE_OFF
self._current_source = None
self._current_source_id = None
if self._state is not STATE_OFF:
self._muted = self._client.get_muted()
self._volume = self._client.get_volume()
self._source_list = {}
self._app_list = {}
conf_sources = self._customize.get(CONF_SOURCES, [])
for app in self._client.get_apps():
self._app_list[app['id']] = app
if conf_sources:
if app['id'] == self._current_source_id:
self._current_source = app['title']
self._source_list[app['title']] = app
elif (app['id'] in conf_sources or
any(word in app['title']
for word in conf_sources) or
any(word in app['id']
for word in conf_sources)):
self._source_list[app['title']] = app
else:
self._current_source = app['title']
self._source_list[app['title']] = app
for source in self._client.get_inputs():
if conf_sources:
if source['id'] == self._current_source_id:
self._source_list[source['label']] = source
elif (source['label'] in conf_sources or
any(source['label'].find(word) != -1
for word in conf_sources)):
self._source_list[source['label']] = source
else:
self._source_list[source['label']] = source
except (OSError, ConnectionClosed, TypeError,
asyncio.TimeoutError):
self._state = STATE_OFF
self._current_source = None
self._current_source_id = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume / 100.0
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def source_list(self):
"""List of available input sources."""
return sorted(self._source_list.keys())
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_CHANNEL
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._current_source_id in self._app_list:
icon = self._app_list[self._current_source_id]['largeIcon']
if not icon.startswith('http'):
icon = self._app_list[self._current_source_id]['icon']
return icon
return None
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._mac:
return SUPPORT_WEBOSTV | SUPPORT_TURN_ON
return SUPPORT_WEBOSTV
def turn_off(self):
"""Turn off media player."""
from websockets.exceptions import ConnectionClosed
self._state = STATE_OFF
try:
self._client.power_off()
except (OSError, ConnectionClosed, TypeError,
asyncio.TimeoutError):
pass
def turn_on(self):
"""Turn on the media player."""
if self._mac:
self._wol.send_magic_packet(self._mac)
def volume_up(self):
"""Volume up the media player."""
self._client.volume_up()
def volume_down(self):
"""Volume down media player."""
self._client.volume_down()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
tv_volume = volume * 100
self._client.set_volume(tv_volume)
def mute_volume(self, mute):
"""Send mute command."""
self._muted = mute
self._client.set_mute(mute)
def media_play_pause(self):
"""Simulate play pause media player."""
if self._playing:
self.media_pause()
else:
self.media_play()
    def select_source(self, source):
        """Select input source."""
        source_dict = self._source_list.get(source)
        if source_dict is None:
            _LOGGER.warning("Source %s not found for %s", source, self.name)
            return
        if source_dict.get('title'):
            self._current_source_id = source_dict['id']
            self._current_source = source_dict['title']
            self._client.launch_app(source_dict['id'])
        elif source_dict.get('label'):
            self._current_source_id = source_dict['id']
            self._current_source = source_dict['label']
            self._client.set_input(source_dict['id'])
def media_play(self):
"""Send play command."""
self._playing = True
self._state = STATE_PLAYING
self._client.play()
def media_pause(self):
"""Send media pause command to media player."""
self._playing = False
self._state = STATE_PAUSED
self._client.pause()
def media_next_track(self):
"""Send next track command."""
self._client.fast_forward()
def media_previous_track(self):
"""Send the previous track command."""
self._client.rewind()
| JshWright/home-assistant | homeassistant/components/media_player/webostv.py | Python | apache-2.0 | 12,114 | 0 |
# -*- coding: utf-8 -*-
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'django',
'USER': 'django',
'PASSWORD': 'PUTPASSWORDHERE',
'HOST': '127.0.0.1',
'PORT': '5432',
}
} | makiwara/onemoretime | settings/db_settings_sample.py | Python | mit | 267 | 0.003745 |
# Functions for working with H3 and hemispheres etc.
from itertools import chain
from sage.all import (Infinity, Matrix, ZZ, QQ, RR, CC, NumberField,
Graph, srange, Set, sign, var, implicit_plot3d, NFCusp, Integer, oo,
infinity, polygen, point, line, circle)
from utils import (nf, to_k, cusp, cusp_label, Imat, apply,
translate_cusp, negate_cusp, conj_cusp,
smallest_ideal_class_representatives,
alpha_index_with_translation)
from alphas import precomputed_alphas
def make_k(dk):
"""Given a negative fundamental discriminant, or positive square-free
d, constructs the associated imaginary quadratic field and returns
a dict containing this and useful other data
"""
x = polygen(QQ)
if dk>0:
assert dk.is_squarefree()
dk = -dk if dk%4==3 else -4*dk
if dk%4==1:
k = NumberField(x**2-x+(1-dk)//4, 'w')
else:
k = NumberField(x**2-dk//4, 'w')
assert k.discriminant() == dk
w = k.gen()
emb = next(e for e in k.embeddings(CC) if e(w).imag()>0)
return {'k': k, 'dk': dk, 'w': w, 'wbar': w.trace()-w, 'Ok': k.ring_of_integers(),
'emb': emb, 'Ymax': emb(w).imag()/2,
'Ireps': [c.ideal() for c in k.class_group()]}
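# Added illustration (hedged sketch, not part of the original module): a quick
# sanity check of make_k() on the positive square-free input 5, which should
# give k = Q(sqrt(-5)) with field discriminant -20 and class number 2.
def _example_make_k():
    kdata = make_k(ZZ(5))
    k, w = kdata['k'], kdata['w']
    assert k.discriminant() == -20
    assert w**2 == -5
    assert k.class_number() == 2
    assert len(kdata['Ireps']) == 2   # one ideal per ideal class
    return kdata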
# Points of H_3 are represented as pairs [z,t2] where z is in k and t2
# in QQ is the square of the height (so the actual point coordinates
# are (z,sqrt(t2))).
# Each principal cusp alpha=r/s with (r,s)=(1) determines the
# hemisphere S_alpha with equation |z-alpha|^2+t^2=1/|s|^2, or
# N(s*z-r)+N(s)*t^2=1.
def radius_squared(alpha):
"""
For a principal cusp alpha, return the square radius of S_alpha.
"""
return 1/alpha.denominator().norm()
def cusp_to_point(alpha):
"""
For a principal cusp alpha = a in k, return the point [a,
radius_squared(alpha)].
"""
return [to_k(alpha), radius_squared(alpha)]
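# Added illustration (hedged, not part of the original module): for
# k = Q(sqrt(-5)) the cusp w/2 is principal (the ideal (w,2) is trivial since
# N(w)=5 and N(2)=4 are coprime), so S_{w/2} has square radius 1/N(2) = 1/4,
# i.e. equation |z - w/2|^2 + t^2 = 1/4. This assumes the cusp/to_k helpers
# imported from utils behave as they are used elsewhere in this module.
def _example_cusp_to_point():
    k = make_k(ZZ(5))['k']
    w = k.gen()
    a = cusp(w/2, k)
    assert radius_squared(a) == QQ(1)/4
    return cusp_to_point(a)   # expected [w/2, 1/4]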
def tri_inter(a0, a1, a2):
"""Returns the triple intersection point of the hemispheres S_a_i,
where a0, a1, a2 are principal cusps, if there is one, as a pair
[z,t2] where z is in k and t2 in QQ is the square of the vertical
coordinate.
"""
alist = [a0,a1,a2]
# Check the cusps are principal, not infinity, and with unit ideal
assert all((not a.is_infinity()) and (a.ideal()==1) for a in alist)
# Define the square radii and centres
rho0, rho1, rho2 = [radius_squared(a) for a in alist]
al0, al1, al2 = [to_k(a) for a in alist]
n0, n1, n2 = [a.norm() for a in [al0, al1, al2]]
#
delta = al1*(al0-al2).conjugate() + al2*(al1-al0).conjugate() + al0*(al2-al1).conjugate()
if delta==0:
return None
z = (al1*(n0-n2+rho2-rho0) + al2*(n1-n0+rho0-rho1) + al0*(n2-n1+rho1-rho2)) / delta
t2 = rho0 - n0 - z.norm() + 2*(al0*z.conjugate()).real()
assert t2 == rho1 - n1 - z.norm() + 2*(al1*z.conjugate()).real()
assert t2 == rho2 - n2 - z.norm() + 2*(al2*z.conjugate()).real()
return None if t2<0 else [z,t2]
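# Added illustration (hedged, not part of the original module): a hand-checked
# triple intersection. In k = Q(sqrt(-1)) the three unit hemispheres S_0, S_1
# and S_w meet at z = (1+w)/2, t^2 = 1/2, since that z is equidistant from
# 0, 1 and w and has |z|^2 = 1/2. Assumes the utils cusp() helper as used
# elsewhere in this module.
def _example_tri_inter():
    k = make_k(ZZ(1))['k']
    w = k.gen()   # w^2 = -1
    a0, a1, a2 = [cusp(z, k) for z in (0, 1, w)]
    assert tri_inter(a0, a1, a2) == [(1 + w)/2, QQ(1)/2]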
def bi_inter(a1, a2):
"""Returns the point on the intersection of the hemispheres S_a_i
(where a1, a2 are principal cusps) which is on the line from a1 to
a2, as a pair [z,t2] where z is in k and t2 in QQ is the square of
the vertical coordinate.
Use: when both S_a_i pass through a singular point.
"""
alist = [a1,a2]
# Check the cusps are principal, not infinity, and with unit ideal
assert all((not a.is_infinity()) and (a.ideal()==1) for a in alist)
# Define the square radii and centres
rho1, rho2 = [radius_squared(a) for a in alist]
al1, al2 = [to_k(a) for a in alist]
n1, n2 = [a.norm() for a in [al1, al2]]
#
delta = al2-al1
z = ((al1+al2) + (rho1-rho2)*delta/delta.norm())/2
t2 = rho1 - n1 - z.norm() + 2*(al1*z.conjugate()).real()
assert t2 == rho2 - n2 - z.norm() + 2*(al2*z.conjugate()).real()
return None if t2<0 else [z,t2]
def is_under(P, a):
"""
Returns -1,0,+1 according as P is over, on, under S_a (a principal)
"""
z, t2 = P
ad = a.denominator()
al = a.numerator()/ad
return sign(1/ad.norm() - (z-al).norm() - t2)
def is_inside(a, b, strict=False):
"""Returns True iff a is inside (or strictly inside) the circle
centred on b, where a,b are cusps with b principal.
"""
k = nf(a)
d2 = (to_k(a,k)-to_k(b,k)).norm()
r2 = radius_squared(b)
if strict:
return d2 < r2
else:
return d2 <= r2
def covering_hemispheres1(P, option=None):
"""For P=[z,t2] in H_3, returns a list of cusps alpha such that P lies
on or under S_alpha.
If option is 'exact' only returns alpha for which P is on S_alpha exactly.
If option is 'strict' only returns alpha for which P is strictly under S_alpha.
Otherwise (default), returns alpha for which P is under or on S_alpha.
"""
alphas = []
z, t2 = P
k = z.parent()
a = z.numerator() # in O_K
b = z.denominator() # in Z
sbound = (1/t2).floor()
for snorm in range(1,1+sbound):
umax = b*b*(1-snorm*t2)
for s in k.elements_of_norm(snorm):
#print("s = {}".format(s))
if option=='exact':
urange = [umax] if umax in ZZ else []
else:
urange = srange(umax.floor()+1)
sa = s*a
#print("umax={}, urange={}".format(umax,list(urange)))
for unorm in urange:
if unorm<umax or option != 'strict':
for u in k.elements_of_norm(unorm):
#print(" u = {}".format(u))
for rb in [sa+u, sa-u] if u else [sa]:
r = rb/b
#print(" r = {}".format(r))
if r.is_integral() and k.ideal(r,s)==1:
alphas.append(cusp(r/s, k))
return alphas
def covering_hemispheres2(P, option=None, debug=False):
"""For P=[z,t2] in H_3, returns a list of cusps alpha such that P lies
on or under S_alpha.
If option is 'exact' only returns alpha for which P is on S_alpha exactly.
If option is 'strict' only returns alpha for which P is strictly under S_alpha.
Otherwise (default), returns alpha for which P is under or on S_alpha.
"""
alphas = []
z, t2 = P
k = z.parent()
a = z.numerator() # in O_K
sbound = (1/t2).floor()
if debug:
print("t2={} so bound on N(s) = {}".format(t2, sbound))
for snorm in srange(1,1+sbound):
for s in k.elements_of_norm(snorm):
sz = s*z
d1 = 1/snorm - t2
assert d1>=0
if debug:
print("s = {}, norm {}: d1 = {}".format(s, snorm, d1))
rbound = ((RR(sz.norm()).sqrt()+1)**2).floor()
if debug:
print("Bound on N(r) = {}".format(rbound))
for rnorm in srange(1+rbound):
for r in k.elements_of_norm(rnorm):
if k.ideal(r,s)!=1:
continue
for pm in [-1,1] if r else [1]:
a = pm*r/s
d = d1 - (a-z).norm()
if debug and d>=0:
print("a = {}, d = {}".format(a, d))
# we need d==0 for exact, d>0 for strict, else d>=0
ok = (d>0) if option=='strict' else (d==0) if option=='exact' else (d>=0)
if ok:
a = cusp(a,k)
if debug:
print(" OK {}".format(a))
alphas.append(a)
return alphas
def covering_hemispheres_test(P, option=None):
res1 = covering_hemispheres1(P, option)
res2 = covering_hemispheres2(P, option)
if sorted(res1) != sorted(res2):
print("old and new disagree for P={}".format(P))
return res1
covering_hemispheres = covering_hemispheres2
def hemispheres_through(P):
return covering_hemispheres(P, 'exact')
def properly_covering_hemispheres(P):
return covering_hemispheres(P, 'strict')
def is_maximal(P):
return len(properly_covering_hemispheres(P))==0
def apply3d(M, P):
"""
Return M(P) where M is in SL(2,O_K) and P=[z,t2] in H3.
"""
z, t2 = P
k = z.parent()
try:
a, b, c, d = [k(r) for r in M.list()]
except AttributeError:
a, b, c, d = [k(r) for r in M]
n = (c*z+d).norm() + c.norm()*t2
new_z = ((a*z+b)*(c*z+d).conjugate() + a*c.conjugate()*t2) / n
new_t2 = t2 / n**2
return [new_z, new_t2]
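# Added illustration (hedged, not part of the original module): the inversion
# S = [[0,-1],[1,0]] acts on H_3 by [z,t^2] -> [-zbar/n, t^2/n^2] with
# n = N(z)+t^2, so it fixes the point [0,1] at height 1 above the origin and
# sends [0,4] to [0,1/4].
def _example_apply3d():
    k = make_k(ZZ(5))['k']
    S = Matrix(2, 2, [0, -1, 1, 0])
    assert apply3d(S, [k(0), QQ(1)]) == [k(0), QQ(1)]
    assert apply3d(S, [k(0), QQ(4)]) == [k(0), QQ(1)/4]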
def infinity_matrix(a, P=None, Plist=None):
"""For a principal cusp a, returns M_a in GL(2,O_K) with M_a(a)=oo.
If P is given in H3 it should be an interior point on S_a and then
the matrix will be adjusted by premultiplying by a translation so
that M_a(P) is in Plist.
"""
M0 = Matrix(2,2,a.ABmatrix()).inverse()
if P is None:
return M0
else:
Q = apply3d(M0,P)
assert P[1]==Q[1]
if Q in Plist:
return M0
for R in Plist:
if R[1]!=Q[1]:
continue
z = R[0]-Q[0]
if z.is_integral():
M = Matrix([[1,z],[0,1]])*M0
if apply3d(M,P) not in Plist:
print("P = {}".format(P))
print("M = {}".format(M))
print("M(P) = {}".format(apply3d(M,P)))
assert apply3d(M,P) in Plist
return M
raise RuntimeError("infinity_matrix failed with a={}, P={}, Plist={}".format(a,P,Plist))
def singular_points_in_class(I, IC=None, verbose=False):
"""Given an ideal I, return a list of singular points of class [I]
(one representative for each orbit under integral translations).
Uses the new characterization of singular points as a/b for b one
nonzero element of minimal norm in one non-principal ideal I in
each ideal class, where I=(a,b).
IC can be set to a list of ideal class representatives.
"""
k = I.number_field()
if I.is_principal():
return [NFCusp(k, infinity)]
if IC is None:
IC = smallest_ideal_class_representatives(k)
sigmas = []
Inorm = I.norm()
Ibar = k.ideal(Inorm)/I
s = k(I.norm())
slist = [s]
if I!=Ibar:
I2 = I*I
if I2.is_principal():
s2 = I2.gens_reduced()[0]
assert s.norm()==s2.norm()
slist.append(s2)
if verbose:
print("Ideal class #{}: denominators {}".format(IC.index(I), slist))
for s in slist:
rlist = [r for r in k.ideal(s).residues() if k.ideal(r,s) == I]
ss = [cusp(reduce_mod_Ok(r/s), k, IC) for r in rlist]
if verbose:
print(" - denominator s = {}, numerators {}, sigmas {}".format(s, rlist, ss))
sigmas += ss
return sigmas
def singular_points_by_class(IC, verbose=False):
"""Return a list of lists of singular points, one sublist for each
nontrivial ideal class, representative for each orbit under
integral translations.
Uses the new characterization of singular points as a/b for b one
nonzero element of minimal norm in one non-principal ideal I in
each ideal class, where I=(a,b).
"""
return [singular_points_in_class(I, IC=IC, verbose=verbose) for I in IC]
def singular_points(k, verbose=False):
"""Return a list of singular points, one representative for each
orbit under integral translations.
Uses the new characterization of singular points as a/b for b one
nonzero element of minimal norm in one non-principal ideal I in
each ideal class, where I=(a,b).
"""
return sum(singular_points_by_class(smallest_ideal_class_representatives(k), verbose), [])
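# Added illustration (hedged, not part of the original module): for
# k = Q(sqrt(-5)) (class number 2) the non-principal class is represented by
# the ideal (2, 1+w), whose smallest-norm element is 2, so up to translation
# the singular points are oo (for the principal class) and (1+w)/2.
def _example_singular_points():
    k = make_k(ZZ(5))['k']
    w = k.gen()
    S = singular_points(k)
    assert any(s.is_infinity() for s in S)
    assert any((not s.is_infinity()) and to_k(s, k) == (1 + w)/2 for s in S)
    return S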
def ab_to_k(k,ab):
"""MA's code returns each singular point in the form (a,b) with a,b
rational, representing a+b*sqrt(-d) with d squarefree. We convert
to an element of k, assuming that k's defining polynomial is
either X^2+d or X^2-X+(d+1)/4.
"""
w = k.gen()
rootd = 2*w-1 if k.discriminant()%4 else w
a,b = ab
return a+b*rootd
def singular_points_MA(k):
"""
Singular points from MA's code
"""
if k.class_number()==1:
return []
from FundDomains import singular_points as spMA, reduce_ab_mod_ok
S = spMA(k)
# include negatives:
S = S + [[-ab[0],-ab[1]] for ab in S]
# reduce mod O_k
S = [reduce_ab_mod_ok(k, ab) for ab in S]
# convert to field elements
S = [ab_to_k(k,ab) for ab in S]
# remove repeats
S = list(set(S))
# convert into cusps whose ideals are standardised, and prepend oo
IC = smallest_ideal_class_representatives(k)
return [cusp(oo,k)] + [cusp(s,k,IC) for s in S]
def differ_by_integer(s,t):
"""
If s,t are cusps, return True iff s-t is integral
"""
if s.is_infinity():
return t.is_infinity()
if t.is_infinity():
return False
ks = s.numerator()/s.denominator()
kt = t.numerator()/t.denominator()
return (ks-kt).is_integral()
def test_singular_points(dmin, dmax, verbose=False):
x = polygen(QQ)
for d in srange(dmin,dmax+1):
if not d.is_squarefree():
continue
k = NumberField(x**2-x+(d+1)//4 if d%4==3 else x**2+d, 'w')
h = k.class_number()
if h==1:
continue
if verbose:
print("d={}, {} has class number {}".format(d, k, h))
sigmas = singular_points(k)
if verbose:
print("New sigmas: {}".format(sigmas))
old_sigmas = singular_points_MA(k)
if verbose:
print("Old sigmas: {}".format(old_sigmas))
diff1 = [s for s in sigmas if not any(differ_by_integer(s,t) for t in old_sigmas)]
diff2 = [s for s in old_sigmas if not any(differ_by_integer(s,t) for t in sigmas)]
ok = True
if diff1:
ok = False
print("d={}: sigmas from new code not in old: {}".format(d,diff1))
if diff2:
ok = False
print("d={}: sigmas from old code not in new: {}".format(d,diff2))
if ok:
print("Old and new agree for d={}".format(d))
def tau(P1, P2):
"""Given P_i=[alpha_i,rho_i^2] for i=1,2, where alpha_i=r_i/s_i are
principal cusps defining hemispheres (or circles) with square
radius rho_i^2=1/N(s_i), return +2, +1, 0, -1, -2 as follows:
+2 if they do not intersect and are external to each other
+1 if they are externally tangent
0 if they intersect in two distinct points
-1 if they are internally tangent (or equal)
-2 if they do not intersect and one is inside the other
"""
a1, r1sq = P1
a2, r2sq = P2
d1 = (a1-a2).norm() - (r1sq + r2sq)
d2 = d1**2 - 4*r1sq*r2sq
if d2 < 0:
return 0
return sign(d1) * (1 if d2==0 else 2)
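# Added illustration (hedged, not part of the original module): hand-checked
# values of tau() for circles over k = Q(sqrt(-5)); the [centre, radius^2]
# pairs below need not come from reduced cusps. The unit circle about 0 is
# externally tangent to the unit circle about 2, meets the unit circle about 1
# in two points, and is internally tangent to the circle of radius 1/2 about 1/2.
def _example_tau():
    k = make_k(ZZ(5))['k']
    one = QQ(1)
    P0 = [k(0), one]
    assert tau(P0, [k(2), one]) == 1        # externally tangent
    assert tau(P0, [k(1), one]) == 0        # two intersection points
    assert tau(P0, [k(1)/2, one/4]) == -1   # internally tangent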
def circles_intersect(P1,P2):
return tau(P1,P2)==0
def circles_tangent(P1,P2, exterior=True):
return tau(P1,P2) == (+1 if exterior else -1)
def circle_inside_circle(P1,P2, strict=True):
# t = P1[1]<P2[1] and tau(P1,P2)==-2
# if strict or t:
# return t
# return (P1[1]<P2[1] and tau(P1,P2)==-1)
t1 = (P1[1]<P2[1]) if strict else (P1[1]<=P2[1])
t2 = tau(P1,P2) in ([-2] if strict else [-2,-1])
return t1 and t2
def xy_coords(alpha):
"""
alpha = x+y*sqrt(-d) in k = Q(w) with either w=sqrt(-d) or w=(1+sqrt(-d))/2
"""
x, y = list(alpha)
if alpha.parent().gen().trace():
        y /= 2
        x += y
return (x,y)
def reduce_mod_Ok(alpha):
"""
    Return an integer translate of alpha whose xy-coords satisfy
-1/2 < x <= 1/2 and
-1/2 < y <= 1/2 (even discriminant, w=sqrt(-d))
-1/4 < y <= 1/4 (odd discriminant, w=(1+sqrt(-d))/2)
"""
k = alpha.parent()
w = k.gen()
y = xy_coords(alpha)[1]
r = 2 if w.trace() else 1
alpha -= (r*y).round('down')*w
x = xy_coords(alpha)[0]
alpha -= x.round('down')
assert in_rectangle(alpha)
return alpha
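# Added illustration (hedged, not part of the original module): reduction into
# the fundamental rectangle for k = Q(sqrt(-5)) (even discriminant, so the
# rectangle is -1/2 < x,y <= 1/2 in the coordinates of xy_coords()).
def _example_reduce_mod_Ok():
    k = make_k(ZZ(5))['k']
    w = k.gen()
    assert reduce_mod_Ok((3 + 5*w)/2) == (1 + w)/2
    assert xy_coords((1 + w)/2) == (QQ(1)/2, QQ(1)/2)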
def slope2(x,y):
"""
Function used to order nonzero (x,y) in R^2 via their argument, going clockwise around the origin:
(+,-) < (0,-) < (-,-) < (-,0) < (-,+) < (0,+) < (+,+) < (+,0)
"""
return (sign(y), x/y) if y else (sign(x), Infinity)
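# Added illustration (hedged, not part of the original module): the eight
# "compass" directions listed in the docstring really do come out in that
# order, because the pairs returned by slope2() compare lexicographically,
# e.g. slope2(1,-1) = (-1,-1) and slope2(1,0) = (1,Infinity).
def _example_slope2_order():
    # points in the docstring's claimed clockwise order, starting at (+,-)
    pts = [(1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0)]
    vals = [slope2(QQ(x), QQ(y)) for x, y in pts]
    assert vals == sorted(vals)
    return vals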
def slope(alpha, centre=0):
"""
As above for elements of an imaginary quadratic field k, assuming
k=Q(w) with either w=sqrt(-d) or w=(1+sqrt(-d))/2.
"""
return slope2(*xy_coords(alpha-centre))
def slope_before(es1, es2):
e1, s1 = es1
e2, s2 = es2
return (e1==e2 and s1<=s2) or (e1!=e2 and s1>s2)
def in_first_half(alpha1, alpha2, centre=0):
"""
Return True if the clockwise angle from alpha1 round to alpha2 is < pi.
"""
return slope_before(slope(alpha1, centre), slope(alpha2, centre))
# plotting functions taken essentially from MA
def plot1hemi(kdata, H):
"""
kdata is a dict with keys 'k' (the field), 'emb' (embedding of k into CC)
H = [z, rsq] with z in k defines a hemisphere
"""
X, Y, Z = var('X, Y, Z')
Ymax = kdata['Ymax']
Xmax = 0.5
x0, y0 = kdata['emb'](H[0])
eq = (X - x0)**2 + (Y - y0)**2 + Z**2 - H[1]
return implicit_plot3d(eq, (Y, -Ymax, Ymax ), (X, -Xmax, Xmax), (Z, 0, 1), plot_points=60, aspect_ratio=1, color='lightgreen')
def plot_Bianchi_diagram(k, Hlist):
"""
Hlist is a list of hemispheres H = [z,rsq] with z in k and square radius rsq
"""
kdata = make_k(k.discriminant())
return sum([plot1hemi(kdata, H) for H in Hlist])
def circ(c,r, fill):
return circle(c, r,
aspect_ratio=1,
edgecolor='blue' if fill else 'black',
thickness=1 if fill else 2,
alpha = 0.2,
fill=fill)
def disc(c,r):
return circle(c, r,
aspect_ratio=1, fill=True, rgbcolor='blue', alpha = 0.2)
def plot_circles_and_points(cc, pp1, pp2=[], pp3=[], fill=False):
circles = [circ(c, r, fill) for c, r in cc]
points1 = [point(P, rgbcolor='red', pointsize=30) for P in pp1]
points2 = [point(P, rgbcolor='black', pointsize=30) for P in pp2]
points3 = [point(P, rgbcolor='blue', pointsize=30) for P in pp3]
return sum(circles) + sum(points1) + sum(points2) + sum(points3)
def plot_circles(alist, fill=False):
k = nf(alist[0])
emb = next(e for e in k.embeddings(CC) if e(k.gen()).imag()>0)
A = [list(emb(to_k(a, k))) for a in alist]
R = [RR(radius_squared(a)).sqrt() for a in alist]
circles = [(c,r) for c,r in zip(A,R)]
return plot_circles_and_points(circles, fill=fill)
def plot_FunDomain_projection(k, alphas, sigmas, fill=False):
w = k.gen()
D = k.discriminant().abs()
emb = next(e for e in k.embeddings(CC) if e(w).imag()>0)
rootd = emb(w).imag()
Ymax = (2.5*rootd)/(4 if ZZ(D).mod(4) == 3 else 2)
Xmax = 3*0.5
triplets, extra_alphas = alpha_triples(alphas)
A = [list(emb(to_k(a))) for a in alphas+extra_alphas]
R = [RR(radius_squared(a)).sqrt() for a in alphas+extra_alphas]
#print("circle centres: {}".format(A))
S = [list(emb(to_k(s))) for s in sigmas if not s.is_infinity()]
#print("singular points: {}".format(S))
#C = [list(emb(P[2][0])) for P in triplets]
#print(triplets)
#print("corners: {}".format(C))
circles = [(c, r) for c, r in zip(A,R)]
proj = plot_circles_and_points(circles, S, A, fill=fill)
z = w-ZZ(1)/2 if ZZ(D).mod(4)==3 else w
TL=list(emb((-1+z)/2))
TR=list(emb((1+z)/2))
BR=list(emb((1-z)/2))
BL=list(emb((-1-z)/2))
lines = [line([TL,TR], rgbcolor='black'),
line([TR,BR], rgbcolor='black'),
line([BR,BL], rgbcolor='black'),
line([BL,TL], rgbcolor='black')]
proj += sum(lines)
proj.set_axes_range(-Xmax, Xmax, -Ymax, Ymax)
return proj
def is_redundant(P, alphas):
"""Return True iff P is strictly covered by any of the hemispheres
S_a for a in the list alphas.
"""
return any(is_under(P,a)==1 for a in alphas)
def triple_intersections(alphas):
"""Given a list of principal cusps alpha (all reduced mod O_k) return
a list of "corners" P = [z,tsq] each the intersection of an S_a
with at least two other S_{b+t} with z in the fundamental
rectangle and tsq>0.
Let u = (w-wbar)/2. The fundamental rectangle F has TR corner at
(u+1)/2 and BL corner minus this. Using symmetries (negation and
conjugation) we can work with the quarter-rectangle F4 with the
same TR and BL=0. To recover F from F4 take the union of
z,-z,zbar,-zbar for z in F4.
The 9 quarter-rectangles adjacent to F4 consist of
    -z, zbar, 1-zbar; -zbar, z, 1-zbar; u-z, u+zbar, u+1-zbar
for z in F4.
"""
# Extract the alphas in F4:
alphas4 = [a for a in alphas if cusp_in_quarter_rectangle(a)]
# Extend these by 8 translations:
def nbrs(a):
k = nf(a)
w = k.gen()
z = to_k(a, k)
cz = z.conjugate()
zlist = [-z, cz, 1-z, -cz, 1-cz, w-z, w+cz,
cz+w-1 if w.trace() else 1+w-z]
alist = [cusp(z2, k) for z2 in zlist]
for b in alist:
if not b.ideal()==1:
print("cusp {} is a neighbour of principal cusp {} but is not principal".format(b,a))
return alist
xalphas4 = sum([nbrs(a) for a in alphas4], alphas4)
# convert each cusp to a point P = [z,tsq] with tsq the square
# radius of S_a:
Alist = [cusp_to_point(a) for a in xalphas4]
corners4 = []
for a, A in zip(alphas4, Alist):
bb = [(b,B) for b,B in zip(xalphas4, Alist) if circles_intersect(A,B)]
for b,B in bb:
cc = [c for c,C in bb if circles_intersect(B,C)]
# now each pair of {a,b,c} intersect
for c in cc:
P = tri_inter(a, b, c)
if P and P[1] and in_quarter_rectangle(P[0]) and not is_redundant(P, xalphas4) and P not in corners4:
corners4.append(P)
# These corners are in F4, so we apply symmetries to get all those in F:
corners = []
for P in corners4:
z = P[0]
zbar = z.conjugate()
for z2 in [z, -z, zbar, -zbar]:
if in_rectangle(z2):
P2 = [z2, P[1]]
if P2 not in corners:
corners.append(P2)
return corners
def alpha_triples(alphas):
"""Given a list of principal cusps
alpha (all reduced mod O_k)
return (1) a list of
[tsq,(a1,a2,a3),P] where each ai is
the translate of an alpha, P =
[z,tsq] is a "corner", the triple
intersection of the S_ai with P
in the fundamental rectangle and
tsq>0; (2) a list of the extra translates required
"""
k = nf(alphas[0])
corners = []
triples = []
alpha_translates = []
# Extend the alphas by 8 translations:
#w = k.gen()
# xalphas = sum([[translate_cusp(a,t) for t in
# [a+b*w for a in [-1,0,1] for b in [-1,0,1]]] for a in alphas],
# [])
#xalphas = alphas + sum([[translate_cusp(a,t) for t in [-w-1,-w,1-w,-1,1,-1+w,w,1+w]] for a in alphas], [])
xalphas = sum([translates(a) for a in alphas], [])
n = len(xalphas)
    # convert each cusp to a point [a,tsq] with tsq the square radius of S_a:
    Alist = [cusp_to_point(a) for a in xalphas]
    # get a list of pairs {i,j} with i<j such that S_ai and S_aj intersect properly:
    ij_list = [{i, j} for i, ai in enumerate(Alist) for j, aj in enumerate(Alist)
               if i < j and circles_intersect(ai, aj)]
for i,j in ij_list:
ai = xalphas[i]
aj = xalphas[j]
#for k, ak in enumerate(alphas):
for k in range(max(i,j)+1, n):
if {i,k} in ij_list and {j,k} in ij_list:
ak = xalphas[k]
P = tri_inter(ai, aj, ak)
if P and P[1] and in_rectangle(P[0]) and not is_redundant(P, xalphas):
if P not in corners:
trip = [P[1],(ai,aj,ak),P]
triples.append(trip)
corners.append(P)
for a in trip[1]:
if a not in alpha_translates and a not in alphas:
alpha_translates.append(a)
triples.sort(key = lambda t:t[0])
return triples, alpha_translates
def sigma_triples(alphas, sigmas):
"""Given alphas, and sigmas (which can be a complete set or just those
in one ideal class), returns a list of [rsq, (s, a1,a2), R] where
each ai is the translate of an alpha, s is a sigma on both S_a1
and S_a2, and R = [z,rsq] is the bi-intersection (with rsq>0) of
S_a1, S_a2.
"""
k = nf(alphas[0])
# get list of finite sigmas as full points:
xsigmas = [s for s in sigmas if not s.is_infinity()]
SP = [[to_k(s),0] for s in xsigmas]
# get list of alpha translates a such that at least one sigma is on S_a:
w = alphas[0].number_field().gen()
xalphas = sum([[translate_cusp(al, a+b*w) for a in [-1,0,1] for b in [-1,0,1]] for al in alphas], [])
xalphas = [a for a in xalphas if any(is_under(P,a)==0 for P in SP)]
triples = []
for s,S in zip(xsigmas, SP):
# find the alphas a such that S_a passes through s:
alist = [a for a in xalphas if is_under(S,a)==0]
# sort these by slope:
alist.sort(key=lambda a: slope(to_k(a,k), S[0]))
for i, ai in enumerate(alist):
aj = alist[i-1]
R = bi_inter(ai, aj)
assert R and R[1]
# test whether this corner R is a translate of one we already have
old = False
for t in triples:
x = t[1][0] - R[0]
if x.is_integral(): # we have a repeat corner, up to translation
old = True
R[0] += x
trip = [[translate_cusp(c, x) for c in (s,ai,aj)], R]
break
if not old:
trip = [[s,ai,aj], R]
triples.append(trip)
return triples
def orbit_polyhedron(orb, Plist, Pverts, Pmats, debug=False):
if debug:
print("Constructing orbit polyhedron {}".format(orb))
i = orb[0]
P = Plist[i]
if debug:
print("Base point P = {}".format(P))
E = []
for j in orb:
Q = Plist[j]
QV = Pverts[j]
if debug:
print(" Q = {} with vertices {} and matrices {}".format(Q, QV, Pmats[j]))
print(" which map Q to {}".format([apply3d(M,Q) for M in Pmats[j]]))
MQP = [M for M in Pmats[j] if apply3d(M,Q)==P]
V = [[apply(M, a) for a in QV] for M in MQP]
if debug:
print(" {} transfer matrices map these to {}".format(len(MQP), V))
print(" adding edges {}".format([[[t[0],t[i]] for i in range(1,len(QV))] for t in V]))
E += sum([[[t[0],t[i]] for i in range(1,len(QV))] for t in V], [])
G = Graph([[cusp_label(a),cusp_label(b)] for a,b in E])
return G
def principal_polyhedra(alphas, debug=False):
print("Constructing principal polyhedra")
k = nf(alphas[0])
triplets, extra_alphas = alpha_triples(alphas)
# only used for the 3d plot:
hemispheres = [cusp_to_point(a) for a in alphas+extra_alphas]
if debug:
print("{} triplets:".format(len(triplets)))
for t in triplets:
print(t)
Plist = [t[2] for t in triplets]
if debug:
print("Plist: {}".format(Plist))
Psupps = [hemispheres_through(P) for P in Plist]
if debug:
print("Psupps: {}".format(Psupps))
Pmats = [[Imat] + [infinity_matrix(a, P, Plist) for a in Psupp] for P, Psupp in zip(Plist, Psupps)]
if debug:
print("Pmats: {}".format(Pmats))
Pverts = [[cusp(oo,k)] + [a for a in alphas+extra_alphas if a in Psupp] for P,Psupp in zip(Plist,Psupps)]
orbits = set()
used_Pi = Set()
for i,P in enumerate(Plist):
if i in used_Pi:
continue
Qlist = [apply3d(M,P) for M in Pmats[i]]
orb = Set([Plist.index(Q) for Q in Qlist])
if debug:
print("New orbit from P_{}={}: {}".format(i,P,orb))
used_Pi = used_Pi.union(orb)
orbits.add(orb)
orbits = [list(orb) for orb in orbits]
print("Found {} orbits".format(len(orbits)))
print("Orbits:")
for orb in orbits:
print(orb)
polyhedra = [orbit_polyhedron(orb, Plist, Pverts, Pmats) for orb in orbits]
print("Constructed {} polyhedra".format(len(polyhedra)))
print("Faces: {}".format([[len(F) for F in G.faces()] for G in polyhedra]))
return polyhedra, hemispheres
def singular_polyhedra(alphas, sigmas, debug=False):
print("Constructing polyhedra from one ideal class, sigmas {}".format(sigmas))
k = nf(alphas[0])
triples = sigma_triples(alphas, sigmas)
if debug:
print("{} triples from sigmas {}:".format(len(triples), sigmas))
for t in triples:
print(t)
Rlist = []
Rsupps = []
for t in triples:
R = t[1]
if R in Rlist:
continue
Rlist.append(R)
Rsupps.append(hemispheres_through(R))
if debug:
print("Rlist: {}".format(Rlist))
print("Rsupps: {}".format(Rsupps))
Rmats = []
Rverts = []
for R,Rsupp in zip(Rlist, Rsupps):
Rmats.append([Imat] + [infinity_matrix(a, R, Rlist) for a in Rsupp])
verts = [cusp(oo,k)]
for t in triples:
if t[1]==R:
for v in t[0]:
if v not in verts:
verts.append(v)
Rverts.append(verts)
if debug:
for R, mats, verts in zip(Rlist, Rmats, Rverts):
print("R = {}\nmats: {}\nverts: {}".format(R, mats, verts))
orbits = set()
used_Ri = Set()
for i,R in enumerate(Rlist):
if i in used_Ri:
continue
print("i={}, R={}, {} Rmats".format(i,R, len(Rmats[i])))
Qlist = [apply3d(M,R) for M in Rmats[i]]
print("Qlist = {}".format(Qlist))
print("Qlist indices: {}".format([Rlist.index(Q) for Q in Qlist]))
orb = Set([Rlist.index(Q) for Q in Qlist])
if debug:
print("New orbit from R_{}={}: {}".format(i,R,orb))
used_Ri = used_Ri.union(orb)
orbits.add(orb)
orbit_reps = [list(orb) for orb in orbits]
print("Found {} orbits with representative points {}:".format(len(orbits), orbit_reps))
polyhedra = [orbit_polyhedron(orb, Rlist, Rverts, Rmats, debug=debug) for orb in orbit_reps]
print("Constructed {} polyhedra".format(len(polyhedra)))
print("Faces: {}".format([[len(F) for F in G.faces()] for G in polyhedra]))
#faces = sum([G.faces() for G in polyhedra],[])
return polyhedra
def all_polyhedra(k, alphas=None, debug=False):
if alphas is None:
alphas = precomputed_alphas(k)
sigmas = singular_points_by_class(smallest_ideal_class_representatives(k))[1:]
polys, hemis = principal_polyhedra(alphas, debug)
polys += sum([singular_polyhedra(alphas, sigs, debug) for sigs in sigmas], [])
return polys, hemis
def is_poly_principal(T):
return all(a.ideal()==1 for a in T)
half = Integer(1)/2
def xy_in_rectangle(xy, f):
"""
f = 1 or 2
"""
x,y = xy
fy = f*y
return -half<x and x<= half and -half<fy and fy<=half
def xy_in_quarter_rectangle(xy, f):
"""
f = 1 or 2
"""
x,y = xy
fy = f*y
return 0<=x and x<= half and 0<=fy and fy<=half
def in_rectangle(a):
f = 1 + a.parent().disc()%2
return xy_in_rectangle(xy_coords(a), f)
def in_quarter_rectangle(a):
f = 1 + nf(a).disc()%2
return xy_in_quarter_rectangle(xy_coords(a), f)
def cusp_in_rectangle(a):
return in_rectangle(to_k(a))
def cusp_in_quarter_rectangle(a):
return in_quarter_rectangle(to_k(a))
def is_sigma_surrounded(sigma, alist, debug=False):
"""Given a singular point s and a candidate list of principal cusps
alist, tests whether the discs S_{a+t} for a in alist and t in Ok
completely surround sigma.
Returns either (True, xlist) with xlist a list of all a+t needed,
or (False, [])
"""
k = nf(alist[0])
w = k.gen()
# convert s to a point
s = to_k(sigma, k)
if debug:
print("s = {}".format(s))
# extend the candidate list by including offsets:
offsets = [-1-w,-w,1-w,-1,1,-1+w,w,1+w]
alist = sum([[translate_cusp(b,t) for t in offsets] for b in alist], alist)
# extract the relevant alphas, if any:
alist = [a for a in alist if is_under([s,0], a)==0]
alist = [a for a in alist if not any(circle_inside_circle(cusp_to_point(a), cusp_to_point(b), False)
for b in alist if b!=a)]
if debug:
print(" relevant alphas: {}".format(alist))
Alist = [cusp_to_point(a) for a in alist]
# sort these by slope:
Alist.sort(key=lambda a: slope(a[0], s))
Aslopes = [slope(a[0], s) for a in Alist]
if debug:
print(" Alist (sorted) = {}".format(Alist))
print(" Relative slopes: {}".format(Aslopes))
for i, t2 in enumerate(Aslopes):
t1 = Aslopes[i-1]
if not slope_before(t1, t2):
if debug:
print(" !Failure around {} between {} and {}".format(s, alist[i-1], alist[i]))
return False, []
return True, alist
def are_sigmas_surrounded(sigmas, alist, debug=False):
"""Given a list of singular points s and a candidate list of principal
cusps alist, tests whether the discs S_{a+t} for a in alist and t
in Ok completely surround all sigmas.
    Returns either (True, xlist) with xlist a list of all a+t needed,
    or (False, s) where s is a singular point which is not surrounded.
"""
xlist = []
for s in sigmas:
if s.is_infinity():
continue
ok, xlist1 = is_sigma_surrounded(s, alist, debug)
if not ok:
if debug:
print("{} is not surrounded".format(s))
return False, s
if debug:
print("{} is surrounded by {}".format(s, xlist1))
for a in xlist1:
if a not in xlist:
xlist.append(a)
if debug:
print("All sigmas are surrounded, by {}".format(xlist))
return True, xlist
def tri_det(a1, a2, a3):
return Matrix(3,3,[a1,a2,a3, a1.conjugate(), a2.conjugate(), a3.conjugate(), 1, 1, 1]).det()
def intersection_points_in_k(a1,a2):
"""Given principal cusps a1,a2 returns a list of 0, 1 or 2 points (in
k) where the circles S_a1, S_a2 intersect.
"""
k = nf(a1)
alist = [a1,a2]
# Check the cusps are principal, not infinity, and with unit ideal
assert all((not a.is_infinity()) and (a.ideal()==1) for a in alist)
# Define the square radii and centres
r1sq, r2sq = [radius_squared(a) for a in alist]
al1, al2 = [to_k(a, k) for a in alist]
delta = al2-al1
n = delta.norm()
d1 = n - (r1sq + r2sq)
d2 = d1**2 - 4*r1sq*r2sq
if d2 > 0:
return []
z = ((al1+al2) + (r1sq-r2sq)/delta.conjugate())/2
return [z + r/(2*delta.conjugate()) for r in k(d2).sqrt(all=True, extend=False)]
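# Added illustration (hedged, not part of the original module): a hand-checked
# case over k = Q(sqrt(-5)) where both intersection points lie in k: the
# circles S_{w/2} (radius 1/2) and S_{w/3} (radius 1/3) meet at (1+w)/3 and
# (w-1)/3, since here d2 = -5/81 is -5 times a rational square. Assumes the
# utils cusp() helper as used elsewhere in this module.
def _example_intersection_points_in_k():
    k = make_k(ZZ(5))['k']
    w = k.gen()
    zz = intersection_points_in_k(cusp(w/2, k), cusp(w/3, k))
    assert set(zz) == {(1 + w)/3, (w - 1)/3}
    return zz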
def intersection_points_in_CC(a1,a2):
"""Given principal cusps a1,a2 returns a list of 0, 1 or 2 points (in
CC) where the circles S_a1, S_a2 intersect.
"""
k = nf(a1)
emb = next(e for e in k.embeddings(CC) if e(k.gen()).imag()>0)
alist = [a1,a2]
# Check the cusps are principal, not infinity, and with unit ideal
assert all((not a.is_infinity()) and (a.ideal()==1) for a in alist)
# Define the square radii and centres
r1sq, r2sq = [radius_squared(a) for a in alist]
al1, al2 = [to_k(a, k) for a in alist]
delta = al2-al1
n = delta.norm()
d1 = n - (r1sq + r2sq)
d2 = d1**2 - 4*r1sq*r2sq
if d2 > 0:
return []
z = emb(((al1+al2) + (r1sq-r2sq)/delta.conjugate())/2)
if d2 == 0:
return [z]
rd2 = CC(d2).sqrt() # pure imaginary
z1 = z + rd2/(2*emb(delta.conjugate()))
z2 = 2*z-z1 # = z - rd2/(2*emb(delta.conjugate()))
return [z1,z2]
def show_intersection(a1,a2):
zz = intersection_points_in_CC(a1,a2)
if len(zz)==2:
zz.append((zz[0]+zz[1])/2)
points = [list(z) for z in zz]
k = nf(a1)
emb = next(e for e in k.embeddings(CC) if e(k.gen()).imag()>0)
A = [list(emb(to_k(a, k))) for a in [a1,a2]]
R = [RR(radius_squared(a)).sqrt() for a in [a1,a2]]
circles = [(c,r) for c,r in zip(A,R)]
return plot_circles_and_points(circles, points, fill=True)
def are_intersection_points_covered_by_one(a1, a2, a, plot=False):
"""Given principal cusps a1, a2, a such that the circles S_a1 and
S_a2 intersect in distinct points, test whether S_a covers either
or both.
Returns 0 if neither, 2 if both, +1 or -1 if just one. The signs
are consistent so that if a returns +1 and a' returns -1 then each
intersection point is covered by either S_a or S_a'.
"""
k = nf(a1)
w = k.gen()
emb = next(e for e in k.embeddings(CC) if e(w).imag()>0)
alist = [a1,a2,a]
# Check the cusps are principal, not infinity, and with unit ideal
assert all((not a.is_infinity()) and (a.ideal()==1) for a in alist)
# Define the square radii and centres
r1sq, r2sq, rsq = [radius_squared(a) for a in alist]
al1, al2, al = [to_k(a, k) for a in alist]
n1, n2 = [a.norm() for a in [al1, al2]]
#
delta = al2-al1
n = delta.norm()
z0 = ((al1+al2) + (r1sq-r2sq)/delta.conjugate())/2
d1 = n - (r1sq + r2sq)
d2 = d1**2 - 4*r1sq*r2sq
if d2 >= 0:
        raise RuntimeError("cusps {} and {} have non-intersecting circles".format(a1, a2))
if plot:
points = [list(z) for z in intersection_points_in_CC(a1,a2)]
circle = (list(emb(to_k(a, k))), RR(radius_squared(a)).sqrt())
pic = plot_circles([a1,a2], fill=False) + plot_circles_and_points([circle], points, fill=True)
pic.show()
input("press Enter...")
T = 2 * n * (rsq - (z0-al).norm()) + d2/2 # rational
T2 = T**2
D = tri_det(al, al2, al1) # pure imaginary
D2 = QQ(D**2) # negative rational
d2D2 = d2*D2 # positive rational
# the covering condition is \pm sqrt(d2)*D < T
#print("T = {}, D = {}, d2 = {}".format(T,D,d2))
if d2D2 < T2:
return 2 if T>0 else 0 if T<0 else '?'
if d2D2 > T2:
u = QQ(D/(w-w.conjugate()))
return -1 if u>0 else +1 if u<0 else 0
return 0
def is_singular(s, sigmas):
from utils import sigma_index_with_translation
return sigma_index_with_translation(s, sigmas)[0]!=-1
def translates(a):
w = nf(a).gen()
return [translate_cusp(a,t) for t in [-w-1,-w,-w+1,-1,0,1,w-1,w,w+1]]
def is_inside_one(z, alist):
"""Test whether the cusp z is strictly inside at least one S_a for a
in alist. If so return True, a; otherwise return False, None.
"""
try:
a = next(a for a in alist if is_inside(z, a, strict=True))
return True, a
except StopIteration:
return False, None
def are_intersection_points_covered(a0, a1, alist, sigmas, debug=False):
"""Given principal cusps a0, a1 whose circles S_a0, S_a1 intersect,
and a list of principal cusps alist each of whose circles S_a also
intersects S_a0, test whether each of the two intersection points
of S_a0 and S_a1 is either singular or strictly inside one of the
S_a.
We treat as a special case when the two intersection points are in
k. If not, the code still uses exact arithmetic.
"""
k = nf(a0)
z_in_k = intersection_points_in_k(a0,a1)
if z_in_k:
zz = [cusp(z, k) for z in z_in_k]
if debug:
print("intersection points in k: {}".format(z_in_k))
# check that each is *either* singular *or* contained in some S_a2
for z in zz:
if is_singular(z, sigmas):
if debug:
print("{} is ok: singular".format(z))
else:
ok, a1 = is_inside_one(z, alist)
if ok:
if debug:
print("{} is ok: inside S_{}".format(z, a1))
else:
return False
return True
# Now the intersection points are not in k. Check that either one
# S_a covers both, or two cover one each:
t = 0 # will hold +1 or -1 if we have covered only one of the two
for a2 in alist:
if a2 == a1:
continue
t2 = are_intersection_points_covered_by_one(a0, a1, a2, plot=False)
if debug:
print("a0={}, a1={}, a2={}: t2={}, t={}".format(a0, a1,a2,t2,t))
if t2: # it is 2, +1 or -1
assert t2 in [-1,1,2]
if debug:
are_intersection_points_covered_by_one(a0, a1, a2, plot=True)
if t2==2 or ([t,t2] in [[1,-1],[-1,1]]):
if debug:
print("t={}, t2={}, about to return True".format(t,t2))
return True
assert t2 in [-1,1] and t in [0,t2]
if debug:
print("t={}, t2={}, setting t to {}".format(t,t2,t2))
t = t2
return False
def is_alpha_surrounded(a0, alist, sigmas, pairs_ok=[], debug=False, plot=False):
"""Given a principal cusp a0, a candidate list of principal cusps
alist, tests whether the boundary of the disc S_a0 is contained in
the union of the translates S_{b+t} for b in alist, apart from any
singular points on the boundary. It suffices to consider all b+t
such that S_{b+t} intersects S_a in two points and check that each
of the points is either singular or contained in some other
S_{b+t}. This is simplest when the intersection points are in k;
if not then the method still uses exact arithmetic in k
throughout.
pairs_ok is a list of pairs (a1,a2) whose intersection points are
known to be covered, so can be skipped.
Returns (True/False, new_pairs_ok) where new_pairs_ok is an
updated list of pairs whose intersections have been shown to be
covered.
"""
# convert a0 to a point with radius
A0 = cusp_to_point(a0)
if debug:
print("A0 = {}".format(A0))
# extend the candidate list by including offsets:
alist = sum([translates(b) for b in alist], [])
# check if S_a0 is strictly entirely contained in one S_alpha:
if any(circle_inside_circle(A0, cusp_to_point(b), True) for b in alist):
if debug:
a1 = next(b for b in alist if circle_inside_circle(A0, cusp_to_point(b), True))
print(" ok: circle {} is entirely inside circle {}".format(A0, cusp_to_point(a1)))
return True, pairs_ok
# extract the relevant alphas, if any, namely those for which
# S_alpha and S_a0 properly intersect:
alist = [a for a in alist if circles_intersect(A0, cusp_to_point(a))]
# alist = [a for a in alist if not any(circle_inside_circle(cusp_to_point(a), cusp_to_point(b), False)
# for b in alist if b!=a)]
if debug:
print(" relevant alphas: {}".format(alist))
if debug and plot:
pic = plot_circles([a0], fill=False) + plot_circles(alist, fill=True)
pic.show(figsize=[30,30])
input("press Enter...")
a0_pairs_ok = [pr for pr in pairs_ok if a0 in pr]
new_pairs_ok = pairs_ok.copy()
all_ok = True
for i, a1 in enumerate(alist):
pair = [a0,a1]
pair.sort()
if pair in a0_pairs_ok:
if debug:
print("\nSkipping pair {}".format(pair))
continue
if debug:
print("\nTesting intersection points of {}".format(pair))
ok = are_intersection_points_covered(a0, a1, alist, sigmas, debug)
if ok:
new_pairs_ok.append(pair)
if debug:
print(" - ok: intersection points of {} and {} are covered".format(a0,a1))
else:
if debug:
print(" - not ok: intersection points of {} and {} are not covered".format(a0,a1))
all_ok = False
if debug:
if all_ok:
print("OK: all intersection points of {} are covered".format(a0))
else:
print("No: not all intersection points of {} are covered".format(a0))
return all_ok, new_pairs_ok
def are_alphas_surrounded(alist_ok, alist_open, slist, pairs_ok=[],
verbose=False, debug=False):
"""Given alist_ok and alist_open, lists of principal cusps, and slist,
a complete list of singular points, tests whether the boundary of
every disc S_a for a in alist_open is contained in the union of
the translates of the S_b for b in alist_ok+alist_open, apart from
any singular points on the boundary.
Any a which pass are added to a new copy of alist_ok, while any
which fail are added to a new alist_open, so success means that
the latter is empty. This allows for incremental testing by
adding more a to alist_open.
pairs_ok is list of pairs (a1,a2) whose intersection points are
known to be covered.
Returns (True/False, new_alist_ok, new_alist_open, new_pairs_ok).
NB All a in alist_open will be tested, i.e. we carry on after a
failure.
"""
alist = alist_ok + alist_open
new_alist_ok = alist_ok.copy()
new_alist_open = []
all_ok = True
for i, a in enumerate(alist_open):
if cusp_in_quarter_rectangle(a):
if verbose or debug:
print("Testing alpha #{}/{} = {}".format(i+1, len(alist_open), a))
ok, new_pairs_ok = is_alpha_surrounded(a, alist, slist, pairs_ok, debug)
pairs_ok = new_pairs_ok
if ok:
if verbose or debug:
print(" ok, {} is surrounded".format(a))
else:
all_ok = False
if verbose or debug:
print(" no, {} is not surrounded".format(a))
else:
ok = True
if ok:
new_alist_ok.append(a)
else:
new_alist_open.append(a)
return all_ok, new_alist_ok, new_alist_open, pairs_ok
def next_norm(k, n):
"""
Returns the smallest integer m>=n which is a norm from O_k
"""
while not k.elements_of_norm(n):
n+=1
return n
def elements_of_norm(k, n):
return iter(k.elements_of_norm(n))
def elements_of_norm_upto(k, n, start=1):
return chain(*(iter(k.elements_of_norm(n)) for n in range(start, n+1)))
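# Added illustration (hedged, not part of the original module): in
# k = Q(sqrt(-5)) the integer norms are the values of x^2 + 5y^2, so 2 and 3
# are not norms, the first norm after 1 is 4 = N(2), and the norms up to 6
# are exactly 1, 4, 5, 6.
def _example_next_norm():
    k = make_k(ZZ(5))['k']
    assert next_norm(k, 2) == 4
    assert next_norm(k, 7) == 9
    assert sorted(set(x.norm() for x in elements_of_norm_upto(k, 6))) == [1, 4, 5, 6]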
def reduced_numerators(s):
k = s.parent()
one = ZZ(1)
for r in k.ideal(s).residues():
if k.ideal(r,s).norm() == one:
yield r
def principal_cusps_iter(k, maxnorm_s):
"""
Return iterator yielding all principal r/s with N(s)<=maxnorm_s
"""
for s in elements_of_norm_upto(k, maxnorm_s):
for r in reduced_numerators(s):
yield cusp(reduce_mod_Ok(r/s), k)
def principal_cusps_of_norm(k, norm_s):
"""
Return iterator yielding all principal r/s with N(s)=maxnorm_s
"""
for s in k.elements_of_norm(norm_s):
for r in reduced_numerators(s):
yield cusp(reduce_mod_Ok(r/s), k)
def principal_cusps_up_to(k, maxn, fussy=True):
"""List of all principal r/s with N(s)<=maxn, omitting any whose
circles are contained in an earlier circle. Since we loop through
circles in decreasing order of radius, no circle can be contained
in a later one.
If fussy, automatically increment maxn to the next integer which is a norm.
"""
alist = []
Alist = []
if fussy:
maxn0 = maxn
maxn = next_norm(k, maxn0)
if maxn != maxn0:
print(" increasing maxn to {} since there are no elements of norm {}".format(maxn, list(range(maxn0,maxn))))
for a in principal_cusps_iter(k, maxn):
A = cusp_to_point(a)
#print("Testing {} = {} against {}".format(a, A, alist))
if not any(circle_inside_circle(A, B, False) for B in Alist):
#print("appending {} = {} to {}".format(a, A, alist))
alist.append(a)
Alist.append(A)
return alist
def find_covering_alphas(k, sigmas=None, verbose=False):
"""Returns a finite list of principal cusps a such that the S_{a+t}
for all integral t cover CC apart from singular points.
For n>=1 successively, we test as a candidate set all a=r/s with
r,s coprime, r reduced mod s, N(s)<=n (omitting any for which S_a
is contained in any earlier S_a') until we succeed.
sigmas can be set to a list of singular points (up to
translation), otherwise these will be computed.
Returns maxn, alphas, sigmas
Other functions will then (1) saturate the set, (2) discard
redundancies.
"""
if sigmas is None:
sigmas = singular_points(k)
ok = False
maxn = 0
alphas_ok = []
alphas_open = []
pairs_ok = []
Alist = []
while not ok:
maxn = next_norm(k, maxn+1)
nc = 0 # number of new alphas added to list
for a in principal_cusps_of_norm(k, maxn):
A = cusp_to_point(a)
if not any(circle_inside_circle(A, B, False) for B in Alist):
if cusp_in_quarter_rectangle(a):
alphas_open.append(a)
nc += 1
else:
alphas_ok.append(a)
Alist.append(A)
if verbose:
print("Adding {} alphas of norm {} (plus symmetrics)".format(nc, maxn))
if nc==0:
continue
ok, new_alphas_ok, new_alphas_open, new_pairs_ok = are_alphas_surrounded(alphas_ok, alphas_open, sigmas, pairs_ok, verbose=verbose, debug=False)
if ok:
if verbose:
print("Success using {} alphas of with max norm {}!".format(len(new_alphas_ok), maxn))
return maxn, new_alphas_ok, sigmas
else:
alphas_ok = new_alphas_ok
alphas_open = new_alphas_open
pairs_ok = new_pairs_ok
if verbose:
print("{} alphas out of {} with max norm {} are not surrounded, continuing...".format
(len(alphas_open), len(alphas_open)+len(alphas_ok), maxn))
def point_translates(P):
w = P[0].parent().gen()
return [[P[0]+a+b*w, P[1]] for a in [-1,0,1] for b in [-1,0,1]]
def nverts(a, plist):
return sum([1 for P in plist if is_under(P,a)==0])
def saturate_covering_alphas(k, alphas, sigmas, debug=False, verbose=False):
"""Given a covering set of alphas as produced by
find_covering_alphas(), add extras if necessary so that they are
"saturated", i.e. define the extended fundamental domain.
By Swan, we need to find the points P in H^3 with positive height
where at least 3 hemispheres S_a intersect, and for each P check
whether P is properly covered by an S_a for a not in the set of
alphas (up to translation). If so, we need to add a to the set of
alphas. If none, then we have the fundamental region (and can go
on to discard any redundant alphas).
At the end we discard any alphas with <3 vertices (including translates and singular points).
"""
sat = False
checked_points = []
alphas1 = alphas.copy() # copy so original list unchanged
while not sat:
n = max(a.denominator().norm() for a in alphas1)
m = next_norm(k, n+1)
all_points = triple_intersections(alphas1)
if debug:
print("Found {} potential vertices".format(len(all_points)))
points = [P for P in all_points if P[1]<=1/m]
if debug:
print(" -- of which {} are low enough to be properly covered by a new alpha".format(len(points)))
points = [P for P in points if in_quarter_rectangle(P[0])]
if debug:
print(" -- of which {} lie in the first quadrant".format(len(points)))
points = [P for P in points if P not in checked_points]
if debug:
print(" -- of which {} have not already been checked".format(len(points)))
sat = True # will be set to False if we find out that the alphas are not already saturated
extra_alphas = [] # will be filled with any extra alphas needed
for P in points:
if debug:
print(" - checking P = {}".format(P))
extras = properly_covering_hemispheres(P)
if extras:
sat = False
hts = [radius_squared(a) - (P[0]-to_k(a)).norm() for a in extras]
m = max(hts)
extras0 = [a for a,h in zip(extras, hts) if h==m]
norms0 = [a.denominator().norm() for a in extras0]
if debug:
print(" - found properly covering {} with norms {}".format(extras, [a.denominator().norm() for a in extras]))
print(" max height above P (height {}) is {}, for {} with norms {}".format(P[1], m, extras0, norms0))
for a in extras0:
ca = conj_cusp(a)
for b in [a, negate_cusp(a), ca, negate_cusp(ca)]:
if cusp_in_rectangle(b) and b not in extra_alphas:
extra_alphas.append(b)
else:
if debug:
print(" - OK, no properly covering alphas found")
checked_points.append(P)
if verbose:
if sat:
m = max([a.denominator().norm() for a in alphas1])
print(" alphas are saturated! {} alphas with max norm {}".format(len(alphas1), m))
else:
m = max([a.denominator().norm() for a in extra_alphas])
print(" alphas not saturated, {} extras needed: {} (norms at most {})".format(len(extra_alphas), extra_alphas, m))
alphas1 += extra_alphas
m = max([a.denominator().norm() for a in alphas1])
if verbose:
print("After saturation we now have {} alphas with max norm {}".format(len(alphas1), m))
# Now delete any alphas with <3 vertices, allowing for translates
pointsx = []
for P in all_points+[[to_k(s,k),0] for s in sigmas if not s.is_infinity()]:
for Q in point_translates(P):
if Q not in pointsx:
pointsx.append(Q)
nv = [nverts(a, pointsx) for a in alphas1]
if verbose:
print("# vertices for these alphas: {}".format(nv))
alphas1 = [a for a in alphas1 if nverts(a, pointsx)>=3]
m = max([a.denominator().norm() for a in alphas1])
if verbose:
print("After removing alphas which go through <3 vertices, we now have {} alphas with max norm {}".format(len(alphas1), m))
points1 = triple_intersections(alphas1)
return alphas1, points1
def reduce_alphas_mod_Ok(alist):
"""Rahm's list of alpha = lambda/mu in k includes repeats (up to
translation by Ok).
This function returns a list with no repeats, as cusps.
"""
a0 = next(a for a in alist if not a in QQ)
k = a0.parent()
Ireps = smallest_ideal_class_representatives(k)
alist = [k(a) for a in alist]
alphas = []
for a in alist:
if not any((a-b).is_integral() for b in alphas):
alphas.append(reduce_mod_Ok(a))
else:
print("omitting a={} which is a translate of {}".format(a, next(b for b in alist if (a-b).is_integral())))
return [cusp(a, k, Ireps) for a in alphas]
def cong_mod(r1, r2, s):
return ((r1-r2)/s).is_integral()
def denom_2_alphas(k):
d = -k.discriminant().squarefree_part() # for compatibility with C++'s d
w = k.gen()
d8 = d%8
alist = []
if d8 in [1,3,5]:
alist.append(cusp(w/2,k))
if d8 in [2,6]:
alist.append(cusp((1+w)/2,k))
if d8 == 3:
alist.append(cusp((w-1)/2,k))
return alist
def denom_2_sigmas(k):
d = -k.discriminant().squarefree_part() # for compatibility with C++'s d
w = k.gen()
d8 = d%8
slist = []
if d8 in [1,5]:
slist.append(cusp((1+w)/2,k))
if d8 in [2,6]:
slist.append(cusp(w/2,k))
if d8 == 7:
slist.append(cusp(w/2,k))
slist.append(cusp((1-w)/2,k))
return slist
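# Added illustration (hedged, not part of the original module): for d=5 we
# have d % 8 = 5, so the expected denominator-2 alpha is w/2 and the expected
# denominator-2 sigma is (1+w)/2, the latter matching the singular point of
# Q(sqrt(-5)) found by singular_points().
def _example_denom_2():
    k = make_k(ZZ(5))['k']
    w = k.gen()
    assert [to_k(a, k) for a in denom_2_alphas(k)] == [w/2]
    assert [to_k(s, k) for s in denom_2_sigmas(k)] == [(1 + w)/2]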
def denom_3_alphas(k):
d = -k.discriminant().squarefree_part() # for compatibility with C++'s d
if d in [1, 2, 3, 7, 11, 5, 6, 15, 19]:
return []
w = k.gen()
d12 = d%12
if d12==3:
alist = [w, w-1]
if d12==7:
alist = [w, 1-w, 1+w]
if d12==11:
alist = [1+w]
if d12 in [1,10]:
alist = [w, 1+w, 1-w]
if d12 in [2,5]:
alist = [w]
if d12 in [6,9]:
alist = [1+w, w-1]
return sum([[cusp(a/3,k), cusp(-a/3,k)] for a in alist], [])
def denom_3_sigmas(k):
d = -k.discriminant().squarefree_part() # for compatibility with C++'s d
w = k.gen()
d12 = d%12
slist = []
if d12 in [2, 5]:
slist.append(cusp((1+w)/3,k))
slist.append(cusp((-1-w)/3,k))
slist.append(cusp((1-w)/3,k))
slist.append(cusp((w-1)/3,k))
if d12 == 11:
# if d==35:
# slist.append(cusp(w/3,k))
# slist.append(cusp(-w/3,k))
if d>=35:
slist.append(cusp(w/3, k))
slist.append(cusp(-w/3, k))
slist.append(cusp((w-1)/3,k))
slist.append(cusp((1-w)/3,k))
if d12 == 3 and d>15:
slist.append(cusp((1+w)/3,k))
slist.append(cusp((-1-w)/3,k))
if d12 in [6,9] and d>6:
slist.append(cusp(w/3,k))
slist.append(cusp(-w/3,k))
return slist
def alpha_in_list(a, alist, up_to_translation=True):
if up_to_translation:
return alpha_index_with_translation(a, alist)[0]>=0
else:
return a in alist
def compare_alpha_lists(alist1, alist2):
return len(alist1)==len(alist2) and all(alpha_in_list(a,alist2) for a in alist1) and all(alpha_in_list(a,alist1) for a in alist2)
def find_edge_pairs(alphas, sigmas, debug=False):
from utils import nf, ispos, add_two_alphas, add_four_alphas
k = nf(alphas[0])
w = k.gen()
d = -k.discriminant().squarefree_part() # for compatibility with C++'s d
# Extract the a for which 2*a or 3*a is integral, which we treat
# separately:
A1 = [a for a in alphas if to_k(a,k).is_integral()]
assert A1 == [cusp(0,k)]
A2 = [a for a in alphas if a not in A1 and (2*to_k(a,k)).is_integral()]
A2exp = denom_2_alphas(k)
if not compare_alpha_lists(A2, A2exp):
print("*******************denom 2 alphas are {}, expected {}".format(A2, A2exp))
A2 = A2exp # use the expected list for consistent normalisation and order
A12 = A1 + A2
A3 = [a for a in alphas if not alpha_in_list(a, A12) and (3*to_k(a,k)).is_integral()]
A3exp = denom_3_alphas(k)
if not compare_alpha_lists(A3, A3exp):
print("*******************denom 3 alphas are {}, expected {}".format(A3, A3exp))
A3 = A3exp # use the expected list for consistent normalisation and order
A123 = A12 + A3
    # For a such that 2*a, 3*a are not integral, we make sure that we have
    # complete sets of {a,-a} pairs, not just up to translation:
A = []
for a in alphas:
da = a.denominator()
if not ispos(da):
a = cusp(to_k(a))
ma = negate_cusp(a)
if not alpha_in_list(a, A123) and not alpha_in_list(ma, A):
r,i = to_k(a,k)
if w.trace()==0:
if r<0 and i==half:
a = cusp(k([r,-half]), k)
elif i<0 and r==half:
a = cusp(k([-half,i]), k)
else:
if i==half:
if r>0:
a = cusp(k([r,i-1]), k)
elif r<-half:
a = cusp(k([r+1,i-1]), k)
elif 2*r+i==1 and i<0:
a = cusp(k([r-1,i]), k)
r,i = to_k(a,k)
if i>0:
A.append(a)
A.append(negate_cusp(a))
else:
A.append(negate_cusp(a))
A.append(a)
S = list(set(k(a.denominator()) for a in A))
S.sort(key = lambda z: z.norm())
if debug:
print("Denominator 1,2,3: {}".format(A123))
print("Other denominators: {}".format(S))
for s in S:
print("s = {}: numerators {}".format(s, [a for a in A if a.denominator()==s]))
new_alphas = []
M_alphas = []
pluspairs = []
minuspairs = []
fours = []
long_fours = []
for s in S:
if debug:
print("s = {}".format(s))
As = [a for a in A if a.denominator()==s]
for a in As:
if debug:
print(" a = {}".format(a))
r = k(a.numerator())
if debug:
print(" r = {}".format(r))
rs = (r,s)
mrs = (-r,s)
rsq = r*r
if cong_mod(rsq, +1, s):
if not any(pair in pluspairs for pair in (rs, mrs)):
if ispos(r):
if debug:
print(" - adding plus pair {}".format(rs))
pluspairs.append(rs)
else:
if debug:
print(" - adding plus pair {}".format(mrs))
pluspairs.append(mrs)
continue
if cong_mod(rsq, -1, s):
if not any(pair in minuspairs for pair in (rs, mrs)):
if ispos(r):
if debug:
print(" - adding minus pair {}".format(rs))
minuspairs.append(rs)
else:
if debug:
print(" - adding minus pair {}".format(mrs))
minuspairs.append(mrs)
continue
if debug:
print(" - looking for a foursome")
try:
adash = next(ad for ad in As if cong_mod(r*ad.numerator(), -1, s))
rdash = k(adash.numerator())
rds = (rdash,s)
mrds = (-rdash,s)
if not any(pair in fours for pair in (rs, mrs, rds, mrds)):
if ispos(r):
if debug:
print(" - adding foursome {}".format((r,s,rdash)))
fours.append(rs)
long_fours.append((s,r,rdash))
else:
if debug:
print(" - adding foursome {}".format((-r,s,rdash)))
fours.append(mrs)
long_fours.append((s,-r,-rdash))
except StopIteration:
print("no negative inverse found for {} mod {}".format(r, s))
for r,s in pluspairs:
add_two_alphas(s, r, +1, new_alphas, M_alphas)
for r,s in minuspairs:
add_two_alphas(s, r, -1, new_alphas, M_alphas)
for s, r1, r2 in long_fours:
add_four_alphas(s, r1, r2, new_alphas, M_alphas)
    # Process the sigmas, standardising those with denominator 2 and 3 and putting the rest into +/- pairs
# Extract the s with denominator 2 or 3, which we treat
# separately:
S2 = [s for s in sigmas if (not s.is_infinity()) and (2*to_k(s,k)).is_integral()]
S2exp = denom_2_sigmas(k)
if not compare_alpha_lists(S2, S2exp):
print("*******************denom 2 sigmas are {}, expected {}".format(S2, S2exp))
S2 = S2exp # use the expected list for consistent normalisation and order
S3 = [s for s in sigmas if (not s.is_infinity()) and (not alpha_in_list(s, S2)) and (3*to_k(s,k)).is_integral()]
S3exp = denom_3_sigmas(k)
if not compare_alpha_lists(S3, S3exp):
print("*******************denom 3 sigmas are {}, expected {}".format(S3, S3exp))
S3 = S3exp # use the expected list for consistent normalisation and order
S23 = S2 + S3
S = []
S_mod_neg = []
for s in sigmas:
ms = negate_cusp(s)
if not s.is_infinity() and not alpha_in_list(s, S23) and not alpha_in_list(ms, S):
r,i = to_k(s,k)
if w.trace()==0:
if r<0 and i==half:
s = cusp(k([r,-half]), k)
elif i<0 and r==half:
s = cusp(k([-half,i]), k)
else:
if i==half:
if r>0:
s = cusp(k([r,i-1]), k)
elif r<-half:
s = cusp(k([r+1,i-1]), k)
elif 2*r+i==1 and i<0:
s = cusp(k([r-1,i]), k)
r,i = to_k(s,k)
neg_s = negate_cusp(s)
if i>0:
S_mod_neg.append(s)
S.append(s)
S.append(neg_s)
else:
S_mod_neg.append(neg_s)
S.append(neg_s)
S.append(s)
print("alphas with denominator | 2: {}".format(A2))
print("alphas with denominator | 3: {}".format(A3))
print("plus pairs: {}".format(pluspairs))
print("minus pairs: {}".format(minuspairs))
print("fours: {}".format(fours))
print("sigmas with denominator 2: {}".format(S2))
print("sigmas with denominator 3: {}".format(S3))
print("other (finite) sigmas (up to sign): {}".format(S_mod_neg))
new_sigmas = [cusp(oo,k)] + S2 + S3 + S
# # for pasting into C++:
# print("// C++ code")
# for r,s in pluspairs:
# print("add_alpha_orbit({}, {}, {});".format(s,r,-r))
# for r,s in minuspairs:
# print("add_alpha_orbit({}, {}, {});".format(s,r,r))
# for s, r1, r2 in long_fours:
# print("add_alpha_orbit({}, {}, {});".format(s,r1,r2))
# for pasting into data file:
print("//////////////////////////////")
print("// for copying into geodat.dat")
print("0")
print("0 d={}".format(d))
print("0")
for r,s in pluspairs:
sr, si = s
r1r, r1i = r
r2r, r2i = -r
print("{} A {} {} {} {} {} {}".format(d, sr,si, r1r,r1i, r2r,r2i))
for r,s in minuspairs:
sr, si = s
r1r, r1i = r
r2r, r2i = r
print("{} A {} {} {} {} {} {}".format(d, sr,si, r1r,r1i, r2r,r2i))
for s, r1, r2 in long_fours:
sr, si = s
r1r, r1i = r1
r2r, r2i = r2
print("{} A {} {} {} {} {} {}".format(d, sr,si, r1r,r1i, r2r,r2i))
for s in S_mod_neg:
sr, si = s.denominator()
rr, ri = s.numerator()
print("{} S {} {} {} {}".format(d, rr,ri, sr,si))
print("//////////////////////////////")
return A123, new_alphas, new_sigmas
#
# From scratch:
#
def alpha_sigma_data(d, verbose=False):
k = make_k(d)['k']
print("k = {}, class number {}".format(k,k.class_number()))
sigmas = singular_points(k)
print("{} singular points: {}".format(len(sigmas), sigmas))
maxn, alphas0, sigmas = find_covering_alphas(k, sigmas, verbose=verbose)
print("{} covering alphas, max denom norm {}: {}".format(len(alphas0), maxn, alphas0))
alphas1, points = saturate_covering_alphas(k, alphas0, sigmas, debug=verbose, verbose=verbose)
maxn = max(a.denominator().norm() for a in alphas1)
print("{} fundamental domain alphas, max denom norm {}: {}".format(len(alphas1), maxn, alphas1))
print("{} fundamental vertices, min square height = {}".format(len(points), min(P[1] for P in points)))
# A2, new_alphas, M_alphas, pluspairs, minuspairs, long_fours
data = find_edge_pairs(alphas1, sigmas)
alphas2 = data[0] + data[1]
new_sigmas = data[2]
# for adding to precomputed alphas in alphas.py:
alpha_string = "alphalist[{}] = [".format(d) + ", ".join(["({})/({})".format(a.numerator(), a.denominator()) for a in alphas2]) + "]\n"
alpha_string = alpha_string.replace(" ", "").replace('w','t').replace(",(",", (").replace("="," = ")
print(alpha_string)
sigma_string = "sigmas: [" + ", ".join(["({})/({})".format(s.numerator(), s.denominator()) for s in new_sigmas]) + "]\n"
print(sigma_string)
return alphas2, new_sigmas
def tessellation(d, verbose=False, plot2D=False, plot3D=False, browser="/usr/bin/firefox"):
from utils import (make_M_alphas,
make_poly_from_edges,
poly_equiv, tri0, tri1, tri2, cycle_poly, std_poly,
poly_gl2_orbit_reps,
aas_triangle_gl2_orbit_reps,
square_parameters,
aaa_triangle_parameters,
aas_triangle_parameters,
hexagon_parameters)
from polyhedra import all_poly_types, poly_type, poly_types
kdata = make_k(d)
k = kdata['k']
if verbose:
print("Field: {}".format(k))
print("Discriminant: {}".format(k.discriminant()))
print("Class number: {}".format(k.class_number()))
alphas = precomputed_alphas(d)
if alphas:
if verbose:
print("using precomputed alphas")
sigmas = singular_points(k)
data = find_edge_pairs(alphas, sigmas)
alphas = data[0] + data[1]
sigmas = data[2]
else:
if verbose:
print("computing alphas from scratch")
alphas, sigmas = alpha_sigma_data(d, verbose)
M_alphas, alpha_inv = make_M_alphas(alphas)
print("{} alphas".format(len(alphas)))
print("{} sigmas".format(len(sigmas)))
if plot2D:
print("plotting projection of fundamental domain")
show(plot_FunDomain_projection(k, alphas, sigmas))
polys, hemis = all_polyhedra(k, alphas)
print("{} polyhedra constructed".format(len(polys)))
if plot3D:
print("plotting fundamental domain")
from sage.misc.viewer import viewer
viewer.browser(browser)
show(plot_Bianchi_diagram(k,hemis))
pt = poly_types(polys)
if pt['unknown']:
print("{} polyhedra have unknown type!".format(pt['unknown']))
return
for pol,num in pt.items():
if num:
print("{}: {}".format(pol,num))
triangles = [make_poly_from_edges(t,k) for t in sum([[F for F in G.faces() if len(F)==3] for G in polys],[])]
squares = [make_poly_from_edges(t,k) for t in sum([[F for F in G.faces() if len(F)==4] for G in polys],[])]
hexagons = [make_poly_from_edges(t,k) for t in sum([[F for F in G.faces() if len(F)==6] for G in polys],[])]
aaa_triangles = [T for T in triangles if is_poly_principal(T)]
aas_triangles = [T for T in triangles if not is_poly_principal(T)]
if verbose:
print("All polyhedron faces:")
print("{} triangles, of which {} are aaa and {} are aas".format(len(triangles),len(aaa_triangles),len(aas_triangles)))
print("{} squares".format(len(squares)))
print("{} hexagons".format(len(hexagons)))
print()
print("Finding GL2-orbits of faces...")
aaa_triangles0 = poly_gl2_orbit_reps(aaa_triangles, alphas)
aas_triangles0 = aas_triangle_gl2_orbit_reps(aas_triangles, alphas)
squares0 = poly_gl2_orbit_reps(squares, alphas)
hexagons0 = poly_gl2_orbit_reps(hexagons, alphas)
if verbose:
print("GL2-orbits of faces:")
print("{} aaa-triangles".format(len(aaa_triangles0)))
print("{} aas-triangles".format(len(aas_triangles0)))
print("{} squares".format(len(squares0)))
print("{} hexagons".format(len(hexagons0)))
print("Face parameters")
print("//////////////////////////////")
for T in aaa_triangles0:
aaa_triangle_parameters(T, alphas, M_alphas)
for T in aas_triangles0:
aas_triangle_parameters(T, alphas, M_alphas, sigmas)
for S in squares0:
square_parameters(S, alphas, M_alphas, alpha_inv)
for H in hexagons0:
hexagon_parameters(H, alphas, M_alphas)
print("//////////////////////////////")
| JohnCremona/bianchi-progs | FD/H3.py | Python | gpl-3.0 | 72,028 | 0.008872 |
#!/usr/bin/env python
import roslib
roslib.load_manifest('camera_controller')
import rospy
import tf
if __name__ == '__main__':
rospy.init_node('frame_broadcaster')
br = tf.TransformBroadcaster()
rate = rospy.Rate(10.0)
target_frame = rospy.get_param("~target_frame")
# Camera position
# Translation
x = rospy.get_param("~x",0)
y = rospy.get_param("~y",0)
z = rospy.get_param("~z",0)
# Pose quaternion
qm = rospy.get_param("~qm",0)
qx = rospy.get_param("~qx",0)
qy = rospy.get_param("~qy",0)
qz = rospy.get_param("~qz",1)
while not rospy.is_shutdown():
br.sendTransform((x,y,z), (qm, qx, qy, qz), rospy.Time.now(), target_frame, "world")
rate.sleep()
| dsaldana/roomba_sensor_network | localization_artrack/camera_controller/scripts/frame_broadcaster.py | Python | gpl-3.0 | 678 | 0.042773 |
# Copyright (C) 2013 Statoil ASA, Norway.
#
# The file 'test_run.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
import random
import os.path
import subprocess
import argparse
from .test_area import TestAreaContext
def path_exists( path ):
if os.path.exists( path ):
return (True , "Path:%s exists" % path)
else:
return (False , "ERROR: Path:%s does not exist" % path)
class TestRun(object):
default_ert_cmd = "ert"
default_ert_version = "stable"
default_path_prefix = None
def __init__(self , config_file , args = [] , name = None):
if os.path.exists( config_file ) and os.path.isfile( config_file ):
self.parseArgs(args)
self.__ert_cmd = TestRun.default_ert_cmd
self.path_prefix = TestRun.default_path_prefix
self.config_file = config_file
self.check_list = []
self.workflows = []
if name:
self.name = name
else:
self.name = config_file.replace("/" , ".")
while True:
if self.name[0] == ".":
self.name = self.name[1:]
else:
break
self.name += "/%08d" % random.randint(0,100000000)
else:
raise IOError("No such config file: %s" % config_file)
def parseArgs(self , args):
parser = argparse.ArgumentParser()
parser.add_argument("-v" , "--version" , default = self.default_ert_version)
parser.add_argument("args" , nargs="*")
result = parser.parse_args(args)
self.ert_version = result.version
self.args = result.args
def get_config_file(self):
return self.__config_file
def set_config_file(self , input_config_file):
self.__config_file = os.path.basename( input_config_file )
self.abs_config_file = os.path.abspath( input_config_file )
config_file = property( get_config_file , set_config_file )
#-----------------------------------------------------------------
def set_path_prefix(self , path_prefix):
self.__path_prefix = path_prefix
def get_path_prefix(self):
return self.__path_prefix
path_prefix = property( get_path_prefix , set_path_prefix )
#-----------------------------------------------------------------
def get_ert_cmd(self):
return self.__ert_cmd
def set_ert_cmd(self , cmd):
self.__ert_cmd = cmd
ert_cmd = property( get_ert_cmd , set_ert_cmd)
#-----------------------------------------------------------------
def get_workflows(self):
return self.workflows
def add_workflow(self , workflow):
self.workflows.append( workflow )
#-----------------------------------------------------------------
def get_args(self):
return self.args
#-----------------------------------------------------------------
def add_check( self , check_func , arg):
if callable(check_func):
self.check_list.append( (check_func , arg) )
else:
raise Exception("The checker:%s is not callable" % check_func )
#-----------------------------------------------------------------
def __run(self , work_area ):
argList = [ self.ert_cmd , "-v" , self.ert_version ]
for arg in self.args:
argList.append( arg )
argList.append( self.config_file )
for wf in self.workflows:
argList.append( wf )
status = subprocess.call( argList )
if status == 0:
return (True , "ert has run successfully")
else:
return (False , "ERROR:: ert exited with status code:%s" % status)
def run(self):
if len(self.workflows):
with TestAreaContext(self.name , prefix = self.path_prefix , store_area = False) as work_area:
test_cwd = work_area.get_cwd()
work_area.copy_parent_content( self.abs_config_file )
status = self.__run( work_area )
global_status = status[0]
status_list = [ status ]
if status[0]:
for (check_func , arg) in self.check_list:
status = check_func( arg )
status_list.append( status )
if not status[0]:
global_status = False
if not global_status:
work_area.set_store( True )
return (global_status , test_cwd , status_list)
else:
raise Exception("Must have added workflows before invoking start()")
| arielalmendral/ert | python/python/ert/test/test_run.py | Python | gpl-3.0 | 5,315 | 0.023518 |
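# A minimal usage sketch for the TestRun class defined above. The config path,
# workflow name and checked output path are hypothetical, and run() shells out
# to the external "ert" command, so this sketch only succeeds inside a working
# ERT installation where that config file exists.
from ert.test.test_run import TestRun, path_exists

def run_smoke_test(config_path):
    test_run = TestRun(config_path, args=["-v", "stable"], name="smoke_test")
    test_run.add_workflow("SMOOTHER_UPDATE")        # run() requires at least one workflow
    test_run.add_check(path_exists, "storage")      # checkers return (ok, message) tuples
    global_status, test_cwd, status_list = test_run.run()
    return global_status, test_cwd, status_list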
from equity import EquityPricer
class FuturePricer(EquityPricer):
def __init__(self):
super(FuturePricer,self).__init__()
| lsbardel/flow | flow/db/instdata/pricers/future.py | Python | bsd-3-clause | 158 | 0.031646 |
import gzip
import json
import os
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.paginator import Paginator
from django.db.models import Avg
from django.db.models import Count
from django.db.models import Max
from django.db.models import Min
from django.db.models import Q
from django.db.models import Sum
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
from django.template import RequestContext
from django.utils.text import slugify
from django.views.generic import DeleteView
from individuals.forms import IndividualForm, ComparisonForm, GroupForm, BrowserForm
from individuals.models import Individual, Group
from individuals.tasks import VerifyVCF, AnnotateVariants, PopulateVariants
from variants.models import Variant
def response_mimetype(request):
if "application/json" in request.META['HTTP_ACCEPT']:
return "application/json"
else:
return "text/plain"
class JSONResponse(HttpResponse):
"""JSON response class."""
def __init__(self,obj='',json_opts={},mimetype="application/json",*args,**kwargs):
content = json.dumps(obj,**json_opts)
super(JSONResponse,self).__init__(content,mimetype,*args,**kwargs)
def create(request):
if request.method == 'POST':
form = IndividualForm(request.POST, request.FILES)
if form.is_valid():
if request.user.is_authenticated:
individual = Individual.objects.create(user=request.user, status='new')
else:
individual = Individual.objects.create(user=None, status='new')
individual.vcf_file= request.FILES.get('file')
print('file')
print(request.FILES.get('file'))
filename = individual.vcf_file.name.split('.')
new_filename = []
for tag in filename:
new_filename.append(slugify(tag))
individual.vcf_file.name = ".".join(new_filename)
print('filename ', filename)
#get name from inside vcf file
individual.name= str(os.path.splitext(individual.vcf_file.name)[0]).replace('.vcf','').replace('.gz','').replace('.rar','').replace('.zip','').replace('._',' ').replace('.',' ')
# individual.shared_with_groups = form.cleaned_data['shared_with_groups']
individual.shared_with_groups.set(form.cleaned_data['shared_with_groups'])
individual.save()
f = individual.vcf_file
#fix permissions
#os.chmod("%s/genomes/%s/" % (settings.BASE_DIR, individual.user), 0777)
#if request.user.is_authenticated:
# os.chmod("%s/genomes/%s/%s" % (settings.BASE_DIR, slugify(individual.user), individual.id), 0o777)
#else:
# os.chmod("%s/genomes/public/%s" % (settings.BASE_DIR, individual.id), 0o777)
# AnnotateVariants.delay(individual.id)
# VerifyVCF.delay(individual.id)
data = {'files': [{'deleteType': 'DELETE', 'name': individual.name, 'url': '', 'thumbnailUrl': '', 'type': 'image/png', 'deleteUrl': '', 'size': f.size}]}
response = JSONResponse(data, mimetype=response_mimetype(request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
else:
print(form.errors)
else:
form = IndividualForm()
return render(request, 'individuals/create.html', {'form':form})
# Create your views here.
@login_required
def edit(request, individual_id):
individual = get_object_or_404(Individual, pk=individual_id)
if request.method == 'POST':
form = IndividualForm(request.POST, instance=individual)
if form.is_valid():
form.save()
return redirect('dashboard')
# form = IndividualForm(request.POST, request.FILES)
# if form.is_valid():
# individual = form.save(commit=False)
# individual.user = request.user
# individual.save()
# return redirect('dashboard')
else:
form = IndividualForm(instance=individual)
return render(request, 'individuals/individual_form.html', {'form':form})
class IndividualDeleteView(DeleteView):
model = Individual
def delete(self, request, *args, **kwargs):
"""
        Delete the individual's uploaded files from disk, then remove the
        database record.
"""
self.object = self.get_object()
individual_id = self.object.id
if self.object.user:
username = self.object.user.username
else:
username = 'public'
#delete files
if self.object.vcf_file:
self.object.vcf_file.delete()
# if self.object.strs_file:
# self.object.strs_file.delete()
# if self.object.cnvs_file:
# self.object.cnvs_file.delete()
os.system('rm -rf %s/genomes/%s/%s' % (settings.BASE_DIR, username, individual_id))
self.object.delete()
# response = JSONResponse(True, {}, response_mimetype(self.request))
# response['Content-Disposition'] = 'inline; filename=files.json'
# return response
messages.add_message(request, messages.INFO, "Individual deleted with success!")
#return redirect('individuals_list')
return redirect('individuals_list')
def view(request, individual_id):
individual = get_object_or_404(Individual, pk=individual_id)
variant_list = Variant.objects.filter(individual=individual)
# snpeff = SnpeffAnnotation.objects.filter(individual=individual)
individual.n_variants = variant_list.count()
individual.novel_variants = variant_list.filter(variant_id = '.').count()
individual.summary = []
#get calculated values from database
summary_item = {
'type': 'Total SNVs',
'total': variant_list.values('genotype').count(),
'discrete': variant_list.values('genotype').annotate(total=Count('genotype'))
}
individual.summary.append(summary_item)
summary_item = {
'type': 'Total Gene-associated SNVs',
'total': variant_list.values('gene').exclude(gene="").count(),
'discrete': variant_list.exclude(gene="").values('genotype').annotate(total=Count('genotype'))
}
individual.summary.append(summary_item)
individual.snp_eff = variant_list.values('snpeff_effect').annotate(Count('snpeff_effect')).order_by('snpeff_effect')
# print 'individual.snp_eff', individual.snp_eff
# variant_list.values('snpeff__effect').annotate(Count('snpeff__effect')).order_by('snpeff__effect')
#
individual.functional_class = variant_list.values('snpeff_func_class').annotate(Count('snpeff_func_class')).order_by('snpeff_func_class')
individual.impact_variants = variant_list.values('snpeff_impact').annotate(Count('snpeff_impact')).order_by('snpeff_impact')
individual.filter_variants = variant_list.values('filter').annotate(Count('filter')).order_by('filter')
individual.quality = variant_list.aggregate(Avg('qual'), Max('qual'), Min('qual'))
individual.read_depth = variant_list.aggregate(Avg('read_depth'), Max('read_depth'), Min('read_depth'))
individual.clinvar_clnsig = variant_list.values('clinvar_clnsig').annotate(total=Count('clinvar_clnsig'))
individual.chromossome = variant_list.values('chr').annotate(total=Count('chr')).order_by('chr')
# variants_with_snpid = variant_list.values('variant_id').exclude(variant_id=".")
#print variants_with_snpid
# fields = Variant._meta.get_all_field_names()
paginator = Paginator(variant_list, 25) # Show 25 contacts per page
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
variants = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
variants = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
variants = paginator.page(paginator.num_pages)
#'fields':fields
return render(request, 'individuals/view.html', {'individual': individual, 'variants':variants})
@login_required
def browse(request, individual_id):
query_string = request.META['QUERY_STRING']
individual = get_object_or_404(Individual, pk=individual_id)
query = {}
# DEFAULT_SORT = 'pk'
# sort_key = request.GET.get('sort', DEFAULT_SORT)
# tags = ['genotype', 'snpeffannotation__effect']#, 'func_class', 'impact', 'cln_omim', 'chr'
# for tag in tags:
# criteria = request.GET.get(tag, '')
# if criteria:
# query[tag] = criteria
if request.method == 'GET':
form = BrowserForm(request.GET)
if form.is_valid():
print('form is valid')
#chr
chr = request.GET.get('chr', '')
if chr != '':
query['chr'] = chr
#pos
pos = request.GET.get('pos', '')
if pos != '':
query['pos'] = pos
effect = request.GET.get('effect', '')
if effect != '':
print('effect', effect)
query['snpeff_effect'] = effect
#snp_id
# snp_id = request.GET.get('snp_id', '')
# if snp_id != '':
# query['variant_id'] = snp_id
# snp_list = request.GET.get('snp_list', '')
# snp_list = snp_list.split('\r\n')
# if snp_list[0] != u'':
# query['variant_id__in'] = snp_list
# snp_eff = request.GET.getlist('effect')
# if len(snp_eff) > 0:
# query['snp_eff__in'] = snp_eff
# func_class = request.GET.getlist('func_class')
# if len(func_class) > 0:
# query['snp_eff_functional_class__in'] = func_class
# gene = request.GET.get('gene', '')
# if gene != '':
# query['gene_name'] = gene
# gene_list = request.GET.get('gene_list', '')
# gene_list = gene_list.split('\r\n')
# if gene_list[0] != u'':
# query['gene_name__in'] = gene_list
# cln = request.GET.get('cln_omim', '')
# print 'clnomim', cln
# if cln == 'on':
# query['cln_omim'] != ''
variants = Variant.objects.filter(individual=individual, **query)
# snpeff_annotations = SnpeffAnnotation.objects.filter(variant__in=variants)
# #b.entry_set.filter(headline__contains='Lennon')
# print 'snpeff_annotations', len(snpeff_annotations)
# for variant in variants:
# print variant.entry_set.all()
# variant.snpeff=
else:
form = BrowserForm(request.GET)
variants = Variant.objects.filter(individual=individual, **query)
#Pagination
paginator = Paginator(variants, 25) # Show 25 contacts per page
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
variants = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
variants = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
variants = paginator.page(paginator.num_pages)
return render(request, 'variants/variants.html', {'individual': individual, 'variants':variants, 'form':form, 'query_string':query_string})
@login_required
def list(request):
if request.method == 'POST':
individuals = request.POST.getlist('individuals')
print(individuals)
individuals = [int(x) for x in individuals]
print(individuals)
if request.POST['selectionField'] == "Show":
for individual_id in individuals:
individual = get_object_or_404(Individual, pk=individual_id)
individual.is_featured = True
individual.save()
if request.POST['selectionField'] == "Hide":
for individual_id in individuals:
individual = get_object_or_404(Individual, pk=individual_id)
individual.is_featured = False
individual.save()
if request.POST['selectionField'] == "Delete":
for individual_id in individuals:
individual = get_object_or_404(Individual, pk=individual_id)
individual_id = individual.id
username = individual.user.username
#delete files
if individual.vcf_file:
individual.vcf_file.delete()
# if individual.strs_file:
# individual.strs_file.delete()
# if individual.cnvs_file:
# individual.cnvs_file.delete()
os.system('rm -rf %s/genomes/%s/%s' % (settings.BASE_DIR, username, individual_id))
individual.delete()
#os.system('rm -rf mendelmd14/site_media/media/genomes/%s/%s' % (username, individual_id))
if request.POST['selectionField'] == "Populate":
for individual_id in individuals:
individual = get_object_or_404(Individual, pk=individual_id)
PopulateVariants.delay(individual.id)
if request.POST['selectionField'] == "Annotate":
for individual_id in individuals:
individual = get_object_or_404(Individual, pk=individual_id)
AnnotateVariants.delay(individual.id)
if request.POST['selectionField'] == "Find_Medical_Conditions_and_Medicines":
for individual_id in individuals:
individual = get_object_or_404(Individual, pk=individual_id)
Find_Medical_Conditions_and_Medicines.delay(individual.id)
args = []
# groups = Groups.objects.filter(user=request.user, shared_with_users=).order_by("-id")
args.append(Q(user=request.user) | Q(shared_with_users=request.user) | Q(shared_with_groups__members=request.user))
if request.user.is_staff:
individuals = Individual.objects.all()
else:
individuals = Individual.objects.filter(*args).order_by("-id")
ind_featured = Individual.objects.filter(is_featured= True).order_by("id")
# paginator = Paginator(individuals, 25) # Show 25 contacts per page
# try:
# page = int(request.GET.get('page', '1'))
# except ValueError:
# page = 1
# try:
# individuals = paginator.page(page)
# except PageNotAnInteger:
# # If page is not an integer, deliver first page.
# individuals = paginator.page(1)
# except EmptyPage:
# # If page is out of range (e.g. 9999), deliver last page of results.
# individuals = paginator.page(paginator.num_pages)
groups = Group.objects.all()
# individuals = Individual.objects.annotate(number_of_variants=Count('variant'))
return render(request, 'individuals/list.html', {'individuals': individuals, 'groups':groups, 'ind_featured':ind_featured})
@login_required
def annotate(request, individual_id):
individual = get_object_or_404(Individual, pk=individual_id)
individual.status = 'new'
individual.n_lines = 0
VerifyVCF.delay(individual.id)
individual.save()
messages.add_message(request, messages.INFO, "Your individual is being annotated.")
return redirect('dashboard')
@login_required
def populate(request, individual_id):
individual = get_object_or_404(Individual, pk=individual_id)
PopulateVariants.delay(individual.id)
messages.add_message(request, messages.INFO, "Your individual is being populated.")
return redirect('dashboard')
@login_required
def populate_mongo(request, individual_id):
individual = get_object_or_404(Individual, pk=individual_id)
PopulateMongoVariants.delay(individual.id)
messages.add_message(request, messages.INFO, "Your individual is being inserted at MongoDB.")
return redirect('individuals_list')
def download(request, individual_id):
individual = get_object_or_404(Individual, pk=individual_id)
filepath = os.path.dirname(str(individual.vcf_file.name))
filename = os.path.basename(str(individual.vcf_file.name))
path = ''
# os.chmod("%s/genomes/%s/%s" % (settings.MEDIA_ROOT, individual.user, individual.id), 0777)
# if filename.endswith('vcf.zip'):
# basename = filename.split('.vcf.zip')[0]
# elif filename.endswith('.zip'):
# basename = filename.split('.zip')[0]
# else:
# basename = filename.split('.vcf')[0]
#print basename
#print path
#print filepath
fullpath = '%s/%s' % (filepath, filename)
if filename.endswith('.gz'):
vcffile = gzip.open(fullpath, 'r')
else:
vcffile = open(fullpath, 'r')
content = vcffile.read()
vcffile.close()
response = HttpResponse(content, content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=%s' % filename
response['Content-Length'] = os.path.getsize(fullpath)
return response
def download_annotated(request, individual_id):
individual = get_object_or_404(Individual, pk=individual_id)
filepath = os.path.dirname(str(individual.vcf_file.name))
filename = os.path.basename(str(individual.vcf_file.name))
# path = settings.MEDIA_ROOT
# if filename.endswith('vcf.zip'):
# basename = filename.split('.vcf.zip')[0]
# else:
basename = filename.split('.vcf')[0]
fullpath = '%s/annotation.final.vcf.zip' % (filepath)
vcffile = open(fullpath, 'rb')
response = HttpResponse(vcffile, content_type='application/x-zip-compressed')
# # response['Content-Encoding'] = 'gzip'
response['Content-Disposition'] = 'attachment; filename=%s.annotated.mendelmd.vcf.zip' % basename
response['Content-Length'] = os.path.getsize(fullpath)
return response
@login_required
def create_group(request):
if request.method == 'POST':
form = GroupForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('individuals_list')
else:
form = GroupForm()
return render(request, 'groups/create_group.html', {'form': form})
@login_required
def view_group(request, group_id):
group = get_object_or_404(Group, pk=group_id)
return render(request, 'groups/view_group.html', {'group': group})
class GroupDeleteView(DeleteView):
model = Group
def delete(self, request, *args, **kwargs):
"""
        Groups have no uploaded files, so this only removes the database
        record.
"""
self.object = self.get_object()
#username = self.object.user.username
self.object.delete()
messages.add_message(request, messages.INFO, "Group deleted with success!")
return redirect('individuals_list')
def comparison(request):
query = {}
summary = {}
variants = []
query_string = request.META['QUERY_STRING']
if request.method == 'GET':
form = ComparisonForm(request.user, request.GET, request.FILES)
if form.is_valid():
individual_one_id = request.GET.get('individual_one', '')
individual_two_id = request.GET.get('individual_two', '')
read_depth = request.GET.get('read_depth', '')
if read_depth != '':
query['read_depth__gte'] = float(read_depth)
if individual_one_id != '' and individual_two_id != '':
variants_ind_one = Variant.objects.filter(individual__id=individual_one_id, **query).values('chr', 'pos', 'genotype')
variants_ind_two = Variant.objects.filter(individual__id=individual_two_id, **query).values('chr', 'pos', 'genotype')
print('Got Variants from Both!')
genotypes_in_common = 0
genotypes_not_in_common = 0
ind_one = {}
ind_two = {}
summary['variants_ind_one'] = variants_ind_one.count()
for variant in variants_ind_one:
id = '%s-%s' % (variant['chr'], variant['pos'])
if id in ind_one:
ind_one[id].append(variant['genotype'])
else:
ind_one[id] = []
ind_one[id].append(variant['genotype'])
summary['variants_ind_two'] = variants_ind_two.count()
for variant in variants_ind_two:
id = '%s-%s' % (variant['chr'], variant['pos'])
if id in ind_two:
ind_two[id].append(variant['genotype'])
else:
ind_two[id] = []
ind_two[id].append(variant['genotype'])
print('Finished creating indexes')
for pos in ind_one:
if pos in ind_two:
for genotype in ind_one[pos]:
if genotype in ind_two[pos]:
genotypes_in_common += 1
# variant ={}
# variant['chr'] = item.split('-')[0]
# variant['pos'] = item.split('-')[1]
# variant['genotype'] = ind_two[item]
# variants.append(variant)
else:
genotypes_not_in_common += 1
#
print('genotypes in common: %s' % genotypes_in_common)
summary['genotypes_in_common'] = genotypes_in_common
summary['genotypes_not_in_common'] = genotypes_not_in_common
summary['total_variants'] = genotypes_in_common + genotypes_not_in_common
summary['percent_ind_one'] = round((float(genotypes_in_common)/summary['variants_ind_one'])*100, 2)
summary['percent_ind_two'] = round((float(genotypes_in_common)/summary['variants_ind_two'])*100, 2)
print(summary)
else:
form = ComparisonForm(request.user)
return render(request, 'individuals/comparison.html', {'form':form, 'summary':summary, 'query_string':query_string})
| raonyguimaraes/mendelmd | individuals/views.py | Python | bsd-3-clause | 23,385 | 0.008766 |
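# A small self-contained sketch of the genotype-comparison logic used in the
# `comparison` view of individuals/views.py above: index one individual's
# variants by "chr-pos" and count shared genotypes at positions present in
# both. Plain dicts stand in for the Variant queryset rows; the sample data is
# made up for illustration.
def count_shared_genotypes(variants_one, variants_two):
    index_two = {}
    for v in variants_two:
        index_two.setdefault('%s-%s' % (v['chr'], v['pos']), []).append(v['genotype'])
    in_common = not_in_common = 0
    for v in variants_one:
        genotypes = index_two.get('%s-%s' % (v['chr'], v['pos']))
        if genotypes is None:
            continue  # only positions present in both individuals are compared
        if v['genotype'] in genotypes:
            in_common += 1
        else:
            not_in_common += 1
    return in_common, not_in_common

print(count_shared_genotypes(
    [{'chr': '1', 'pos': 100, 'genotype': '0/1'}, {'chr': '2', 'pos': 50, 'genotype': '1/1'}],
    [{'chr': '1', 'pos': 100, 'genotype': '0/1'}, {'chr': '3', 'pos': 70, 'genotype': '0/1'}]))
# -> (1, 0)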
from __future__ import (absolute_import, division, print_function)
import unittest
import mantid
import os
import numpy as np
from sans.test_helper.test_director import TestDirector
from sans.state.wavelength_and_pixel_adjustment import get_wavelength_and_pixel_adjustment_builder
from sans.common.enums import (RebinType, RangeStepType, DetectorType)
from sans.common.general_functions import (create_unmanaged_algorithm)
from sans.common.constants import EMPTY_NAME
class SANSCalculateTransmissionTest(unittest.TestCase):
@staticmethod
def _create_test_wavelength_adjustment_file(file_name):
test_file = (" Tue 24-MAR-2015 00:02 Workspace: directbeam_new_hist\n"
"\n"
" 6 0 0 0 1 6 0\n"
" 0 0 0 0\n"
" 3 (F12.5,2E16.6)\n"
" 1.00000 5.000000e-01 5.000000e-01\n"
" 3.00000 5.000000e-01 5.000000e-01\n"
" 5.00000 5.000000e-01 5.000000e-01\n"
" 7.00000 5.000000e-01 5.000000e-01\n"
" 9.00000 5.000000e-01 5.000000e-01\n"
" 11.00000 5.000000e-01 5.000000e-01\n")
full_file_path = os.path.join(mantid.config.getString('defaultsave.directory'), file_name)
if os.path.exists(full_file_path):
os.remove(full_file_path)
with open(full_file_path, 'w') as f:
f.write(test_file)
return full_file_path
@staticmethod
def _remove_test_file(file_name):
if os.path.exists(file_name):
os.remove(file_name)
@staticmethod
def _get_state(lab_pixel_file=None, hab_pixel_file=None, lab_wavelength_file=None, hab_wavelength_file=None,
wavelength_low=None, wavelength_high=None, wavelength_step=None,
wavelength_step_type=None):
test_director = TestDirector()
state = test_director.construct()
data_state = state.data
wavelength_and_pixel_builder = get_wavelength_and_pixel_adjustment_builder(data_state)
if lab_pixel_file:
wavelength_and_pixel_builder.set_LAB_pixel_adjustment_file(lab_pixel_file)
if hab_pixel_file:
wavelength_and_pixel_builder.set_HAB_pixel_adjustment_file(hab_pixel_file)
if lab_wavelength_file:
wavelength_and_pixel_builder.set_LAB_wavelength_adjustment_file(lab_wavelength_file)
if hab_wavelength_file:
wavelength_and_pixel_builder.set_HAB_wavelength_adjustment_file(hab_wavelength_file)
if wavelength_step_type:
wavelength_and_pixel_builder.set_wavelength_step_type(wavelength_step_type)
if wavelength_low:
wavelength_and_pixel_builder.set_wavelength_low(wavelength_low)
if wavelength_high:
wavelength_and_pixel_builder.set_wavelength_high(wavelength_high)
if wavelength_step:
wavelength_and_pixel_builder.set_wavelength_step(wavelength_step)
wavelength_and_pixel_state = wavelength_and_pixel_builder.build()
state.adjustment.wavelength_and_pixel_adjustment = wavelength_and_pixel_state
return state.property_manager
@staticmethod
def _get_workspace(data):
create_name = "CreateSampleWorkspace"
create_options = {"NumBanks": 1,
"BankPixelWidth": 1,
"XMin": 1,
"XMax": 11,
"BinWidth": 2,
"XUnit": "Wavelength",
"OutputWorkspace": EMPTY_NAME}
create_alg = create_unmanaged_algorithm(create_name, **create_options)
create_alg.execute()
workspace = create_alg.getProperty("OutputWorkspace").value
data_y = workspace.dataY(0)
for index in range(len(data_y)):
data_y[index] = data[index]
return workspace
@staticmethod
def _run_test(transmission_workspace, norm_workspace, state, is_lab=True):
adjust_name = "SANSCreateWavelengthAndPixelAdjustment"
adjust_options = {"TransmissionWorkspace": transmission_workspace,
"NormalizeToMonitorWorkspace": norm_workspace,
"SANSState": state,
"OutputWorkspaceWavelengthAdjustment": "out_wavelength",
"OutputWorkspacePixelAdjustment": "out_pixels"}
if is_lab:
adjust_options.update({"Component": DetectorType.to_string(DetectorType.LAB)})
else:
adjust_options.update({"Component": DetectorType.to_string(DetectorType.HAB)})
adjust_alg = create_unmanaged_algorithm(adjust_name, **adjust_options)
adjust_alg.execute()
wavelength_adjustment = adjust_alg.getProperty("OutputWorkspaceWavelengthAdjustment").value
pixel_adjustment = adjust_alg.getProperty("OutputWorkspacePixelAdjustment").value
return wavelength_adjustment, pixel_adjustment
def test_that_gets_wavelength_workspace_when_no_files_are_specified(self):
# Arrange
data_trans = [3., 4., 5., 7., 3.]
data_norm = [9., 3., 8., 3., 1.]
transmission_workspace = SANSCalculateTransmissionTest._get_workspace(data_trans)
norm_workspace = SANSCalculateTransmissionTest._get_workspace(data_norm)
state = SANSCalculateTransmissionTest._get_state(wavelength_low=1., wavelength_high=11., wavelength_step=2.,
wavelength_step_type=RangeStepType.Lin)
# Act
wavelength_adjustment, pixel_adjustment = SANSCalculateTransmissionTest._run_test(transmission_workspace,
norm_workspace, state, True)
# Assert
self.assertTrue(pixel_adjustment is None)
self.assertTrue(wavelength_adjustment.getNumberHistograms() == 1)
expected = np.array(data_trans)*np.array(data_norm)
data_y = wavelength_adjustment.dataY(0)
for e1, e2, in zip(expected, data_y):
self.assertTrue(e1 == e2)
def test_that_gets_adjustment_workspace_if_files_are_specified(self):
# Arrange
data_trans = [3., 4., 5., 7., 3.]
data_norm = [9., 3., 8., 3., 1.]
expected_direct_file_workspace = [0.5, 0.5, 0.5, 0.5, 0.5]
transmission_workspace = SANSCalculateTransmissionTest._get_workspace(data_trans)
norm_workspace = SANSCalculateTransmissionTest._get_workspace(data_norm)
direct_file_name = "DIRECT_test.txt"
direct_file_name = SANSCalculateTransmissionTest._create_test_wavelength_adjustment_file(direct_file_name)
state = SANSCalculateTransmissionTest._get_state(hab_wavelength_file=direct_file_name,
wavelength_low=1., wavelength_high=11., wavelength_step=2.,
wavelength_step_type=RangeStepType.Lin)
# Act
wavelength_adjustment, pixel_adjustment = SANSCalculateTransmissionTest._run_test(transmission_workspace,
norm_workspace, state, False)
# Assert
self.assertTrue(pixel_adjustment is None)
self.assertTrue(wavelength_adjustment.getNumberHistograms() == 1)
expected = np.array(data_trans)*np.array(data_norm)*np.array(expected_direct_file_workspace)
data_y = wavelength_adjustment.dataY(0)
for e1, e2, in zip(expected, data_y):
self.assertTrue(e1 == e2)
# Clean up
SANSCalculateTransmissionTest._remove_test_file(direct_file_name)
if __name__ == '__main__':
unittest.main()
| dymkowsk/mantid | Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSCreateWavelengthAndPixelAdjustmentTest.py | Python | gpl-3.0 | 7,906 | 0.004048 |
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from django.core import urlresolvers
from gasistafelice.rest.views.blocks.base import BlockSSDataTables, ResourceBlockAction
from gasistafelice.consts import CREATE, EDIT, EDIT_MULTIPLE, VIEW
from gasistafelice.lib.shortcuts import render_to_xml_response, render_to_context_response
from gasistafelice.supplier.models import Supplier
from gasistafelice.gas.models import GASMemberOrder
from gasistafelice.gas.forms.order.gmo import SingleGASMemberOrderForm
from gasistafelice.lib.formsets import BaseFormSetWithRequest
from django.forms.formsets import formset_factory
import logging
log = logging.getLogger(__name__)
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
class Block(BlockSSDataTables):
# COMMENT fero: name of this block should be
# something different from "order" (p.e: "make_order")
# because usually we refer to "order" for GASSupplierOrder
BLOCK_NAME = "order"
BLOCK_DESCRIPTION = _("Order")
BLOCK_VALID_RESOURCE_TYPES = ["gasmember"]
COLUMN_INDEX_NAME_MAP = {
0: 'pk',
1: 'gasstock__stock__supplier__name',
2: 'gasstock__stock__product__name',
3: 'order_price',
4: 'tot_amount',
5: 'tot_price',
6: '',
7: 'gasstock__stock__product__category__name',
}
# 3: 'gasstock__stock__product__description',
def _get_resource_list(self, request):
selected_orders = request.GET.getlist('gfCP_order')
rv = request.resource.orderable_products
if (selected_orders):
rv = rv.filter(order__pk__in=map(int, selected_orders))
return rv
def options_response(self, request, resource_type, resource_id):
"""Get options for orders block. Check GAS configuration.
        WARNING: calls to this method don't pass through get_response,
        so you have to reset the self.request and self.resource attributes
        yourself if you need them.
"""
log.debug("order options_response")
self.request = request
self.resource = request.resource
gas = self.resource.gas
orders = gas.orders.open()
field_type = "checkbox"
if gas.config.order_show_only_next_delivery:
orders = orders.order_by('-delivery__date')
if orders[0].delivery:
orders.filter(delivery__date=orders[0].delivery.date)
else:
orders.filter(delivery__date__isnull=True)
if gas.config.order_show_only_one_at_a_time:
field_type = "radio"
fields = []
for i,open_order in enumerate(orders):
if field_type == "radio":
selected = i == 0
else:
selected = True
fields.append({
'field_type' : field_type,
'field_label' : open_order,
'field_name' : 'order',
'field_values' : [{ 'value' : open_order.pk, 'selected' : selected}]
})
ctx = {
'block_name' : self.description,
'fields': fields,
}
return render_to_xml_response('options.xml', ctx)
def _get_edit_multiple_form_class(self):
qs = self._get_resource_list(self.request)
return formset_factory(
form=SingleGASMemberOrderForm,
formset=BaseFormSetWithRequest,
extra=qs.count() - self.__get_gmos(qs).count()
)
def __get_gmos(self, gsop):
log.debug("order block __get_gmos (%s)" % (self.request.resource.gasmember))
return GASMemberOrder.objects.filter(
ordered_product__in=gsop,
purchaser=self.request.resource.gasmember
)
def _get_records(self, request, querySet):
"""Return records of rendered table fields."""
# [:] forces evaluation of the querySet
#FIXME: filtering by purchaser not ok --> return all orders for all gasmembers
gmos = self.__get_gmos(querySet)[:]
data = {}
i = 0
c = querySet.count()
        # Store mapping between GSSOP id and needed info: formset_index and ordered_total
gmo_info = { }
gmo_lint = GASMemberOrder()
for i,el in enumerate(querySet):
try:
#TODO: to be improved in performance
gmo = el.gasmember_order_set.get(
purchaser=self.request.resource.gasmember
)
except GASMemberOrder.DoesNotExist:
gmo=gmo_lint
key_prefix = 'form-%d' % i
data.update({
'%s-id' % key_prefix : gmo.pk,
'%s-ordered_amount' % key_prefix : gmo.ordered_amount or 0,
            '%s-ordered_price' % key_prefix : el.gasstock.price, # displayed as hidden field
            '%s-gsop_id' % key_prefix : el.pk, # displayed as hidden field
'%s-note' % key_prefix : gmo.note,
})
gmo_info[el.pk] = {
'formset_index' : i,
'ordered_total' : (el.gasstock.price or 0)*(gmo.ordered_amount or 0), # This is the total computed NOW (with ordered_product.price)
}
data['form-TOTAL_FORMS'] = c
data['form-INITIAL_FORMS'] = gmos.count()
data['form-MAX_NUM_FORMS'] = 0
formset = self._get_edit_multiple_form_class()(request, data)
records = []
for i,el in enumerate(querySet):
#log.debug("order ordered_amount (%s)" % (i))
try:
form = formset[gmo_info[el.pk]['formset_index']]
total = gmo_info[el.pk]['ordered_total']
except KeyError:
# GASMember has not ordered this product: build an empty form
form = SingleGASMemberOrderForm(self.request)
total = 0
#try:
form.fields['ordered_amount'].widget.attrs = {
'class' : 'amount',
'step' : el.gasstock.step or 1,
'minimum_amount' : el.gasstock.minimum_amount or 1,
's_url' : el.supplier.urn,
'p_url' : el.gasstock.stock.urn,
}
#'p_url' : el.product.urn,
records.append({
'id' : "%s %s %s %s" % (el.pk, form['id'], form['gsop_id'], form['ordered_price']),
'supplier' : el.supplier,
'product' : el.gasstock,
'price' : el.gasstock.price,
            'ordered_amount' : form['ordered_amount'], # field initialised with the minimum amount and carrying the step attribute
'ordered_total' : total,
'note' : form['note'],
'category' : el.product.category
})
#'description' : el.product.description,
#except KeyError:
# log.debug("order ordered_amount (%s %s)" % (el.pk, i))
return formset, records, {}
| OrlyMar/gasistafelice | gasistafelice/rest/views/blocks/order.py | Python | agpl-3.0 | 7,205 | 0.010548 |
from simulator.sensors.SimSensor import SimSensor
from environment.SensoryData import SensoryData
class SimAudioSensor(SimSensor):
def __init__(self, parentBot, name):
super().__init__('Audio', parentBot, name)
def receiveAudio(self, audio):
return SensoryData(self.name, 'Audio', audio)
| randyhook/knynet | simulator/sensors/SimAudioSensor.py | Python | mit | 325 | 0.006154 |
# coding=latin-1
from flask import request, g
from flask import abort, flash
from functools import wraps
def checa_permissao(permissao):
def decorator(f):
@wraps(f)
def inner(*args, **kwargs):
if g.user and g.user.checa_permissao(permissao):
return f(*args, **kwargs)
else:
                flash(u'Warning: you do not have the permission: %s. If this is not correct, please get in touch to request this permission.' % permissao.upper(), u'notice')
abort(401)
return inner
return decorator
| dedeco/cnddh-denuncias | cnddh/decorators.py | Python | apache-2.0 | 593 | 0.005059 |
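# A minimal sketch of how the checa_permissao decorator defined above is meant
# to be applied to a Flask view. The blueprint, route and permission name are
# hypothetical; the only assumption the decorator itself makes is that g.user
# exposes a checa_permissao(permissao) method.
from flask import Blueprint
from cnddh.decorators import checa_permissao

denuncias = Blueprint('denuncias', __name__)

@denuncias.route('/denuncias/nova')
@checa_permissao('criar_denuncia')
def nova_denuncia():
    return 'ok'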
#!/usr/bin/env python
"""
@package mi.dataset.driver.velpt_ab.dcl
@file mi-dataset/mi/dataset/driver/velpt_ab/dcl/velpt_ab_dcl_recovered_driver.py
@author Joe Padula
@brief Recovered driver for the velpt_ab_dcl instrument
Release notes:
Initial Release
"""
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.velpt_ab_dcl import VelptAbDclParser, \
VelptAbDclParticleClassKey
from mi.dataset.parser.velpt_ab_dcl_particles import VelptAbDclDiagnosticsDataParticleRecovered, \
VelptAbDclInstrumentDataParticleRecovered, \
VelptAbDclDiagnosticsHeaderParticleRecovered
from mi.core.versioning import version
@version("15.7.0")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
"""
    This is the method called by Uframe.

    :param basePythonCodePath: This is the file system location of mi-dataset
    :param sourceFilePath: This is the full path and filename of the file to be parsed
    :param particleDataHdlrObj: Java Object to consume the output of the parser
    :return: particleDataHdlrObj
"""
with open(sourceFilePath, 'rb') as stream_handle:
# create and instance of the concrete driver class defined below
driver = VelptAbDclRecoveredDriver(basePythonCodePath, stream_handle, particleDataHdlrObj)
driver.processFileStream()
return particleDataHdlrObj
class VelptAbDclRecoveredDriver(SimpleDatasetDriver):
"""
The velpt_ab_dcl driver class extends the SimpleDatasetDriver.
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_dcl_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
VelptAbDclParticleClassKey.METADATA_PARTICLE_CLASS: VelptAbDclDiagnosticsHeaderParticleRecovered,
VelptAbDclParticleClassKey.DIAGNOSTICS_PARTICLE_CLASS: VelptAbDclDiagnosticsDataParticleRecovered,
VelptAbDclParticleClassKey.INSTRUMENT_PARTICLE_CLASS: VelptAbDclInstrumentDataParticleRecovered
}
}
parser = VelptAbDclParser(parser_config,
stream_handle,
self._exception_callback)
return parser
| JeffRoy/mi-dataset | mi/dataset/driver/velpt_ab/dcl/velpt_ab_dcl_recovered_driver.py | Python | bsd-2-clause | 2,466 | 0.003244 |
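# A hypothetical invocation sketch for the parse() entry point defined above.
# The resource file path is a placeholder, and ParticleDataHandler is assumed
# to be the in-memory handler class that mi-dataset provides for exercising
# drivers outside of Uframe.
import os

from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.velpt_ab.dcl.velpt_ab_dcl_recovered_driver import parse

source_file = os.path.join('mi', 'dataset', 'driver', 'velpt_ab', 'dcl',
                           'resource', '20140813.velpt.log')  # placeholder path
particle_data_handler = parse(os.getcwd(), source_file, ParticleDataHandler())
print(particle_data_handler)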
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.stdio}.
@var properEnv: A copy of L{os.environ} which has L{bytes} keys/values on POSIX
platforms and native L{str} keys/values on Windows.
"""
from __future__ import absolute_import, division
import os
import sys
import itertools
from twisted.trial import unittest
from twisted.python import filepath, log
from twisted.python.reflect import requireModule
from twisted.python.runtime import platform
from twisted.python.compat import xrange, intToBytes, bytesEnviron
from twisted.internet import error, defer, protocol, stdio, reactor
from twisted.test.test_tcp import ConnectionLostNotifyingProtocol
# A short string which is intended to appear here and nowhere else,
# particularly not in any random garbage output CPython unavoidably
# generates (such as in warning text and so forth). This is searched
# for in the output from stdio_test_lastwrite and if it is found at
# the end, the functionality works.
UNIQUE_LAST_WRITE_STRING = b'xyz123abc Twisted is great!'
skipWindowsNopywin32 = None
if platform.isWindows():
if requireModule('win32process') is None:
skipWindowsNopywin32 = ("On windows, spawnProcess is not available "
"in the absence of win32process.")
properEnv = dict(os.environ)
properEnv["PYTHONPATH"] = os.pathsep.join(sys.path)
else:
properEnv = bytesEnviron()
properEnv[b"PYTHONPATH"] = os.pathsep.join(sys.path).encode(
sys.getfilesystemencoding())
class StandardIOTestProcessProtocol(protocol.ProcessProtocol):
"""
Test helper for collecting output from a child process and notifying
something when it exits.
@ivar onConnection: A L{defer.Deferred} which will be called back with
C{None} when the connection to the child process is established.
@ivar onCompletion: A L{defer.Deferred} which will be errbacked with the
failure associated with the child process exiting when it exits.
@ivar onDataReceived: A L{defer.Deferred} which will be called back with
this instance whenever C{childDataReceived} is called, or C{None} to
suppress these callbacks.
@ivar data: A C{dict} mapping file descriptors to strings containing all
bytes received from the child process on each file descriptor.
"""
onDataReceived = None
def __init__(self):
self.onConnection = defer.Deferred()
self.onCompletion = defer.Deferred()
self.data = {}
def connectionMade(self):
self.onConnection.callback(None)
def childDataReceived(self, name, bytes):
"""
Record all bytes received from the child process in the C{data}
dictionary. Fire C{onDataReceived} if it is not C{None}.
"""
self.data[name] = self.data.get(name, b'') + bytes
if self.onDataReceived is not None:
d, self.onDataReceived = self.onDataReceived, None
d.callback(self)
def processEnded(self, reason):
self.onCompletion.callback(reason)
class StandardInputOutputTests(unittest.TestCase):
skip = skipWindowsNopywin32
def _spawnProcess(self, proto, sibling, *args, **kw):
"""
Launch a child Python process and communicate with it using the
given ProcessProtocol.
@param proto: A L{ProcessProtocol} instance which will be connected
to the child process.
@param sibling: The basename of a file containing the Python program
to run in the child process.
@param *args: strings which will be passed to the child process on
the command line as C{argv[2:]}.
@param **kw: additional arguments to pass to L{reactor.spawnProcess}.
@return: The L{IProcessTransport} provider for the spawned process.
"""
args = [sys.executable,
b"-m", b"twisted.test." + sibling,
reactor.__class__.__module__] + list(args)
return reactor.spawnProcess(
proto,
sys.executable,
args,
env=properEnv,
**kw)
def _requireFailure(self, d, callback):
def cb(result):
self.fail("Process terminated with non-Failure: %r" % (result,))
def eb(err):
return callback(err)
return d.addCallbacks(cb, eb)
def test_loseConnection(self):
"""
Verify that a protocol connected to L{StandardIO} can disconnect
itself using C{transport.loseConnection}.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_loseconn', errorLogFile)
def processEnded(reason):
# Copy the child's log to ours so it's more visible.
with open(errorLogFile, 'r') as f:
for line in f:
log.msg("Child logged: " + line.rstrip())
self.failIfIn(1, p.data)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_readConnectionLost(self):
"""
When stdin is closed and the protocol connected to it implements
L{IHalfCloseableProtocol}, the protocol's C{readConnectionLost} method
is called.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
p.onDataReceived = defer.Deferred()
def cbBytes(ignored):
d = p.onCompletion
p.transport.closeStdin()
return d
p.onDataReceived.addCallback(cbBytes)
def processEnded(reason):
reason.trap(error.ProcessDone)
d = self._requireFailure(p.onDataReceived, processEnded)
self._spawnProcess(
p, b'stdio_test_halfclose', errorLogFile)
return d
def test_lastWriteReceived(self):
"""
Verify that a write made directly to stdout using L{os.write}
after StandardIO has finished is reliably received by the
process reading that stdout.
"""
p = StandardIOTestProcessProtocol()
# Note: the OS X bug which prompted the addition of this test
# is an apparent race condition involving non-blocking PTYs.
# Delaying the parent process significantly increases the
# likelihood of the race going the wrong way. If you need to
# fiddle with this code at all, uncommenting the next line
# will likely make your life much easier. It is commented out
# because it makes the test quite slow.
# p.onConnection.addCallback(lambda ign: __import__('time').sleep(5))
try:
self._spawnProcess(
p, b'stdio_test_lastwrite', UNIQUE_LAST_WRITE_STRING,
usePTY=True)
except ValueError as e:
# Some platforms don't work with usePTY=True
raise unittest.SkipTest(str(e))
def processEnded(reason):
"""
Asserts that the parent received the bytes written by the child
immediately after the child starts.
"""
self.assertTrue(
p.data[1].endswith(UNIQUE_LAST_WRITE_STRING),
"Received %r from child, did not find expected bytes." % (
p.data,))
reason.trap(error.ProcessDone)
return self._requireFailure(p.onCompletion, processEnded)
def test_hostAndPeer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
has C{getHost} and C{getPeer} methods.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_hostpeer')
def processEnded(reason):
host, peer = p.data[1].splitlines()
self.assertTrue(host)
self.assertTrue(peer)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_write(self):
"""
Verify that the C{write} method of the transport of a protocol
connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_write')
def processEnded(reason):
self.assertEqual(p.data[1], b'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_writeSequence(self):
"""
Verify that the C{writeSequence} method of the transport of a
protocol connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_writeseq')
def processEnded(reason):
self.assertEqual(p.data[1], b'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def _junkPath(self):
junkPath = self.mktemp()
with open(junkPath, 'wb') as junkFile:
for i in xrange(1024):
junkFile.write(intToBytes(i) + b'\n')
return junkPath
def test_producer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IProducer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
written = []
toWrite = list(range(100))
def connectionMade(ign):
if toWrite:
written.append(intToBytes(toWrite.pop()) + b"\n")
proc.write(written[-1])
reactor.callLater(0.01, connectionMade, None)
proc = self._spawnProcess(p, b'stdio_test_producer')
p.onConnection.addCallback(connectionMade)
def processEnded(reason):
self.assertEqual(p.data[1], b''.join(written))
self.assertFalse(
toWrite,
"Connection lost with %d writes left to go." % (len(toWrite),))
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_consumer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IConsumer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
junkPath = self._junkPath()
self._spawnProcess(p, b'stdio_test_consumer', junkPath)
def processEnded(reason):
with open(junkPath, 'rb') as f:
self.assertEqual(p.data[1], f.read())
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_normalFileStandardOut(self):
"""
If L{StandardIO} is created with a file descriptor which refers to a
normal file (ie, a file from the filesystem), L{StandardIO.write}
writes bytes to that file. In particular, it does not immediately
consider the file closed or call its protocol's C{connectionLost}
method.
"""
onConnLost = defer.Deferred()
proto = ConnectionLostNotifyingProtocol(onConnLost)
path = filepath.FilePath(self.mktemp())
self.normal = normal = path.open('wb')
self.addCleanup(normal.close)
kwargs = dict(stdout=normal.fileno())
if not platform.isWindows():
# Make a fake stdin so that StandardIO doesn't mess with the *real*
# stdin.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
kwargs['stdin'] = r
connection = stdio.StandardIO(proto, **kwargs)
# The reactor needs to spin a bit before it might have incorrectly
# decided stdout is closed. Use this counter to keep track of how
# much we've let it spin. If it closes before we expected, this
# counter will have a value that's too small and we'll know.
howMany = 5
count = itertools.count()
def spin():
for value in count:
if value == howMany:
connection.loseConnection()
return
connection.write(intToBytes(value))
break
reactor.callLater(0, spin)
reactor.callLater(0, spin)
# Once the connection is lost, make sure the counter is at the
# appropriate value.
def cbLost(reason):
self.assertEqual(next(count), howMany + 1)
self.assertEqual(
path.getContent(),
b''.join(map(intToBytes, range(howMany))))
onConnLost.addCallback(cbLost)
return onConnLost
if platform.isWindows():
test_normalFileStandardOut.skip = (
"StandardIO does not accept stdout as an argument to Windows. "
"Testing redirection to a file is therefore harder.")
| Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/test/test_stdio.py | Python | gpl-3.0 | 13,157 | 0.001216 |
from lexer import lang
from ..tree import Node
class Integer(Node):
    """AST node for an integer literal."""

    datatype = lang.SEMANTIC_INT_TYPE

    def __init__(self, symbol, token):
super().__init__(symbol, token)
def generate_code(self, **cond):
array, line = Node.assignated_array()
Node.array_append(array, f'{line} LIT {self.symbol}, 0')
| andaviaco/tronido | src/syntax/types/integer.py | Python | mit | 370 | 0.002703 |
# import multiprocessing to avoid this bug (http://bugs.python.org/issue15881#msg170215)
import multiprocessing
assert multiprocessing
import re
from setuptools import setup, find_packages
def get_version():
"""
Extracts the version number from the version.py file.
"""
VERSION_FILE = 'tour/version.py'
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', open(VERSION_FILE, 'rt').read(), re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
setup(
name='django-tour',
version=get_version(),
description='Require the django user to complete a series of steps with custom logic',
long_description=open('README.md').read(),
url='https://github.com/ambitioninc/django-tour',
author='Wes Okes',
author_email='wes.okes@gmail.com',
keywords='',
packages=find_packages(),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Framework :: Django',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
],
license='MIT',
install_requires=[
'Django>=1.7',
'djangorestframework>=2.3.13',
'django-manager-utils>=0.8.2',
'django_filter>=0.7',
],
tests_require=[
'psycopg2',
'django-nose>=1.4',
'mock==1.0.1',
'django_dynamic_fixture',
],
test_suite='run_tests.run_tests',
include_package_data=True,
)
| ambitioninc/django-tour | setup.py | Python | mit | 1,710 | 0.003509 |
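# A tiny self-contained check of the version-extraction pattern used by
# get_version() in the setup.py above, run against an in-memory string instead
# of tour/version.py; the '1.2.3' value is made up for illustration.
import re

example = "__version__ = '1.2.3'\n"
match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', example, re.M)
assert match is not None and match.group(1) == '1.2.3'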