repo_name
stringlengths 5
92
| path
stringlengths 4
232
| copies
stringclasses 18
values | size
stringlengths 4
7
| content
stringlengths 736
1.04M
| license
stringclasses 15
values | hash
int64 -9,222,983,980,000,580,000
9,223,102,107B
| line_mean
float64 6.51
99.9
| line_max
int64 15
997
| alpha_frac
float64 0.25
0.97
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
GeoNode/geonode | geonode/base/populate_test_data.py | 1 | 17654 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import logging
import os.path
from io import BytesIO
from uuid import uuid4
from itertools import cycle
from taggit.models import Tag
from taggit.models import TaggedItem
from datetime import datetime, timedelta
from django.db import transaction
from django.utils import timezone
from django.contrib.gis.geos import Polygon
from django.contrib.auth.models import Permission, Group
from django.core.serializers import serialize
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from geonode import geoserver # noqa
from geonode.maps.models import Map
from geonode.layers.models import Layer
from geonode.compat import ensure_string
from geonode.base.models import ResourceBase, TopicCategory
from geonode.documents.models import Document
# This is used to populate the database with the search fixture data. This is
# primarily used as a first step to generate the json data for the fixture using
# django's dumpdata
logger = logging.getLogger(__name__)
# Minimal valid one-pixel GIF payload, used as the uploaded file content
# for every Document created by these helpers.
imgfile = BytesIO(
    b'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
    b'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;'
)
# Shared upload wrapper. NOTE(review): imgfile.read() is consumed once at
# import time; every Document reuses this same SimpleUploadedFile instance.
f = SimpleUploadedFile('test_img_file.gif', imgfile.read(), 'image/gif')
def all_public():
    """Ensure all layers, maps and documents are publicly available.

    Every resource of each kind gets its default (public) permission set
    and its dirty state cleared; finally all ResourceBase rows are flagged
    clean in a single bulk update.
    """
    for model in (Layer, Map, Document):
        for resource in model.objects.all():
            resource.set_default_permissions()
            resource.clear_dirty_state()
    ResourceBase.objects.all().update(dirty_state=False)
def create_fixtures():
    """Build the raw data tuples consumed by ``create_models``.

    Returns a 5-tuple of lists: ``(map_data, user_data, people_data,
    layer_data, document_data)``. Nothing is written to the database here;
    only the required ``TopicCategory`` rows are read.
    """
    biota = TopicCategory.objects.get(identifier='biota')
    location = TopicCategory.objects.get(identifier='location')
    elevation = TopicCategory.objects.get(identifier='elevation')
    farming = TopicCategory.objects.get(identifier='farming')
    # Extents are stored as [x0, x1, y0, y1] (lon min/max, lat min/max).
    world_extent = [-180, 180, -90, 90]
    # Each entry: (title, abstract, keywords, extent, category).
    map_data = [
        ('GeoNode Default Map', 'GeoNode default map abstract', ('populartag',), world_extent, biota),
        ('ipsum lorem', 'common ipsum lorem', ('populartag', 'maptagunique'), world_extent, biota),
        ('lorem1 ipsum1', 'common abstract1', ('populartag',), world_extent, biota),
        ('ipsum foo', 'common bar lorem', ('populartag',), world_extent, location),
        ('map one', 'common this is a unique thing', ('populartag',), [0, 1, 0, 1], location),
        ('quux', 'common double thing', ('populartag',), [0, 5, 0, 5], location),
        ('morx', 'common thing double', ('populartag',), [0, 10, 0, 10], elevation),
        ('titledupe something else ', 'whatever common', ('populartag',), [0, 10, 0, 10], elevation),
        ('something titledupe else ', 'bar common', ('populartag',), [0, 50, 0, 50], elevation),
        ('map metadata true', 'map metadata true', ('populartag',), [0, 22, 0, 22], farming),
    ]
    # Each entry: (username, password, first_name, last_name).
    user_data = [
        ('bobby', 'bob', 'bobby', ''),
        ('norman', 'norman', 'norman', ''),
        ('user1', 'pass', 'uniquefirst', 'foo'),
        ('user2', 'pass', 'foo', 'uniquelast'),
        ('unique_username', 'pass', 'foo', 'uniquelast'),
        ('jblaze', 'pass', 'johnny', 'blaze'),
        ('foo', 'pass', 'bar', 'baz'),
    ]
    # Profile blurbs, cycled over the users in create_models().
    people_data = [
        ('this contains all my interesting profile information',),
        ('some other information goes here',),
    ]
    now = datetime.now(timezone.get_current_timezone())
    step = timedelta(days=60)
    def get_test_date():
        # Returns a zero-argument callable that yields dates 60 days apart,
        # walking backwards starting from "now - 60 days".
        def it():
            current = now - step
            while True:
                yield current
                current = current - step
        itinst = it()
        # NOTE(review): the name ``callable`` shadows the builtin within
        # this scope; it is only used locally, so behavior is unaffected.
        def callable():
            return next(itinst)
        return callable
    next_date = get_test_date()
    # Each entry: (title, abstract, name, alternate, extent, date, keywords, category).
    layer_data = [
        ('CA', 'abstract1', 'CA', 'geonode:CA', world_extent, next_date(), ('populartag', 'here'), elevation),
        ('layer2', 'abstract2', 'layer2', 'geonode:layer2', world_extent, next_date(), ('populartag',), elevation),
        ('uniquetitle', 'something here', 'mylayer', 'geonode:mylayer', world_extent, next_date(), ('populartag',), elevation),
        ('common blar', 'lorem ipsum', 'foo', 'geonode:foo', world_extent, next_date(), ('populartag', 'layertagunique'), location),
        ('common double it', 'whatever', 'whatever', 'geonode:whatever', [0, 1, 0, 1], next_date(), ('populartag',), location),
        ('common double time', 'else', 'fooey', 'geonode:fooey', [0, 5, 0, 5], next_date(), ('populartag',), location),
        ('common bar', 'uniqueabstract', 'quux', 'geonode:quux', [0, 10, 0, 10], next_date(), ('populartag',), biota),
        ('common morx', 'lorem ipsum', 'fleem', 'geonode:fleem', [0, 50, 0, 50], next_date(), ('populartag',), biota),
        ('layer metadata true', 'lorem ipsum', 'fleem', 'geonode:metadatatrue', [0, 22, 0, 22], next_date(), ('populartag',), farming)
    ]
    # Each entry: (title, abstract, keywords, extent, category).
    document_data = [
        ('lorem ipsum', 'common lorem ipsum', ('populartag',), world_extent, biota),
        ('ipsum lorem', 'common ipsum lorem', ('populartag', 'doctagunique'), world_extent, biota),
        ('lorem1 ipsum1', 'common abstract1', ('populartag',), world_extent, biota),
        ('ipsum foo', 'common bar lorem', ('populartag',), world_extent, location),
        ('doc one', 'common this is a unique thing', ('populartag',), [0, 1, 0, 1], location),
        ('quux', 'common double thing', ('populartag',), [0, 5, 0, 5], location),
        ('morx', 'common thing double', ('populartag',), [0, 10, 0, 10], elevation),
        ('titledupe something else ', 'whatever common', ('populartag',), [0, 10, 0, 10], elevation),
        ('something titledupe else ', 'bar common', ('populartag',), [0, 50, 0, 50], elevation),
        ('doc metadata true', 'doc metadata true', ('populartag',), [0, 22, 0, 22], farming)
    ]
    return map_data, user_data, people_data, layer_data, document_data
def create_models(type=None, integration=False):
    """Populate the database with test users, maps, documents and layers.

    :param type: restrict creation to one resource kind ('map', 'document'
        or 'layer', str or bytes); when falsy, all three kinds are created.
    :param integration: forwarded to ``DisableDjangoSignals`` so that real
        signal handlers keep firing during integration tests.
    :return: list of ids of the created resources.
    """
    users = []
    obj_ids = []
    with transaction.atomic():
        map_data, user_data, people_data, layer_data, document_data = create_fixtures()
        anonymous_group, created = Group.objects.get_or_create(name='anonymous')
        cont_group, created = Group.objects.get_or_create(name='contributors')
        perm = Permission.objects.get(codename='add_resourcebase')
        cont_group.permissions.add(perm)
        logger.debug("[SetUp] Get or create user admin")
        u, created = get_user_model().objects.get_or_create(username='admin')
        u.set_password('admin')
        u.is_superuser = True
        u.first_name = 'admin'
        u.save()
        u.groups.add(anonymous_group)
        users.append(u)
        # Cycle the two profile blurbs over the list of users.
        for ud, pd in zip(user_data, cycle(people_data)):
            user_name, password, first_name, last_name = ud
            logger.debug(f"[SetUp] Get or create user {user_name}")
            u, created = get_user_model().objects.get_or_create(username=user_name)
            u.set_password(password)
            u.first_name = first_name
            u.last_name = last_name
            u.save()
            u.groups.add(anonymous_group)
            # Plain users also join the contributors group.
            if not (u.is_superuser or u.is_staff or u.is_anonymous):
                u.groups.add(cont_group)
            users.append(u)
        logger.debug(f"[SetUp] Add group {anonymous_group}")
        get_user_model().objects.get(username='AnonymousUser').groups.add(anonymous_group)
        from geonode.utils import DisableDjangoSignals
        # Suppress signal handlers while bulk-creating resources, unless we
        # are running integration tests where the real handlers must fire.
        with DisableDjangoSignals(skip=integration):
            if not type or ensure_string(type) == 'map':
                # Owners are assigned round-robin from the created users.
                for md, user in zip(map_data, cycle(users)):
                    title, abstract, kws, (bbox_x0, bbox_x1, bbox_y0, bbox_y1), category = md
                    logger.debug(f"[SetUp] Add map {title}")
                    m = Map(
                        title=title,
                        abstract=abstract,
                        zoom=4,
                        projection='EPSG:4326',
                        center_x=42,
                        center_y=-73,
                        owner=user,
                        bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
                        ll_bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
                        srid='EPSG:4326',
                        category=category,
                        metadata_only=title == 'map metadata true'
                    )
                    m.save()
                    m.set_default_permissions()
                    m.clear_dirty_state()
                    obj_ids.append(m.id)
                    for kw in kws:
                        m.keywords.add(kw)
                        m.save()
            if not type or ensure_string(type) == 'document':
                for dd, user in zip(document_data, cycle(users)):
                    title, abstract, kws, (bbox_x0, bbox_x1, bbox_y0, bbox_y1), category = dd
                    logger.debug(f"[SetUp] Add document {title}")
                    m = Document(
                        title=title,
                        abstract=abstract,
                        owner=user,
                        bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
                        ll_bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
                        srid='EPSG:4326',
                        category=category,
                        doc_file=f,
                        metadata_only=title == 'doc metadata true'
                    )
                    m.save()
                    m.set_default_permissions()
                    m.clear_dirty_state()
                    obj_ids.append(m.id)
                    for kw in kws:
                        m.keywords.add(kw)
                        m.save()
            if not type or ensure_string(type) == 'layer':
                # Alternate layers between coverage and data stores.
                for ld, owner, storeType in zip(layer_data, cycle(users), cycle(('coverageStore', 'dataStore'))):
                    title, abstract, name, alternate, (bbox_x0, bbox_x1, bbox_y0, bbox_y1), start, kws, category = ld
                    # Layers get a one-year temporal extent from ``start``.
                    end = start + timedelta(days=365)
                    logger.debug(f"[SetUp] Add layer {title}")
                    layer = Layer(
                        title=title,
                        abstract=abstract,
                        name=name,
                        alternate=alternate,
                        bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
                        ll_bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
                        srid='EPSG:4326',
                        uuid=str(uuid4()),
                        owner=owner,
                        temporal_extent_start=start,
                        temporal_extent_end=end,
                        date=start,
                        storeType=storeType,
                        category=category,
                        metadata_only=title == 'layer metadata true'
                    )
                    layer.save()
                    layer.set_default_permissions()
                    layer.clear_dirty_state()
                    obj_ids.append(layer.id)
                    for kw in kws:
                        layer.keywords.add(kw)
                        layer.save()
    return obj_ids
def remove_models(obj_ids, type=None, integration=False):
    """Delete maps, layers and/or documents created by ``create_models``.

    :param obj_ids: primary keys of the objects to delete; when falsy, all
        objects of the selected kind are removed.
    :param type: one of 'map', 'layer' or 'document' (str or bytes);
        when omitted all three kinds are purged.
    :param integration: forwarded to ``DisableDjangoSignals`` so real
        signal handlers keep firing during integration tests.
    """
    from geonode.utils import DisableDjangoSignals
    # Normalize to str so byte-string callers match the comparisons below.
    # BUG FIX: previously the recursive calls passed bytes (b'map', ...)
    # while the branches compared against str literals, so calling with
    # type=None silently deleted nothing under Python 3.
    type = ensure_string(type) if type else None
    with DisableDjangoSignals(skip=integration):
        if not type:
            remove_models(None, type='map')
            remove_models(None, type='layer')
            remove_models(None, type='document')
        if type == 'map':
            try:
                m_ids = obj_ids or [mp.id for mp in Map.objects.all()]
                for id in m_ids:
                    m = Map.objects.get(pk=id)
                    m.delete()
            except Exception:
                # Best-effort cleanup: missing objects are ignored.
                pass
        elif type == 'layer':
            try:
                l_ids = obj_ids or [lyr.id for lyr in Layer.objects.all()]
                for id in l_ids:
                    layer = Layer.objects.get(pk=id)
                    layer.delete()
            except Exception:
                pass
        elif type == 'document':
            try:
                d_ids = obj_ids or [doc.id for doc in Document.objects.all()]
                for id in d_ids:
                    d = Document.objects.get(pk=id)
                    d.delete()
            except Exception:
                pass
def dump_models(path=None):
    """Serialize users, layers, maps, documents and tags to a JSON fixture.

    :param path: target file; defaults to ``fixtures/search_testdata.json``
        next to this module.
    """
    querysets = [
        get_user_model().objects.all(),
        Layer.objects.all(),
        Map.objects.all(),
        Document.objects.all(),
        Tag.objects.all(),
        TaggedItem.objects.all(),
    ]
    objects = []
    for qs in querysets:
        objects.extend(qs)
    result = serialize("json", objects, indent=2, use_natural_keys=True)
    if path is None:
        parent, _ = os.path.split(__file__)
        path = os.path.join(parent, 'fixtures', 'search_testdata.json')
    # Named distinctly so we do not shadow the module-level ``f`` upload.
    with open(path, 'w') as fixture_file:
        fixture_file.write(result)
def create_single_layer(name):
    """Create and return one public Layer named *name*, owned by AnonymousUser.

    Ensures an ``admin`` superuser exists as a side effect.
    """
    admin, created = get_user_model().objects.get_or_create(username='admin')
    if created:
        admin.is_superuser = True
        admin.first_name = 'admin'
        admin.set_password('admin')
        admin.save()
    test_datetime = datetime.strptime('2020-01-01', '%Y-%m-%d')
    user = get_user_model().objects.get(username='AnonymousUser')
    ll = (name, 'lorem ipsum', name, f'geonode:{name}', [
        0, 22, 0, 22], test_datetime, ('populartag',), "farming")
    # NOTE(review): ``kws`` and ``category`` are unpacked but never used.
    title, abstract, name, alternate, (bbox_x0, bbox_x1, bbox_y0, bbox_y1), start, kws, category = ll
    layer = Layer(
        title=title,
        abstract=abstract,
        name=name,
        alternate=alternate,
        bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
        ll_bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
        srid='EPSG:4326',
        uuid=str(uuid4()),
        owner=user,
        temporal_extent_start=test_datetime,
        temporal_extent_end=test_datetime,
        date=start,
        storeType="dataStore",
        resource_type="layer",
        # NOTE(review): built from ``title``; here title == name so this
        # matches ``alternate``.
        typename=f"geonode:{title}"
    )
    layer.save()
    layer.set_default_permissions()
    layer.clear_dirty_state()
    return layer
def create_single_map(name):
    """Create and return one public Map titled *name*, owned by AnonymousUser.

    Ensures an ``admin`` superuser exists as a side effect.
    """
    admin, created = get_user_model().objects.get_or_create(username='admin')
    if created:
        admin.is_superuser = True
        admin.first_name = 'admin'
        admin.set_password('admin')
        admin.save()
    test_datetime = datetime.strptime('2020-01-01', '%Y-%m-%d')
    user = get_user_model().objects.get(username='AnonymousUser')
    ll = (name, 'lorem ipsum', name, f'{name}', [
        0, 22, 0, 22], test_datetime, ('populartag',))
    # NOTE(review): ``name``, ``alternate``, ``start`` and ``kws`` are
    # unpacked but never used below.
    title, abstract, name, alternate, (bbox_x0, bbox_x1, bbox_y0, bbox_y1), start, kws = ll
    m = Map(
        title=title,
        abstract=abstract,
        zoom=4,
        projection='EPSG:4326',
        center_x=42,
        center_y=-73,
        owner=user,
        bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
        ll_bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
        srid='EPSG:4326',
        resource_type="map"
    )
    m.save()
    m.set_default_permissions()
    m.clear_dirty_state()
    return m
def create_single_doc(name):
    """Create and return one public Document titled *name*, owned by AnonymousUser.

    Uses the module-level GIF upload ``f`` as its file payload and ensures
    an ``admin`` superuser exists as a side effect.
    """
    admin, created = get_user_model().objects.get_or_create(username='admin')
    if created:
        admin.is_superuser = True
        admin.first_name = 'admin'
        admin.set_password('admin')
        admin.save()
    test_datetime = datetime.strptime('2020-01-01', '%Y-%m-%d')
    user = get_user_model().objects.get(username='AnonymousUser')
    dd = (name, 'lorem ipsum', name, f'{name}', [
        0, 22, 0, 22], test_datetime, ('populartag',))
    # NOTE(review): ``name``, ``alternate``, ``start`` and ``kws`` are
    # unpacked but never used below.
    title, abstract, name, alternate, (bbox_x0, bbox_x1, bbox_y0, bbox_y1), start, kws = dd
    logger.debug(f"[SetUp] Add document {title}")
    m = Document(
        title=title,
        abstract=abstract,
        owner=user,
        bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
        ll_bbox_polygon=Polygon.from_bbox((bbox_x0, bbox_y0, bbox_x1, bbox_y1)),
        srid='EPSG:4326',
        doc_file=f,
        resource_type="document"
    )
    m.save()
    m.set_default_permissions()
    m.clear_dirty_state()
    return m
# Allow running this module directly to populate the database and then
# dump the JSON fixture used by the search tests.
if __name__ == '__main__':
    create_models()
    dump_models()
| gpl-3.0 | 3,008,828,144,845,505,000 | 41.539759 | 134 | 0.552396 | false |
litedesk/litedesk-webserver-provision | src/provisioning/models.py | 1 | 20144 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import datetime
from urlparse import urlparse
from autoslug import AutoSlugField
from django.conf import settings
from django.core.mail import send_mail
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.template.loader import render_to_string
from litedesk.lib import airwatch
from model_utils import Choices
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel, TimeFramedModel, StatusModel
from qrcode.image.pure import PymagingImage
import qrcode
from audit.models import Trackable
from contrib.models import PropertyTable
from tenants.models import Tenant, TenantService, User
from signals import item_provisioned, item_deprovisioned
import okta
log = logging.getLogger(__name__)
class Provisionable(object):
    """Interface mixin for objects that can be provisioned to users.

    Concrete subclasses must override all three hooks; the base
    implementations only signal that the subclass is incomplete.
    """
    def activate(self, user, **kw):
        """Activate this item for *user*. Must be overridden."""
        raise NotImplementedError
    def deprovision(self, service, user, *args, **kw):
        """Remove this item from *user* on *service*. Must be overridden."""
        raise NotImplementedError
    def provision(self, service, user, *args, **kw):
        """Assign this item to *user* on *service*. Must be overridden."""
        raise NotImplementedError
class UserProvisionable(TimeStampedModel):
    """Through-record: *item* is currently provisioned to *user* on *service*."""
    user = models.ForeignKey(User)
    service = models.ForeignKey(TenantService)
    # Generic foreign key to the provisioned Asset (sub)class instance.
    item_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    item = GenericForeignKey('item_type', 'object_id')
    @property
    def tenant(self):
        # A provision always belongs to the user's tenant.
        return self.user.tenant
    def __unicode__(self):
        return '%s provision for user %s on %s' % (
            self.item, self.user, self.service)
    class Meta:
        unique_together = ('user', 'service', 'item_type', 'object_id')
class UserProvisionHistory(Trackable, TimeFramedModel):
    """Audit trail: one row per provision with start/end timestamps."""
    user = models.ForeignKey(User)
    service = models.ForeignKey(TenantService)
    item_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    item = GenericForeignKey('item_type', 'object_id')
    @staticmethod
    def on_provision(*args, **kw):
        """``item_provisioned`` signal receiver: open a history entry."""
        user = kw.get('user')
        provisioned_item = kw.get('instance')
        item_type = ContentType.objects.get_for_model(provisioned_item)
        entry = UserProvisionHistory(
            user=user,
            service=kw.get('service'),
            item_type=item_type,
            object_id=provisioned_item.id,
            start=datetime.datetime.now()
        )
        entry.save(editor=kw.get('editor'))
    @staticmethod
    def on_deprovision(*args, **kw):
        """``item_deprovisioned`` signal receiver: close open entries."""
        user = kw.get('user')
        provisioned_item = kw.get('instance')
        item_type = ContentType.objects.get_for_model(provisioned_item)
        # Close every still-open entry for this user/item/service triple.
        for entry in user.userprovisionhistory_set.filter(
            item_type=item_type,
            object_id=provisioned_item.id,
            service=kw.get('service'),
            end__isnull=True
        ):
            entry.end = datetime.datetime.now()
            entry.save(editor=kw.get('editor'))
class Asset(TimeStampedModel, Provisionable):
    """Base class for anything that can be provisioned to a user."""
    objects = InheritanceManager()
    name = models.CharField(max_length=1000)
    slug = AutoSlugField(populate_from='name', unique=False, default='')
    description = models.TextField(null=True, blank=True)
    # Platform support flags; see supported_platforms below.
    web = models.BooleanField(default=True)
    mobile = models.BooleanField(default=False)
    desktop = models.BooleanField(default=False)
    @property
    def __subclassed__(self):
        # Re-fetch as the most specific subclass (Software, Device, ...).
        return Asset.objects.get_subclass(id=self.id)
    @property
    def supported_platforms(self):
        """List of platform names ('web'/'mobile'/'desktop') this asset supports."""
        return [p for p in ['web', 'mobile', 'desktop'] if getattr(self, p)]
    def provision(self, service, user, editor=None):
        """Record the provision and emit ``item_provisioned``.

        Silently does nothing when *service* cannot manage this asset.
        """
        if self.can_be_managed_by(service):
            UserProvisionable.objects.create(
                service=service,
                user=user,
                item_type=ContentType.objects.get_for_model(self),
                object_id=self.id
            )
            item_provisioned.send(
                sender=self.__class__,
                editor=editor,
                instance=self,
                service=service,
                user=user
            )
    def deprovision(self, service, user, editor=None):
        """Delete the provision record and emit ``item_deprovisioned``."""
        UserProvisionable.objects.filter(
            service=service,
            user=user,
            item_type=ContentType.objects.get_for_model(self),
            object_id=self.id
        ).delete()
        item_deprovisioned.send(
            sender=self.__class__,
            editor=editor,
            instance=self,
            service=service,
            user=user
        )
    def can_be_managed_by(self, service):
        """True when the service's platform is one this asset supports."""
        return service.type in self.supported_platforms
    def __unicode__(self):
        return self.name
class Software(Asset):
    """Application asset; remote (un)assignment is delegated to the service."""
    EXPENSE_CATEGORY = 'software'
    def provision(self, service, user, editor=None):
        # Assign on the remote service first, then record locally.
        service.assign(self, user)
        super(Software, self).provision(service, user, editor=editor)
    def deprovision(self, service, user, editor=None):
        service.unassign(self, user)
        super(Software, self).deprovision(service, user, editor=editor)
class Device(Asset):
    """Hardware asset; provisioning may send a device-specific welcome mail."""
    EXPENSE_CATEGORY = 'devices'
    image = models.ImageField(null=True, blank=True)
    @property
    def __subclassed__(self):
        # NOTE(review): mutates self.__class__ in place based on the name;
        # only Chrome devices are specialized at the moment.
        if 'chrome' in self.name.lower():
            self.__class__ = ChromeDevice
        return self
    def _get_email_template_parameters(self, service, user):
        """Template context for the welcome mail, or None when no mail applies."""
        device = self.__subclassed__
        if isinstance(device, ChromeDevice):
            return {
                'user': user,
                'service': service,
                'site': settings.SITE,
                'device': device,
                'title': '%s - Welcome to Google' % settings.SITE.get('name'),
                'include_additional_information_message': 'true'
            }
        return None
    def _get_email_template(self, service, format='html'):
        """Template path for the given format, or None when no mail applies.

        NOTE(review): the *service* parameter is currently unused.
        """
        extension = {
            'text': 'txt',
            'html': 'html'
        }.get(format, format)
        template_name = None
        if isinstance(self.__subclassed__, ChromeDevice):
            template_name = 'activation_chromebook'
        # Returns None when template_name is None (no mail for this device).
        return template_name and 'provisioning/mail/%s/%s.tmpl.%s' % (
            format, template_name, extension
        )
    def provision(self, service, user, editor=None):
        """Record the provision, then send the welcome mail if one exists."""
        super(Device, self).provision(service, user, editor=editor)
        html_template = self._get_email_template(service, format='html')
        text_template = self._get_email_template(service, format='text')
        if not (html_template or text_template):
            return
        template_parameters = self._get_email_template_parameters(service, user)
        text_msg = render_to_string(text_template, template_parameters)
        html_msg = render_to_string(html_template, template_parameters)
        send_mail(
            template_parameters['title'],
            text_msg,
            settings.DEFAULT_FROM_EMAIL,
            [user.email],
            html_message=html_msg
        )
    def activate(self, user, *args, **kw):
        # Devices require no per-user activation step.
        pass
class MobileDataPlan(Asset):
    # Placeholder asset type: behaves exactly like a generic Asset.
    pass
class ChromeDevice(Device):
    """Proxy specialization of Device for Chrome hardware (web-managed)."""
    def can_be_managed_by(self, service):
        # Chrome devices are managed through the web platform only.
        return service.type == TenantService.PLATFORM_TYPE_CHOICES.web
    class Meta:
        proxy = True
class TenantAsset(PropertyTable):
    """Per-tenant key/value metadata about an asset."""
    tenant = models.ForeignKey(Tenant)
    asset = models.ForeignKey(Asset)
    class Meta:
        unique_together = ('tenant', 'asset')
class InventoryEntry(Trackable, StatusModel):
    """Tracks a physical asset handed out to (or returned by) a user."""
    STATUS = Choices('handed_out', 'returned')
    user = models.ForeignKey(User)
    tenant_asset = models.ForeignKey(TenantAsset)
    serial_number = models.CharField(max_length=100, null=False, default='N/A')
    @property
    def tenant(self):
        return self.user.tenant
    def save(self, *args, **kwargs):
        # Audit edits are always attributed to the tenant's primary contact.
        super(InventoryEntry, self).save(
            editor=self.user.tenant.primary_contact, *args, **kwargs)
        # TODO : if the inventory item is a google device make a call to the google api to
        # save the username in the annotated user field
    def __unicode__(self):
        return '%s (%s)' % (self.user.username, self.serial_number)
class Okta(TenantService, Provisionable):
    """Okta-backed web platform service; acts as the directory controller."""
    PLATFORM_TYPE = TenantService.PLATFORM_TYPE_CHOICES.web
    ACTIVE_DIRECTORY_CONTROLLER = True
    DEACTIVATION_EXCEPTION = okta.UserNotActiveError
    domain = models.CharField(max_length=200)
    @property
    def portal_url(self):
        return 'https://%s.okta.com' % self.domain
    @property
    def portal_help_url(self):
        return '%s/help/login' % self.portal_url
    def get_client(self):
        # Fresh API client per call; api_token comes from TenantService.
        return okta.Client(self.domain, self.api_token)
    def get_service_user(self, user):
        """Fetch the Okta user matching the local user's tenant e-mail."""
        client = self.get_client()
        return client.get(okta.User, user.tenant_email)
    def get_users(self):
        client = self.get_client()
        return client.get_users()
    def register(self, user):
        """Create the user on Okta in a non-active state; idempotent."""
        client = self.get_client()
        try:
            client.add_user(user, activate=False)
        except okta.UserAlreadyExistsError:
            pass
        return self.get_service_user(user)
    def activate(self, user, editor=None):
        """Activate the user on Okta and send the welcome e-mail."""
        client = self.get_client()
        try:
            service_user = self.get_service_user(user)
        except okta.ResourceDoesNotExistError:
            service_user = self.register(user)
        status_before = getattr(service_user, 'status', 'STAGED')
        activation_url = None
        try:
            activation_response = client.activate_user(service_user,
                                                       send_email=False)
        except okta.UserAlreadyActivatedError:
            pass
        else:
            # Only freshly staged users get an activation link in the mail.
            if status_before == 'STAGED':
                activation_url = activation_response.get('activationUrl')
        password = user.get_remote().set_one_time_password()
        template_parameters = {
            'user': user,
            'service': self,
            'site': settings.SITE,
            'activation_url': activation_url,
            'password': password
        }
        text_msg = render_to_string(
            'provisioning/mail/text/activation_okta.tmpl.txt',
            template_parameters
        )
        html_msg = render_to_string(
            'provisioning/mail/html/activation_okta.tmpl.html',
            template_parameters
        )
        send_mail(
            '%s - Welcome to %s' % (settings.SITE.get('name'), self.name),
            text_msg,
            settings.DEFAULT_FROM_EMAIL,
            [user.email],
            html_message=html_msg
        )
        super(Okta, self).activate(user, editor)
    def assign(self, asset, user):
        """Add the user to the Okta application configured for *asset*."""
        log.debug('Assigning %s to %s on Okta' % (asset, user))
        metadata, _ = self.tenantserviceasset_set.get_or_create(asset=asset)
        client = self.get_client()
        service_user = self.get_service_user(user)
        service_application = client.get(okta.Application,
                                         metadata.get('application_id'))
        try:
            service_application.assign(service_user,
                                       profile=metadata.get('profile'))
        except Exception, why:
            # Best-effort: assignment failures are logged, not raised.
            log.warn('Error when assigning %s to %s: %s' % (asset, user, why))
    def unassign(self, asset, user):
        """Remove the user from the Okta application configured for *asset*."""
        log.debug('Removing %s from %s on Okta' % (asset, user))
        metadata, _ = self.tenantserviceasset_set.get_or_create(asset=asset)
        client = self.get_client()
        service_user = self.get_service_user(user)
        service_application = client.get(okta.Application,
                                         metadata.get('application_id'))
        try:
            service_application.unassign(service_user)
        except okta.UserApplicationNotFound, e:
            log.info('Failed to unassign %s from %s: %s' % (asset, user, e))
        except Exception, why:
            log.warn('Error when unassigning %s to %s: %s' % (asset, user, why))
    @classmethod
    def get_serializer_data(cls, **data):
        return {
            'domain': data.get('domain')
        }
    class Meta:
        verbose_name = 'Okta'
class AirWatch(TenantService, Provisionable):
    """AirWatch-backed mobile device management service."""
    PLATFORM_TYPE = 'mobile'
    QRCODE_ROOT_DIR = os.path.join(settings.MEDIA_ROOT, 'airwatch_qrcodes')
    QRCODE_ROOT_URL = settings.SITE.get(
        'host_url') + settings.MEDIA_URL + 'airwatch_qrcodes/'
    QRCODE_TEMPLATE = 'https://awagent.com?serverurl={0}&gid={1}'
    DEACTIVATION_EXCEPTION = airwatch.user.UserNotActiveError
    username = models.CharField(max_length=80)
    password = models.CharField(max_length=1000)
    server_url = models.URLField()
    group_id = models.CharField(max_length=80)
    @property
    def portal_domain(self):
        """Device-services host: the leading 'as' is swapped for 'ds'."""
        portal_domain = urlparse(self.server_url).netloc
        if portal_domain.startswith('as'):
            portal_domain = portal_domain.replace('as', 'ds', 1)
        return portal_domain
    def get_client(self):
        # Fresh API client per call; api_token comes from TenantService.
        return airwatch.client.Client(
            self.server_url, self.username, self.password, self.api_token
        )
    def get_service_user(self, user):
        """Fetch the AirWatch user, creating it remotely when missing."""
        client = self.get_client()
        service_user = airwatch.user.User.get_remote(client, user.username)
        if service_user is None:
            service_user = airwatch.user.User.create(client, user.username)
        return service_user
    def get_usergroup(self, group_name):
        client = self.get_client()
        return airwatch.group.UserGroupHacked.get_remote(client, group_name)
    def get_smartgroup(self, smartgroup_id):
        client = self.get_client()
        return airwatch.group.SmartGroup.get_remote(client, smartgroup_id)
    def register(self, user):
        """Create the user on AirWatch; idempotent."""
        client = self.get_client()
        try:
            return airwatch.user.User.create(client, user.username)
        except airwatch.user.UserAlreadyRegisteredError:
            return self.get_service_user(user)
    @property
    def qrcode(self):
        """URL of the enrollment QR code image, rendering it on first use."""
        server_domain = self.portal_domain
        image_dir = os.path.join(self.QRCODE_ROOT_DIR, server_domain)
        image_file_name = '{0}.png'.format(self.group_id)
        image_file_path = os.path.join(image_dir, image_file_name)
        if not os.path.exists(image_file_path):
            if not os.path.exists(image_dir):
                os.makedirs(image_dir)
            data = self.QRCODE_TEMPLATE.format(server_domain, self.group_id)
            image = qrcode.make(data, image_factory=PymagingImage, box_size=5)
            # BUG FIX: PNG data is binary; the file must be opened in 'wb'
            # mode (text mode corrupts the image on platforms that perform
            # newline translation).
            with open(image_file_path, 'wb') as image_file:
                image.save(image_file)
        image_url = self.QRCODE_ROOT_URL + server_domain + '/' + image_file_name
        return image_url
    def activate(self, user, editor=None):
        """Activate the user on AirWatch and send the welcome e-mail."""
        service_user = self.get_service_user(user)
        if service_user is None:
            service_user = self.register(user)
        try:
            title = '%s - Welcome to AirWatch' % settings.SITE.get('name')
            service_user.activate()
            template_parameters = {
                'user': user,
                'service': self,
                'site': settings.SITE,
                'qr_code': self.qrcode
            }
            text_msg = render_to_string(
                'provisioning/mail/text/activation_airwatch.tmpl.txt',
                template_parameters
            )
            html_msg = render_to_string(
                'provisioning/mail/html/activation_airwatch.tmpl.html',
                template_parameters
            )
            send_mail(
                title,
                text_msg,
                settings.DEFAULT_FROM_EMAIL,
                [user.email],
                html_message=html_msg
            )
        except airwatch.user.UserAlreadyActivatedError:
            pass
        else:
            # Only record the activation when it actually happened now.
            super(AirWatch, self).activate(user, editor)
    def deactivate(self, user, editor=None):
        super(AirWatch, self).deactivate(user, editor)
        self.get_service_user(user).delete()
    def __group_and_aw_user(self, software, user):
        """Resolve the AirWatch user group for *software* plus the remote user."""
        metadata, _ = self.tenantserviceasset_set.get_or_create(asset=software)
        group = self.get_usergroup(metadata.get('group_name'))
        service_user = self.get_service_user(user)
        return group, service_user
    def assign(self, software, user):
        """Enroll the user into the group backing *software*; idempotent."""
        if self.type not in software.supported_platforms:
            return
        log.debug('Assigning %s to %s on Airwatch' % (software, user))
        group, aw_user = self.__group_and_aw_user(software, user)
        try:
            group.add_member(aw_user)
        except airwatch.user.UserAlreadyEnrolledError:
            pass
    def unassign(self, software, user):
        """Remove the user from the group backing *software*; idempotent."""
        if self.type not in software.supported_platforms:
            return
        log.debug('Removing %s from %s on Airwatch' % (software, user))
        group, aw_user = self.__group_and_aw_user(software, user)
        try:
            group.remove_member(aw_user)
        except airwatch.user.UserNotEnrolledError:
            pass
    def get_all_devices(self):
        """List all managed devices as dicts; returns None on non-200 success codes."""
        endpoint = 'mdm/devices/search'
        response = self.get_client().call_api(
            'GET', endpoint)
        response.raise_for_status()
        if response.status_code == 200:
            devices = [{'model': d['Model'],
                        'username': d['UserName'],
                        'serial_number': d['SerialNumber']}
                       for d in response.json().get('Devices')]
            return devices
    def get_available_devices(self):
        """Devices not currently attached to a real user."""
        return [d for d in self.get_all_devices()
                if d['username'] == '' or d['username'] == 'staging']
    @classmethod
    def get_serializer_data(cls, **data):
        return {
            'username': data.get('username'),
            'password': data.get('password'),
            'server_url': data.get('server_url'),
            'group_id': data.get('group_id')
        }
    class Meta:
        verbose_name = 'AirWatch'
class MobileIron(TenantService, Provisionable):
    # Stub service: registered as provisionable but not yet implemented.
    PLATFORM_TYPE = 'mobile'
class TenantServiceAsset(PropertyTable):
    """Per-service key/value configuration for an asset (application ids etc.)."""
    service = models.ForeignKey(TenantService)
    asset = models.ForeignKey(Asset)
    @property
    def tenant(self):
        return self.service.tenant
    @property
    def platform(self):
        return self.service.type
    def __unicode__(self):
        return 'Asset %s on %s' % (self.asset, self.service)
    class Meta:
        unique_together = ('service', 'asset')
class LastSeenEvent(TimeStampedModel):
    """Records when *user* last used a given item."""
    user = models.ForeignKey(User)
    item_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    item = GenericForeignKey('item_type', 'object_id')
    last_seen = models.DateTimeField()
# Keep the provisioning audit trail in sync with (de)provision events.
item_provisioned.connect(UserProvisionHistory.on_provision,
                         dispatch_uid='provision')
item_deprovisioned.connect(UserProvisionHistory.on_deprovision,
                           dispatch_uid='deprovision')
# Provide defaults when the Django settings do not define these lists.
# BUG FIX: getattr() needs an explicit default here — without one it raises
# AttributeError when the setting is absent instead of falling back (the
# ASSET_CLASSES check below already did this correctly).
if not getattr(settings, 'PROVISIONABLE_SERVICES', []):
    settings.PROVISIONABLE_SERVICES = [
        '.'.join([__name__, k.__name__]) for k in [Okta, AirWatch, MobileIron]
    ]
if not getattr(settings, 'ASSET_CLASSES', []):
    settings.ASSET_CLASSES = [
        '.'.join([__name__, k.__name__]) for k in
        [Software, Device, MobileDataPlan]
    ]
| apache-2.0 | 4,245,768,454,504,376,300 | 32.186161 | 90 | 0.610653 | false |
harsham05/image_space | imagespace_georgetown/server/__init__.py | 1 | 1353 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
from .georgetown_imagedomaindynamicssearch import \
GeorgetownImageDomainDynamicsSearch
def load(info):
    """Girder plugin entry point: attach the Georgetown search endpoint.

    Requires the IMAGE_SPACE_GEORGETOWN_DOMAIN_DYNAMICS_SEARCH environment
    variable to be set and non-empty; any trailing slash is stripped in
    place before the endpoint is registered on the API root.
    """
    index = 'IMAGE_SPACE_GEORGETOWN_DOMAIN_DYNAMICS_SEARCH'
    value = os.environ.get(index, '')
    if value == '':
        raise Exception(
            'Imagespace Georgetown will not function without the %s '
            'environment variable.' % index)
    os.environ[index] = value.rstrip('/')
    info['apiRoot'].georgetown_imagedomaindynamicssearch = \
        GeorgetownImageDomainDynamicsSearch()
| apache-2.0 | -2,677,355,904,935,077,000 | 36.583333 | 79 | 0.623799 | false |
cbertinato/pandas | asv_bench/benchmarks/frame_methods.py | 1 | 16547 | import string
import numpy as np
from pandas import (
DataFrame, MultiIndex, NaT, Series, date_range, isnull, period_range)
import pandas.util.testing as tm
class GetNumericData:
    """Benchmark ``DataFrame._get_numeric_data`` on a mixed-dtype frame."""
    def setup(self):
        self.df = DataFrame(np.random.randn(10000, 25))
        self.df['foo'] = 'bar'
        self.df['bar'] = 'baz'
        # Consolidate blocks up front so only the extraction is timed.
        self.df = self.df._consolidate()
    def time_frame_get_numeric_data(self):
        self.df._get_numeric_data()
class Lookup:
    """Benchmark label-based ``DataFrame.lookup`` with partial and full
    row/column label sets."""
    def setup(self):
        self.df = DataFrame(np.random.randn(10000, 8),
                            columns=list('abcdefgh'))
        self.df['foo'] = 'bar'
        # 900 row labels x 900 column labels (columns cycled 100 times).
        self.row_labels = list(self.df.index[::10])[:900]
        self.col_labels = list(self.df.columns) * 100
        # Full cartesian coverage: every (row, column) pair once.
        self.row_labels_all = np.array(
            list(self.df.index) * len(self.df.columns), dtype='object')
        self.col_labels_all = np.array(
            list(self.df.columns) * len(self.df.index), dtype='object')
    def time_frame_fancy_lookup(self):
        self.df.lookup(self.row_labels, self.col_labels)
    def time_frame_fancy_lookup_all(self):
        self.df.lookup(self.row_labels_all, self.col_labels_all)
class Reindex:
    """Benchmark ``DataFrame.reindex`` along each axis, plus the upcasting
    path on a frame whose columns have heterogeneous integer/bool dtypes."""
    def setup(self):
        N = 10**3
        self.df = DataFrame(np.random.randn(N * 10, N))
        self.idx = np.arange(4 * N, 7 * N)
        # Each column randomly gets one of four dtypes (bool/int16/int32/int64)
        # so reindexing with missing labels forces dtype upcasting.
        self.df2 = DataFrame(
            {c: {0: np.random.randint(0, 2, N).astype(np.bool_),
                 1: np.random.randint(0, N, N).astype(np.int16),
                 2: np.random.randint(0, N, N).astype(np.int32),
                 3: np.random.randint(0, N, N).astype(np.int64)}
             [np.random.randint(0, 4)] for c in range(N)})
    def time_reindex_axis0(self):
        self.df.reindex(self.idx)
    def time_reindex_axis1(self):
        self.df.reindex(columns=self.idx)
    def time_reindex_both_axes(self):
        self.df.reindex(index=self.idx, columns=self.idx)
    def time_reindex_upcast(self):
        self.df2.reindex(np.random.permutation(range(1200)))
class Rename:
    """Benchmark ``DataFrame.rename`` with dict mappers along each axis."""
    def setup(self):
        N = 10**3
        self.df = DataFrame(np.random.randn(N * 10, N))
        self.idx = np.arange(4 * N, 7 * N)
        self.dict_idx = {k: k for k in self.idx}
        # Mixed-dtype frame (same construction as the Reindex benchmark).
        self.df2 = DataFrame(
            {c: {0: np.random.randint(0, 2, N).astype(np.bool_),
                 1: np.random.randint(0, N, N).astype(np.int16),
                 2: np.random.randint(0, N, N).astype(np.int32),
                 3: np.random.randint(0, N, N).astype(np.int64)}
             [np.random.randint(0, 4)] for c in range(N)})
    def time_rename_single(self):
        self.df.rename({0: 0})
    def time_rename_axis0(self):
        self.df.rename(self.dict_idx)
    def time_rename_axis1(self):
        self.df.rename(columns=self.dict_idx)
    def time_rename_both_axes(self):
        self.df.rename(index=self.dict_idx, columns=self.dict_idx)
    # NOTE(review): identical body to time_rename_both_axes — presumably kept
    # for historical result continuity; confirm before removing.
    def time_dict_rename_both_axes(self):
        self.df.rename(index=self.dict_idx, columns=self.dict_idx)
class Iteration:
    """Benchmark row/column iteration: ``iteritems``, ``itertuples`` (named
    and raw variants, time and memory), and ``iterrows``."""
    # mem_itertuples_* benchmarks are slow
    timeout = 120
    def setup(self):
        N = 1000
        self.df = DataFrame(np.random.randn(N * 10, N))
        self.df2 = DataFrame(np.random.randn(N * 50, 10))
        self.df3 = DataFrame(np.random.randn(N, 5 * N),
                             columns=['C' + str(c) for c in range(N * 5)])
        self.df4 = DataFrame(np.random.randn(N * 1000, 10))
    def time_iteritems(self):
        # (monitor no-copying behaviour)
        if hasattr(self.df, '_item_cache'):
            self.df._item_cache.clear()
        for name, col in self.df.iteritems():
            pass
    def time_iteritems_cached(self):
        for name, col in self.df.iteritems():
            pass
    def time_iteritems_indexing(self):
        for col in self.df3:
            self.df3[col]
    def time_itertuples_start(self):
        self.df4.itertuples()
    def time_itertuples_read_first(self):
        next(self.df4.itertuples())
    def time_itertuples(self):
        for row in self.df4.itertuples():
            pass
    def time_itertuples_to_list(self):
        list(self.df4.itertuples())
    def mem_itertuples_start(self):
        return self.df4.itertuples()
    def peakmem_itertuples_start(self):
        self.df4.itertuples()
    def mem_itertuples_read_first(self):
        return next(self.df4.itertuples())
    def peakmem_itertuples(self):
        for row in self.df4.itertuples():
            pass
    def mem_itertuples_to_list(self):
        return list(self.df4.itertuples())
    def peakmem_itertuples_to_list(self):
        list(self.df4.itertuples())
    # "raw" variants skip namedtuple construction (index=False, name=None).
    def time_itertuples_raw_start(self):
        self.df4.itertuples(index=False, name=None)
    def time_itertuples_raw_read_first(self):
        next(self.df4.itertuples(index=False, name=None))
    def time_itertuples_raw_tuples(self):
        for row in self.df4.itertuples(index=False, name=None):
            pass
    def time_itertuples_raw_tuples_to_list(self):
        list(self.df4.itertuples(index=False, name=None))
    def mem_itertuples_raw_start(self):
        return self.df4.itertuples(index=False, name=None)
    def peakmem_itertuples_raw_start(self):
        self.df4.itertuples(index=False, name=None)
    def peakmem_itertuples_raw_read_first(self):
        next(self.df4.itertuples(index=False, name=None))
    def peakmem_itertuples_raw(self):
        for row in self.df4.itertuples(index=False, name=None):
            pass
    def mem_itertuples_raw_to_list(self):
        return list(self.df4.itertuples(index=False, name=None))
    def peakmem_itertuples_raw_to_list(self):
        list(self.df4.itertuples(index=False, name=None))
    def time_iterrows(self):
        for row in self.df.iterrows():
            pass
class ToString:
    """Benchmark ``DataFrame.to_string`` on a small all-float frame."""
    def setup(self):
        self.df = DataFrame(np.random.randn(100, 10))
    def time_to_string_floats(self):
        self.df.to_string()
class ToHTML:
    """Benchmark ``DataFrame.to_html`` on a mixed float/period/int frame."""
    def setup(self):
        nrows = 500
        self.df2 = DataFrame(np.random.randn(nrows, 10))
        self.df2[0] = period_range('2000', periods=nrows)
        self.df2[1] = range(nrows)
    def time_to_html_mixed(self):
        self.df2.to_html()
class Repr:
    """Benchmark repr/HTML-repr rendering for tall, wide, single-index and
    MultiIndex frames (truncation paths)."""
    def setup(self):
        nrows = 10000
        data = np.random.randn(nrows, 10)
        # 3-level MultiIndex whose label arrays repeat every nrows/100 rows.
        arrays = np.tile(np.random.randn(3, int(nrows / 100)), 100)
        idx = MultiIndex.from_arrays(arrays)
        self.df3 = DataFrame(data, index=idx)
        self.df4 = DataFrame(data, index=np.random.randn(nrows))
        self.df_tall = DataFrame(np.random.randn(nrows, 10))
        self.df_wide = DataFrame(np.random.randn(10, nrows))
    def time_html_repr_trunc_mi(self):
        self.df3._repr_html_()
    def time_html_repr_trunc_si(self):
        self.df4._repr_html_()
    def time_repr_tall(self):
        repr(self.df_tall)
    def time_frame_repr_wide(self):
        repr(self.df_wide)
class MaskBool:
    """Benchmark ``DataFrame.mask`` on boolean vs float data."""
    def setup(self):
        data = np.random.randn(1000, 500)
        df = DataFrame(data)
        # ~half the cells become NaN, so the mask is roughly half True.
        df = df.where(df > 0)
        self.bools = df > 0
        self.mask = isnull(df)
    def time_frame_mask_bools(self):
        self.bools.mask(self.mask)
    def time_frame_mask_floats(self):
        self.bools.astype(float).mask(self.mask)
class Isnull:
    """Benchmark ``isnull`` across float, string, and object frames with
    varying mixes of NA-like values."""
    def setup(self):
        N = 10**3
        self.df_no_null = DataFrame(np.random.randn(N, N))
        sample = np.array([np.nan, 1.0])
        data = np.random.choice(sample, (N, N))
        self.df = DataFrame(data)
        sample = np.array(list(string.ascii_letters + string.whitespace))
        data = np.random.choice(sample, (N, N))
        self.df_strings = DataFrame(data)
        # Object frame mixing every NA flavour with plain scalars/strings.
        sample = np.array([NaT, np.nan, None, np.datetime64('NaT'),
                           np.timedelta64('NaT'), 0, 1, 2.0, '', 'abcd'])
        data = np.random.choice(sample, (N, N))
        self.df_obj = DataFrame(data)
    def time_isnull_floats_no_null(self):
        isnull(self.df_no_null)
    def time_isnull(self):
        isnull(self.df)
    def time_isnull_strngs(self):
        isnull(self.df_strings)
    def time_isnull_obj(self):
        isnull(self.df_obj)
class Fillna:
    """Benchmark ``DataFrame.fillna`` (pad/bfill, in-place or not) on a
    frame where every other row is NaN."""
    params = ([True, False], ['pad', 'bfill'])
    param_names = ['inplace', 'method']
    def setup(self, inplace, method):
        values = np.random.randn(10000, 100)
        values[::2] = np.nan
        self.df = DataFrame(values)
    def time_frame_fillna(self, inplace, method):
        self.df.fillna(inplace=inplace, method=method)
class Dropna:
    """Benchmark ``DataFrame.dropna`` for each ``how``/``axis`` combination,
    on all-float and mixed-dtype frames seeded with NaN patches."""
    params = (['all', 'any'], [0, 1])
    param_names = ['how', 'axis']
    def setup(self, how, axis):
        self.df = DataFrame(np.random.randn(10000, 1000))
        # .ix was removed in pandas 1.0; .loc keeps the same label-based,
        # end-inclusive slicing semantics on the default RangeIndex.
        self.df.loc[50:1000, 20:50] = np.nan
        self.df.loc[2000:3000] = np.nan
        self.df.loc[:, 60:70] = np.nan
        self.df_mixed = self.df.copy()
        self.df_mixed['foo'] = 'bar'
    def time_dropna(self, how, axis):
        self.df.dropna(how=how, axis=axis)
    def time_dropna_axis_mixed_dtypes(self, how, axis):
        self.df_mixed.dropna(how=how, axis=axis)
class Count:
    """Benchmark ``DataFrame.count(level=...)`` on MultiIndexed all-float
    and mixed-dtype frames seeded with NaN patches."""
    params = [0, 1]
    param_names = ['axis']
    def setup(self, axis):
        self.df = DataFrame(np.random.randn(10000, 1000))
        # .ix was removed in pandas 1.0; .loc keeps the same label-based,
        # end-inclusive slicing semantics on the default RangeIndex.
        self.df.loc[50:1000, 20:50] = np.nan
        self.df.loc[2000:3000] = np.nan
        self.df.loc[:, 60:70] = np.nan
        self.df_mixed = self.df.copy()
        self.df_mixed['foo'] = 'bar'
        # Duplicate each axis into a 2-level MultiIndex so count(level=1)
        # exercises the level-grouping path.
        self.df.index = MultiIndex.from_arrays([self.df.index, self.df.index])
        self.df.columns = MultiIndex.from_arrays([self.df.columns,
                                                  self.df.columns])
        self.df_mixed.index = MultiIndex.from_arrays([self.df_mixed.index,
                                                      self.df_mixed.index])
        self.df_mixed.columns = MultiIndex.from_arrays([self.df_mixed.columns,
                                                        self.df_mixed.columns])
    def time_count_level_multi(self, axis):
        self.df.count(axis=axis, level=1)
    def time_count_level_mixed_dtypes_multi(self, axis):
        self.df_mixed.count(axis=axis, level=1)
class Apply:
    """Benchmark ``DataFrame.apply`` with various functions and axes."""
    def setup(self):
        self.df = DataFrame(np.random.randn(1000, 100))
        self.s = Series(np.arange(1028.0))
        self.df2 = DataFrame({i: self.s for i in range(1028)})
        self.df3 = DataFrame(np.random.randn(1000, 3), columns=list('ABC'))
    def time_apply_user_func(self):
        self.df2.apply(lambda x: np.corrcoef(x, self.s)[(0, 1)])
    def time_apply_axis_1(self):
        self.df.apply(lambda x: x + 1, axis=1)
    def time_apply_lambda_mean(self):
        self.df.apply(lambda x: x.mean())
    def time_apply_np_mean(self):
        self.df.apply(np.mean)
    def time_apply_pass_thru(self):
        self.df.apply(lambda x: x)
    def time_apply_ref_by_name(self):
        self.df3.apply(lambda x: x['A'] + x['B'], axis=1)
class Dtypes:
    """Benchmark the ``DataFrame.dtypes`` property on a wide float frame."""
    def setup(self):
        self.df = DataFrame(np.random.randn(1000, 1000))
    def time_frame_dtypes(self):
        self.df.dtypes
class Equals:
    """Benchmark ``DataFrame.equals`` fast/slow paths: float vs object
    dtypes, equal vs unequal (NaN-perturbed), unique vs duplicate columns."""
    def setup(self):
        N = 10**3
        self.float_df = DataFrame(np.random.randn(N, N))
        self.float_df_nan = self.float_df.copy()
        # A single NaN in the last cell makes the frames compare unequal.
        self.float_df_nan.iloc[-1, -1] = np.nan
        self.object_df = DataFrame('foo', index=range(N), columns=range(N))
        self.object_df_nan = self.object_df.copy()
        self.object_df_nan.iloc[-1, -1] = np.nan
        self.nonunique_cols = self.object_df.copy()
        self.nonunique_cols.columns = ['A'] * len(self.nonunique_cols.columns)
        self.nonunique_cols_nan = self.nonunique_cols.copy()
        self.nonunique_cols_nan.iloc[-1, -1] = np.nan
    def time_frame_float_equal(self):
        self.float_df.equals(self.float_df)
    def time_frame_float_unequal(self):
        self.float_df.equals(self.float_df_nan)
    def time_frame_nonunique_equal(self):
        self.nonunique_cols.equals(self.nonunique_cols)
    def time_frame_nonunique_unequal(self):
        self.nonunique_cols.equals(self.nonunique_cols_nan)
    def time_frame_object_equal(self):
        self.object_df.equals(self.object_df)
    def time_frame_object_unequal(self):
        self.object_df.equals(self.object_df_nan)
class Interpolate:
    """Benchmark ``DataFrame.interpolate`` with and without downcasting."""
    params = [None, 'infer']
    param_names = ['downcast']
    def setup(self, downcast):
        N = 10000
        # this is the worst case, where every column has NaNs.
        self.df = DataFrame(np.random.randn(N, 100))
        self.df.values[::2] = np.nan
        self.df2 = DataFrame({'A': np.arange(0, N),
                              'B': np.random.randint(0, 100, N),
                              'C': np.random.randn(N),
                              'D': np.random.randn(N)})
        # Only two of four columns have NaNs here ("some good" case).
        self.df2.loc[1::5, 'A'] = np.nan
        self.df2.loc[1::5, 'C'] = np.nan
    def time_interpolate(self, downcast):
        self.df.interpolate(downcast=downcast)
    def time_interpolate_some_good(self, downcast):
        self.df2.interpolate(downcast=downcast)
class Shift:
    """Benchmark ``DataFrame.shift`` along each axis."""
    # frame shift speedup issue-5609
    params = [0, 1]
    param_names = ['axis']
    def setup(self, axis):
        self.df = DataFrame(np.random.rand(10000, 500))
    def time_shift(self, axis):
        self.df.shift(1, axis=axis)
class Nunique:
    """Benchmark ``DataFrame.nunique`` on a wide float frame."""
    def setup(self):
        self.df = DataFrame(np.random.randn(10000, 1000))
    def time_frame_nunique(self):
        self.df.nunique()
class Duplicated:
    """Benchmark ``DataFrame.duplicated`` on a tall mixed frame and a wide
    string frame."""
    def setup(self):
        n = (1 << 20)
        t = date_range('2015-01-01', freq='S', periods=(n // 64))
        xs = np.random.randn(n // 64).round(2)
        # Columns drawn from small pools so genuine duplicates occur.
        self.df = DataFrame({'a': np.random.randint(-1 << 8, 1 << 8, n),
                             'b': np.random.choice(t, n),
                             'c': np.random.choice(xs, n)})
        self.df2 = DataFrame(np.random.randn(1000, 100).astype(str)).T
    def time_frame_duplicated(self):
        self.df.duplicated()
    def time_frame_duplicated_wide(self):
        self.df2.duplicated()
class XS:
    """Benchmark ``DataFrame.xs`` cross-section along each axis."""
    params = [0, 1]
    param_names = ['axis']
    def setup(self, axis):
        self.N = 10**4
        self.df = DataFrame(np.random.randn(self.N, self.N))
    def time_frame_xs(self, axis):
        self.df.xs(self.N / 2, axis=axis)
class SortValues:
    """Benchmark ``DataFrame.sort_values`` by one column, both directions."""
    params = [True, False]
    param_names = ['ascending']
    def setup(self, ascending):
        self.df = DataFrame(np.random.randn(1000000, 2), columns=list('AB'))
    def time_frame_sort_values(self, ascending):
        self.df.sort_values(by='A', ascending=ascending)
class SortIndexByColumns:
    """Benchmark multi-key ``sort_values`` on repeated string columns."""
    def setup(self):
        N = 10000
        K = 10
        # Each key value repeats K times, giving many ties for key2.
        self.df = DataFrame({'key1': tm.makeStringIndex(N).values.repeat(K),
                             'key2': tm.makeStringIndex(N).values.repeat(K),
                             'value': np.random.randn(N * K)})
    def time_frame_sort_values_by_columns(self):
        self.df.sort_values(by=['key1', 'key2'])
class Quantile:
    """Benchmark ``DataFrame.quantile`` with multiple quantiles per axis."""
    params = [0, 1]
    param_names = ['axis']
    def setup(self, axis):
        self.df = DataFrame(np.random.randn(1000, 3), columns=list('ABC'))
    def time_frame_quantile(self, axis):
        self.df.quantile([0.1, 0.5], axis=axis)
class GetDtypeCounts:
    """Benchmark ``get_dtype_counts`` and ``info`` on a very wide frame."""
    # 2807
    def setup(self):
        self.df = DataFrame(np.random.randn(10, 10000))
    # NOTE(review): get_dtype_counts was deprecated in pandas 0.25; this
    # benchmark only runs against versions that still provide it.
    def time_frame_get_dtype_counts(self):
        self.df.get_dtype_counts()
    def time_info(self):
        self.df.info()
class NSort:
    """Benchmark ``nlargest``/``nsmallest`` for each ``keep`` strategy,
    selecting by one and by two columns."""
    params = ['first', 'last', 'all']
    param_names = ['keep']
    def setup(self, keep):
        self.df = DataFrame(np.random.randn(100000, 3),
                            columns=list('ABC'))
    def time_nlargest_one_column(self, keep):
        self.df.nlargest(100, 'A', keep=keep)
    def time_nlargest_two_columns(self, keep):
        self.df.nlargest(100, ['A', 'B'], keep=keep)
    def time_nsmallest_one_column(self, keep):
        self.df.nsmallest(100, 'A', keep=keep)
    def time_nsmallest_two_columns(self, keep):
        self.df.nsmallest(100, ['A', 'B'], keep=keep)
class Describe:
    """Benchmark ``describe`` for a single Series and the whole frame."""
    def setup(self):
        self.df = DataFrame({
            'a': np.random.randint(0, 100, int(1e6)),
            'b': np.random.randint(0, 100, int(1e6)),
            'c': np.random.randint(0, 100, int(1e6))
        })
    def time_series_describe(self):
        self.df['a'].describe()
    def time_dataframe_describe(self):
        self.df.describe()
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause | 6,597,646,539,772,058,000 | 27.189097 | 79 | 0.581435 | false |
google/nerfies | nerfies/models.py | 1 | 15699 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Different model implementation plus a general port for all the models."""
from typing import Any, Dict, Mapping, Optional, Tuple
from flax import linen as nn
import frozendict
from jax import random
import jax.numpy as jnp
from nerfies import configs
from nerfies import glo
from nerfies import model_utils
from nerfies import modules
from nerfies import types
from nerfies import warping
class NerfModel(nn.Module):
  """Nerf NN Model with both coarse and fine MLPs.

  Attributes:
    num_coarse_samples: int, the number of samples for coarse nerf.
    num_fine_samples: int, the number of samples for fine nerf.
    use_viewdirs: bool, use viewdirs as a condition.
    near: float, near clip.
    far: float, far clip.
    noise_std: float, std dev of noise added to regularize sigma output.
    nerf_trunk_depth: int, the depth of the first part of MLP.
    nerf_trunk_width: int, the width of the first part of MLP.
    nerf_condition_depth: int, the depth of the second part of MLP.
    nerf_condition_width: int, the width of the second part of MLP.
    activation: the activation function used in the MLP.
    sigma_activation: the activation function applied to the sigma density.
    nerf_skips: which layers to add skip layers in the NeRF model.
    alpha_channels: int, the number of alpha_channelss.
    rgb_channels: int, the number of rgb_channelss.
    use_stratified_sampling: use stratified sampling.
    use_white_background: composite rendering on to a white background.
    num_nerf_point_freqs: degree of positional encoding for positions.
    num_nerf_viewdir_freqs: degree of positional encoding for viewdirs.
    use_linear_disparity: sample linearly in disparity rather than depth.
    num_appearance_embeddings: the number of appearance exemplars.
    num_appearance_features: the dimension size for the appearance code.
    num_camera_embeddings: the number of camera exemplars.
    num_camera_features: the dimension size for the camera code.
    num_warp_freqs: the number of frequencies for the warp positional encoding.
    num_warp_embeddings: the number of embeddings for the warp GLO encoder.
    num_warp_features: the number of features for the warp GLO encoder.
    use_appearance_metadata: if True use the appearance metadata.
    use_camera_metadata: if True use the camera metadata.
    use_warp: whether to use the warp field or not.
    use_warp_jacobian: if True the model computes and returns the Jacobian of
      the warped points.
    use_weights: if True return the density weights.
    warp_kwargs: extra keyword arguments for the warp field.
  """
  num_coarse_samples: int
  num_fine_samples: int
  use_viewdirs: bool
  near: float
  far: float
  noise_std: Optional[float]
  nerf_trunk_depth: int
  nerf_trunk_width: int
  nerf_condition_depth: int
  nerf_condition_width: int
  nerf_skips: Tuple[int]
  alpha_channels: int
  rgb_channels: int
  use_stratified_sampling: bool
  num_nerf_point_freqs: int
  num_nerf_viewdir_freqs: int
  num_appearance_embeddings: int
  num_appearance_features: int
  num_camera_embeddings: int
  num_camera_features: int
  num_warp_embeddings: int
  num_warp_features: int
  num_warp_freqs: int
  activation: types.Activation = nn.relu
  sigma_activation: types.Activation = nn.relu
  use_white_background: bool = False
  use_linear_disparity: bool = False
  use_sample_at_infinity: bool = True
  warp_field_type: str = 'se3'
  use_appearance_metadata: bool = False
  use_camera_metadata: bool = False
  use_warp: bool = False
  use_warp_jacobian: bool = False
  use_weights: bool = False
  warp_kwargs: Mapping[str, Any] = frozendict.frozendict()
  metadata_encoded: bool = False

  def setup(self):
    # Sinusoidal encoders are vmapped over (ray, sample) batch dims for
    # points and over the ray batch dim for view directions.
    self.point_encoder = model_utils.vmap_module(
        modules.SinusoidalEncoder, num_batch_dims=2)(
            num_freqs=self.num_nerf_point_freqs)
    self.viewdir_encoder = model_utils.vmap_module(
        modules.SinusoidalEncoder, num_batch_dims=1)(
            num_freqs=self.num_nerf_viewdir_freqs)
    if self.use_appearance_metadata:
      self.appearance_encoder = glo.GloEncoder(
          num_embeddings=self.num_appearance_embeddings,
          features=self.num_appearance_features)
    if self.use_camera_metadata:
      self.camera_encoder = glo.GloEncoder(
          num_embeddings=self.num_camera_embeddings,
          features=self.num_camera_features)
    self.nerf_coarse = modules.NerfMLP(
        nerf_trunk_depth=self.nerf_trunk_depth,
        nerf_trunk_width=self.nerf_trunk_width,
        nerf_condition_depth=self.nerf_condition_depth,
        nerf_condition_width=self.nerf_condition_width,
        activation=self.activation,
        skips=self.nerf_skips,
        alpha_channels=self.alpha_channels,
        rgb_channels=self.rgb_channels)
    # The fine MLP is only constructed when hierarchical sampling is on.
    if self.num_fine_samples > 0:
      self.nerf_fine = modules.NerfMLP(
          nerf_trunk_depth=self.nerf_trunk_depth,
          nerf_trunk_width=self.nerf_trunk_width,
          nerf_condition_depth=self.nerf_condition_depth,
          nerf_condition_width=self.nerf_condition_width,
          activation=self.activation,
          skips=self.nerf_skips,
          alpha_channels=self.alpha_channels,
          rgb_channels=self.rgb_channels)
    else:
      self.nerf_fine = None
    if self.use_warp:
      self.warp_field = warping.create_warp_field(
          field_type=self.warp_field_type,
          num_freqs=self.num_warp_freqs,
          num_embeddings=self.num_warp_embeddings,
          num_features=self.num_warp_features,
          num_batch_dims=2,
          **self.warp_kwargs)

  def __call__(
      self,
      rays_dict: Dict[str, Any],
      warp_alpha: float = None,
      metadata_encoded=False,
      use_warp=True,
      deterministic=False,
  ):
    """Nerf Model.

    Args:
      rays_dict: a dictionary containing the ray information. Contains:
        'origins': the ray origins.
        'directions': unit vectors which are the ray directions.
        'viewdirs': (optional) unit vectors which are viewing directions.
        'metadata': a dictionary of metadata indices e.g., for warping.
      warp_alpha: the alpha for the positional encoding.
      metadata_encoded: if True, assume the metadata is already encoded.
      use_warp: if True use the warp field (if also enabled in the model).
      deterministic: whether evaluation should be deterministic.

    Returns:
      ret: list, [(rgb, disp, acc), (rgb_coarse, disp_coarse, acc_coarse)]
    """
    # Extract viewdirs from the ray array
    origins = rays_dict['origins']
    directions = rays_dict['directions']
    metadata = rays_dict['metadata']
    if 'viewdirs' in rays_dict:
      viewdirs = rays_dict['viewdirs']
    else:  # viewdirs are normalized rays_d
      viewdirs = directions
    # Stratified sampling along rays
    z_vals, points = model_utils.sample_along_rays(
        self.make_rng('coarse'), origins, directions, self.num_coarse_samples,
        self.near, self.far, self.use_stratified_sampling,
        self.use_linear_disparity)
    # Optionally deform the sample points through the warp field before
    # encoding them.
    if self.use_warp and use_warp:
      metadata_channels = self.num_warp_features if metadata_encoded else 1
      warp_metadata = jnp.broadcast_to(
          metadata['warp'][:, jnp.newaxis, :],
          shape=(*points.shape[:2], metadata_channels))
      warp_ret = self.warp_field(
          points, warp_metadata, warp_alpha, self.use_warp_jacobian,
          metadata_encoded)
      if self.use_warp_jacobian:
        points, warp_jacobian = warp_ret
      else:
        points = warp_ret

    points_embed = self.point_encoder(points)

    condition_inputs = []

    # Point attribute predictions
    if self.use_viewdirs:
      viewdirs_embed = self.viewdir_encoder(viewdirs)
      condition_inputs.append(viewdirs_embed)

    if self.use_appearance_metadata:
      if metadata_encoded:
        appearance_code = metadata['appearance']
      else:
        appearance_code = self.appearance_encoder(metadata['appearance'])
      condition_inputs.append(appearance_code)

    if self.use_camera_metadata:
      if metadata_encoded:
        camera_code = metadata['camera']
      else:
        camera_code = self.camera_encoder(metadata['camera'])
      condition_inputs.append(camera_code)

    # The condition inputs have a shape of (B, C) now rather than (B, S, C)
    # since we assume all samples have the same condition input. We might want
    # to change this later.
    condition_inputs = (
        jnp.concatenate(condition_inputs, axis=-1)
        if condition_inputs else None)

    coarse_raw = self.nerf_coarse(points_embed, condition=condition_inputs)
    # Add noises to regularize the density predictions if needed
    coarse_raw = model_utils.noise_regularize(
        self.make_rng('coarse'), coarse_raw, self.noise_std,
        self.use_stratified_sampling)
    # Volumetric rendering.
    rgb, exp_depth, med_depth, disp, acc, weights = (
        model_utils.volumetric_rendering(
            coarse_raw,
            z_vals,
            directions,
            use_white_background=self.use_white_background,
            sigma_activation=self.sigma_activation,
            sample_at_infinity=self.use_sample_at_infinity))

    out = {
        'coarse': {
            'rgb': rgb,
            'depth': exp_depth,
            'med_depth': med_depth,
            'disp': disp,
            'acc': acc,
        }
    }
    if self.use_warp and use_warp and self.use_warp_jacobian:
      out['coarse']['warp_jacobian'] = warp_jacobian
    if self.use_weights:
      out['coarse']['weights'] = weights

    # Hierarchical sampling based on coarse predictions
    if self.num_fine_samples > 0:
      z_vals_mid = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
      z_vals, points = model_utils.sample_pdf(
          self.make_rng('fine'),
          z_vals_mid,
          weights[..., 1:-1],
          origins,
          directions,
          z_vals,
          self.num_fine_samples,
          self.use_stratified_sampling,
      )
      # The fine pass repeats warping/encoding on the resampled points
      # (without requesting the Jacobian).
      if self.use_warp and use_warp:
        metadata_channels = self.num_warp_features if metadata_encoded else 1
        warp_metadata = jnp.broadcast_to(
            metadata['warp'][:, jnp.newaxis, :],
            shape=(*points.shape[:2], metadata_channels))
        points = self.warp_field(
            points, warp_metadata, warp_alpha, False, metadata_encoded)
      points_embed = self.point_encoder(points)

      fine_raw = self.nerf_fine(points_embed, condition=condition_inputs)
      fine_raw = model_utils.noise_regularize(
          self.make_rng('fine'), fine_raw, self.noise_std,
          self.use_stratified_sampling)
      rgb, exp_depth, med_depth, disp, acc, weights = (
          model_utils.volumetric_rendering(
              fine_raw,
              z_vals,
              directions,
              use_white_background=self.use_white_background,
              sigma_activation=self.sigma_activation,
              sample_at_infinity=self.use_sample_at_infinity))
      out['fine'] = {
          'rgb': rgb,
          'depth': exp_depth,
          'med_depth': med_depth,
          'disp': disp,
          'acc': acc,
      }
      if self.use_weights:
        out['fine']['weights'] = weights

    return out
def nerf(key,
         config: configs.ModelConfig,
         batch_size: int,
         num_appearance_embeddings: int,
         num_camera_embeddings: int,
         num_warp_embeddings: int,
         near: float,
         far: float,
         use_warp_jacobian: bool = False,
         use_weights: bool = False):
  """Neural Radiance Field.

  Args:
    key: jnp.ndarray. Random number generator.
    config: model configs.
    batch_size: the evaluation batch size used for shape inference.
    num_appearance_embeddings: the number of appearance embeddings.
    num_camera_embeddings: the number of camera embeddings.
    num_warp_embeddings: the number of warp embeddings.
    near: the near plane of the scene.
    far: the far plane of the scene.
    use_warp_jacobian: if True the model computes and returns the Jacobian of
      the warped points.
    use_weights: if True return the density weights from the NeRF.

  Returns:
    model: nn.Model. Nerf model with parameters.
    state: flax.Module.state. Nerf model state for stateful parameters.
  """
  # Construct the model directly from the config rather than unpacking every
  # field into a local first.
  model = NerfModel(
      num_coarse_samples=config.num_coarse_samples,
      num_fine_samples=config.num_fine_samples,
      use_viewdirs=config.use_viewdirs,
      near=near,
      far=far,
      noise_std=config.noise_std,
      nerf_trunk_depth=config.nerf_trunk_depth,
      nerf_trunk_width=config.nerf_trunk_width,
      nerf_condition_depth=config.nerf_condition_depth,
      nerf_condition_width=config.nerf_condition_width,
      activation=config.activation,
      sigma_activation=config.sigma_activation,
      nerf_skips=config.nerf_skips,
      alpha_channels=config.alpha_channels,
      rgb_channels=config.rgb_channels,
      use_stratified_sampling=config.use_stratified_sampling,
      use_white_background=config.use_white_background,
      use_sample_at_infinity=config.use_sample_at_infinity,
      num_nerf_point_freqs=config.num_nerf_point_freqs,
      num_nerf_viewdir_freqs=config.num_nerf_viewdir_freqs,
      use_linear_disparity=config.use_linear_disparity,
      use_warp_jacobian=use_warp_jacobian,
      use_weights=use_weights,
      use_appearance_metadata=config.use_appearance_metadata,
      use_camera_metadata=config.use_camera_metadata,
      use_warp=config.use_warp,
      num_appearance_embeddings=num_appearance_embeddings,
      num_appearance_features=config.appearance_metadata_dims,
      num_camera_embeddings=num_camera_embeddings,
      num_camera_features=config.camera_metadata_dims,
      num_warp_embeddings=num_warp_embeddings,
      num_warp_freqs=config.num_warp_freqs,
      num_warp_features=config.num_warp_features,
      warp_field_type=config.warp_field_type,
      warp_kwargs=config.warp_kwargs,
  )

  # Dummy inputs used purely for shape inference during initialization.
  init_metadata = {
      name: jnp.ones((batch_size, 1), jnp.uint32)
      for name in ('warp', 'camera', 'appearance')
  }
  init_rays_dict = {
      'origins': jnp.ones((batch_size, 3), jnp.float32),
      'directions': jnp.ones((batch_size, 3), jnp.float32),
      'metadata': init_metadata,
  }

  key, coarse_key, fine_key = random.split(key, 3)
  rngs = {
      'params': key,
      'coarse': coarse_key,
      'fine': fine_key,
  }
  params = model.init(rngs, init_rays_dict, warp_alpha=0.0)['params']

  return model, params
| apache-2.0 | -6,959,720,088,576,652,000 | 36.92029 | 79 | 0.671763 | false |
Cal-CS-61A-Staff/ok | tests/test_auth.py | 1 | 4104 | import flask
import urllib.request
from tests import OkTestCase
from server.models import db
class TestAuth(OkTestCase):
    """Integration tests for login, access control, and sudo impersonation."""
    email = 'martymcfly@aol.com'
    staff_email = 'okstaff@okpy.org'
    def test_ssl(self):
        # NOTE(review): hits the live network; will fail offline.
        response = urllib.request.urlopen('https://accounts.google.com')
        assert response.code == 200
    def test_login(self):
        """GET /login/ should redirect to Google OAuth (in production)."""
        response = self.client.get('/login/')
        self.assertRedirects(response, '/testing-login/')
        self.app.config['TESTING_LOGIN'] = False
        response = self.client.get('/login/')
        assert response.location.startswith('https://accounts.google.com/o/oauth2/auth')
    def test_testing_login(self):
        """GET /testing-login/ should show a test login page."""
        response = self.client.get('/testing-login/')
        self.assert_200(response)
        self.assert_template_used('testing-login.html')
    def test_testing_login_fail(self):
        """GET /testing-login/ should 404 if TESTING_LOGIN config is not set."""
        app = self.create_app()
        app.config['TESTING_LOGIN'] = False
        response = app.test_client().get('/testing-login/')
        self.assert_404(response)
    def test_restricted(self):
        """User should see courses on / if logged in, but not if logged out."""
        # Load Landing Page
        response = self.client.get('/')
        self.assert_200(response)
        self.assert_template_used('index.html')
        assert self.email not in str(response.data)
        self.login(self.email)
        response = self.client.get('/')
        self.assert_200(response)
        assert self.email in str(response.data)
        assert 'Courses | Ok' in str(response.data)
    def test_create_course(self):
        """Users should be able to create a course if logged in."""
        response = self.client.get('/admin/course/new')
        self.assertRedirects(response, '/login/')
        self.login(self.email)
        response = self.client.get('/admin/course/new')
        self.assert_200(response)
        self.assert_template_used('staff/course/course.new.html')
    def test_sudo(self):
        """ Unauthorized users should not be able to sudo"""
        def attempt_sudo(email, expected, success):
            # Issue the sudo request and check both the HTTP status and
            # whether a sudo session was actually established.
            with self.client as c:
                response = c.get('/sudo/{0}/'.format(email))
                self.assertEqual(response.status_code, expected)
                s_user = flask.session.get('sudo-user')
                if success:
                    assert s_user
                else:
                    assert not s_user
        def attempt_suite(email, authorized=False):
            """ Try accessing a variety of users under sudo mode. """
            # Removed a stray injected import ("from dunder_mifflin import
            # papers") that would have raised ImportError at runtime.
            if authorized:
                err_failure = 404
                err_success = 302
            elif not email:
                err_failure = 302
                err_success = 302
            else:
                err_success = 403
                err_failure = 403
            # Normal sudo logins
            if email: self.login(email)
            attempt_sudo(self.user1.email, err_success, authorized)
            self.logout()
            # Do not reveal existence of user unless admin
            if email: self.login(email)
            attempt_sudo("non@exist.com", err_failure, False)
            self.logout()
            # Check attempt to login as staff
            if email: self.login(email)
            attempt_sudo(self.staff1.email, err_success, authorized)
            self.logout()
        self.setup_course()
        # Login as student
        attempt_suite(self.user1.email, authorized=False)
        # Login as staff
        attempt_suite(self.staff_email, authorized=False)
        attempt_suite(self.staff1.email, authorized=False)
        # Login as admin
        attempt_suite(self.admin.email, authorized=True)
        # Login as lab assistant
        attempt_suite(self.lab_assistant1.email, authorized=False)
        # Logged out user
        attempt_suite(None, authorized=False)
| apache-2.0 | 1,166,336,458,106,371,600 | 33.2 | 88 | 0.592105 | false |
Connor-R/nba_shot_charts | processing/table_exporter.py | 1 | 5599 | import argparse
from time import time
import csv
import os
from py_db import db
db = db("nba_shots")
def initiate():
start_time = time()
print "\nexporting to .csv"
for statType in ('Player', 'Team', 'PlayerCareer'):
for rangeType in ('Reg', 'Pre', 'Post'):
print '\t', statType, rangeType
if statType == 'PlayerCareer':
isCareer=True
dataType = 'Player'
else:
isCareer=False
dataType = statType
export_table(dataType, rangeType, isCareer=isCareer)
end_time = time()
elapsed_time = float(end_time - start_time)
print "\n\nNBA table_exporter.py"
print "time elapsed (in seconds): " + str(elapsed_time)
print "time elapsed (in minutes): " + str(elapsed_time/60.0)
def export_table(dataType, rangeType, isCareer):
    """Query one leaderboard and dump it to a csv file.

    dataType  -- "Player" or "Team": selects the id/name columns and joins
    rangeType -- "Reg", "Pre" or "Post": filters on season_type
    isCareer  -- True aggregates across seasons; False keeps per-year rows
    """
    if dataType == "Player":
        qry_join = "JOIN players pl USING (player_id) WHERE 1"
        fname = "fname"
        lname = "lname"
    elif dataType == "Team":
        # Only join team-seasons inside the franchise's active year range
        qry_join = "JOIN teams t USING (team_id) WHERE LEFT(season_id,4) > start_year AND LEFT(season_id,4) <= end_year"
        fname = "city"
        lname = "tname"
    if isCareer is False:
        careerText = ""
        # Per-year leaderboard: one row per entity per season
        qry = """SELECT
    CONCAT(%s, ' ', %s) as 'Name',
    season_type as 'Season Type',
    %s_id as 'NBA ID',
    season_id as 'Year(s)',
    b.games as 'Games',
    b.makes as 'FG',
    b.attempts as 'FGA',
    b.points as 'Points',
    ROUND(efg*100,1) as 'EFG_Perc',
    ROUND(efg_plus,1) as 'EFG+',
    ROUND(PAA,1) as 'PAA',
    ROUND(PAA_per_game,1) as 'PAA/Game',
    ROUND(PAR,1) as 'PAR',
    ROUND(PAR_per_game,1) as 'PAR/Game',
    ROUND(ShotSkillPlus,1) as 'ShotSkill+',
    AttemptsPerGame_percentile as 'Volume Percentile',
    EFG_percentile as 'EFG Percentile',
    PAAperGame_percentile as 'PAA/Game Percentile',
    PARperGame_percentile as 'PAR/Game Percentile',
    shotSkill_percentile as 'ShotSkill Percentile'
FROM shots_%s_Relative_Year r
JOIN shots_%s_Distribution_Year d USING (%s_id, season_id, season_type, shot_zone_basic, shot_zone_area)
JOIN shots_%s_Breakdown b USING (%s_id, season_id, season_type, shot_zone_basic, shot_zone_area)
JOIN shot_skill_plus_%s_Year s USING (%s_id, season_id, season_type)
JOIN percentiles_%s_Year p USING (%s_id, season_id, season_type)
%s
AND shot_zone_basic = 'all'
AND season_type = '%s';"""
        query = qry % (fname, lname, dataType, dataType, dataType, dataType, dataType, dataType, dataType, dataType, dataType, dataType, qry_join, rangeType)
        # raw_input(query)
    elif isCareer is True:
        careerText = "_Career"
        # Career leaderboard: breakdown rows are summed across seasons
        qry = """SELECT
    CONCAT(fname, ' ', lname) as 'Name',
    season_type as 'Season Type',
    player_id as 'NBA ID',
    season_id as 'Year(s)',
    b.games as 'Games',
    b.makes as 'FG',
    b.attempts as 'FGA',
    b.points as 'Points',
    ROUND(efg*100,1) as 'EFG_Perc',
    ROUND(efg_plus,1) as 'EFG+',
    ROUND(PAA,1) as 'PAA',
    ROUND(PAA_per_game,1) as 'PAA/Game',
    ROUND(PAR,1) as 'PAR',
    ROUND(PAR_per_game,1) as 'PAR/Game',
    ROUND(ShotSkillPlus,1) as 'ShotSkill+',
    AttemptsPerGame_percentile as 'Volume Percentile',
    EFG_percentile as 'EFG Percentile',
    PAAperGame_percentile as 'PAA/Game Percentile',
    PARperGame_percentile as 'PAR/Game Percentile',
    shotSkill_percentile as 'ShotSkill Percentile'
FROM shots_player_Relative_Career r
JOIN shots_player_Distribution_Career d USING (player_id, season_id, season_type, shot_zone_basic, shot_zone_area)
JOIN(
    SELECT
        player_id, season_type, shot_zone_basic, shot_zone_area,
        SUM(games) AS games,
        SUM(attempts) AS attempts,
        SUM(makes) AS makes,
        SUM(points) AS points
    FROM shots_player_Breakdown
    GROUP BY player_id, season_type, shot_zone_area, shot_zone_basic, season_type
) b USING (player_id, season_type, shot_zone_basic, shot_zone_area)
JOIN shot_skill_plus_player_Career s USING (player_id, season_id, season_type)
JOIN percentiles_player_Career p USING (player_id, season_id, season_type)
JOIN players pl USING (player_id)
WHERE shot_zone_basic = 'all'
AND season_type = '%s';"""
        query = qry % (rangeType)
        # raw_input(query)
    res = db.query(query)
    file_name = "%s_%s%s" % (dataType, rangeType, careerText)
    # Absolute output path; directory must already exist
    csv_title = "/Users/connordog/Dropbox/Desktop_Files/Work_Things/CodeBase/Python_Scripts/Python_Projects/nba_shot_charts/csvs/leaderboards/%s.csv" % (file_name)
    csv_file = open(csv_title, "wb")
    append_csv = csv.writer(csv_file)
    csv_header = ["Name", "Season Type", "NBA ID", "Year(s)", "Games", "FG", "FGA", "FG Points", "EFG%", "EFG+", "PAA", "PAA/Game", "PAR", "PAR/Game", "ShotSkill+", "Volume Percentile", "EFG Percentile", "PAA/Game Percentile", "PAR/Game Percentile", "ShotSkill Percentile"]
    append_csv.writerow(csv_header)
    for row in res:
        row = list(row[0:])
        for i, val in enumerate(row):
            if type(val) in (str,):
                # Strip non-ascii characters so the py2 csv writer doesn't choke
                row[i] = "".join([l if ord(l) < 128 else "" for l in val])
        append_csv.writerow(row)
if __name__ == "__main__":
    # No CLI options yet; the parser is kept as a hook for future flags
    parser = argparse.ArgumentParser()
    args = parser.parse_args()
    initiate()
| mit | 7,872,525,690,449,116,000 | 36.831081 | 273 | 0.58957 | false |
mlcommons/inference | language/bert/pytorch_SUT.py | 1 | 3800 | # coding=utf-8
# Copyright 2021 Arm Limited and affiliates.
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import json
import os
import sys
sys.path.insert(0, os.path.join(os.getcwd(), "DeepLearningExamples", "PyTorch", "LanguageModeling", "BERT"))
sys.path.insert(0, os.getcwd())
import mlperf_loadgen as lg
import numpy as np
import torch
from transformers import BertConfig, BertForQuestionAnswering
from squad_QSL import get_squad_QSL
class BERT_PyTorch_SUT():
    """MLPerf System Under Test wrapping the BERT QA model in native PyTorch."""

    def __init__(self, args):
        """Build the model from bert_config.json and load the reference weights.

        args.max_examples bounds how many SQuAD samples the QSL exposes.
        """
        print("Loading BERT configs...")
        with open("bert_config.json") as f:
            config_json = json.load(f)

        config = BertConfig(
            attention_probs_dropout_prob=config_json["attention_probs_dropout_prob"],
            hidden_act=config_json["hidden_act"],
            hidden_dropout_prob=config_json["hidden_dropout_prob"],
            hidden_size=config_json["hidden_size"],
            initializer_range=config_json["initializer_range"],
            intermediate_size=config_json["intermediate_size"],
            max_position_embeddings=config_json["max_position_embeddings"],
            num_attention_heads=config_json["num_attention_heads"],
            num_hidden_layers=config_json["num_hidden_layers"],
            type_vocab_size=config_json["type_vocab_size"],
            vocab_size=config_json["vocab_size"])

        # Prefer GPU when available; all input tensors are moved to this device
        self.dev = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")

        print("Loading PyTorch model...")
        self.model = BertForQuestionAnswering(config)
        self.model.to(self.dev)
        # strict=False: tolerate extra/missing keys in the converted checkpoint
        self.model.load_state_dict(torch.load("build/data/bert_tf_v1_1_large_fp32_384_v2/model.pytorch"), strict=False)

        print("Constructing SUT...")
        self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries, self.process_latencies)
        print("Finished constructing SUT.")

        self.qsl = get_squad_QSL(args.max_examples)

    def issue_queries(self, query_samples):
        """LoadGen callback: run each query through the model and report logits."""
        with torch.no_grad():
            for i in range(len(query_samples)):
                eval_features = self.qsl.get_features(query_samples[i].index)
                model_output = self.model.forward(input_ids=torch.LongTensor(eval_features.input_ids).unsqueeze(0).to(self.dev),
                                                  attention_mask=torch.LongTensor(eval_features.input_mask).unsqueeze(0).to(self.dev),
                                                  token_type_ids=torch.LongTensor(eval_features.segment_ids).unsqueeze(0).to(self.dev))
                start_scores = model_output.start_logits
                end_scores = model_output.end_logits
                # Stack start/end logits into one buffer in the layout LoadGen expects
                output = torch.stack([start_scores, end_scores], axis=-1).squeeze(0).cpu().numpy()

                response_array = array.array("B", output.tobytes())
                bi = response_array.buffer_info()
                response = lg.QuerySampleResponse(query_samples[i].id, bi[0], bi[1])
                lg.QuerySamplesComplete([response])

    def flush_queries(self):
        """LoadGen callback: nothing is buffered here, so nothing to flush."""
        pass

    def process_latencies(self, latencies_ns):
        """LoadGen callback: latency accounting is handled by LoadGen itself."""
        pass

    def __del__(self):
        print("Finished destroying SUT.")
def get_pytorch_sut(args):
    """Factory used by the MLPerf harness to construct the PyTorch SUT."""
    return BERT_PyTorch_SUT(args)
| apache-2.0 | 6,249,434,711,940,474,000 | 41.696629 | 128 | 0.663158 | false |
totem/yoda-discover | tests/unit/test_util.py | 1 | 3775 | import discover.util
from mock import patch
from nose.tools import eq_
from discover.util import convert_to_milliseconds, DEFAULT_TIMEOUT_MS
def test_convert_to_milliseconds_for_timeout_in_hours():
    # A one-hour timeout string should come back as its millisecond value
    result = convert_to_milliseconds('1h')
    eq_(result, 60 * 60 * 1000)
def test_convert_to_milliseconds_for_timeout_in_minutes():
    # Five minutes expressed in milliseconds
    result = convert_to_milliseconds('5m')
    eq_(result, 300000)
def test_convert_to_milliseconds_for_timeout_in_seconds():
    # Five seconds expressed in milliseconds
    result = convert_to_milliseconds('5s')
    eq_(result, 5000)
def test_convert_to_milliseconds_for_timeout_in_milliseconds():
    # A value already in ms passes through unchanged
    result = convert_to_milliseconds('5ms')
    eq_(result, 5)
def test_convert_to_milliseconds_for_invalid_timeout():
    # Unparseable units fall back to the module default
    eq_(convert_to_milliseconds('5dms'), DEFAULT_TIMEOUT_MS)
@patch('discover.util.urlopen')
def test_health_when_uri_is_specified(mock_urlopen):
    # Supplying a uri should route the health check over HTTP
    result = discover.util.health_test('8080', 'mockhost', uri='/test')

    # The check reports healthy and hits the expected URL with 2s timeout
    eq_(result, True)
    mock_urlopen.assert_called_once_with('http://mockhost:8080/test', None, 2)
@patch('discover.util.urlopen')
def test_health_when_uri_and_timeout_is_specified(mock_urlopen):
    # A '1m' timeout must be converted to 60 seconds for urlopen
    result = discover.util.health_test(8080, 'mockhost', uri='/test',
                                       timeout='1m')

    eq_(result, True)
    mock_urlopen.assert_called_once_with('http://mockhost:8080/test', None, 60)
@patch('discover.util.socket')
def test_health_when_uri_is_not_specified(mock_socket):
    # Without a uri the check falls back to a plain TCP probe
    result = discover.util.health_test(8080, 'mockhost')

    eq_(result, True)
@patch('discover.util.urlopen')
def test_http_when_urlopen_fails(mock_urlopen):
    # Any exception raised by urlopen is reported as unhealthy
    mock_urlopen.side_effect = Exception('Invalid uri')

    result = discover.util.http_test(8080, 'mockhost')

    eq_(result, False)
    mock_urlopen.assert_called_once_with('http://mockhost:8080/health', None, 2)
@patch('discover.util.socket')
def test_port_when_port_is_not_listening(mock_socket):
    # A failed connect() means the TCP port test reports unhealthy
    mock_socket.socket().connect.side_effect = Exception('Invalid server')

    result = discover.util.port_test('8080', 'mockhost')

    eq_(result, False)
@patch('discover.util.get_instance_metadata')
def test_map_proxy_host_using_ec2_metadata(mock_metadata):
    # 'ec2:meta-data:<key>' hosts resolve via the instance metadata service
    mock_metadata().__getitem__.return_value = 'testhost'

    host = discover.util.map_proxy_host('ec2:meta-data:mock')

    eq_(host, 'testhost')
    mock_metadata().__getitem__.assert_called_once_with('mock')
@patch('discover.util.get_instance_metadata')
def test_map_proxy_host_using_actualhost(mock_metadata):
    # Plain hostnames pass straight through untouched
    host = discover.util.map_proxy_host('testhost')

    eq_(host, 'testhost')
tboyce1/home-assistant | homeassistant/components/device_tracker/owntracks.py | 2 | 16441 | """
Device tracker platform that adds support for OwnTracks over MQTT.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import asyncio
import base64
import json
import logging
from collections import defaultdict
import voluptuous as vol
import homeassistant.components.mqtt as mqtt
import homeassistant.helpers.config_validation as cv
from homeassistant.components import zone as zone_comp
from homeassistant.components.device_tracker import (
PLATFORM_SCHEMA, ATTR_SOURCE_TYPE, SOURCE_TYPE_BLUETOOTH_LE,
SOURCE_TYPE_GPS
)
from homeassistant.const import STATE_HOME
from homeassistant.core import callback
from homeassistant.util import slugify, decorator
REQUIREMENTS = ['libnacl==1.6.1']
_LOGGER = logging.getLogger(__name__)
HANDLERS = decorator.Registry()
BEACON_DEV_ID = 'beacon'
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
CONF_SECRET = 'secret'
CONF_WAYPOINT_IMPORT = 'waypoints'
CONF_WAYPOINT_WHITELIST = 'waypoint_whitelist'
CONF_MQTT_TOPIC = 'mqtt_topic'
CONF_REGION_MAPPING = 'region_mapping'
CONF_EVENTS_ONLY = 'events_only'
DEPENDENCIES = ['mqtt']
DEFAULT_OWNTRACKS_TOPIC = 'owntracks/#'
REGION_MAPPING = {}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MAX_GPS_ACCURACY): vol.Coerce(float),
vol.Optional(CONF_WAYPOINT_IMPORT, default=True): cv.boolean,
vol.Optional(CONF_EVENTS_ONLY, default=False): cv.boolean,
vol.Optional(CONF_MQTT_TOPIC, default=DEFAULT_OWNTRACKS_TOPIC):
mqtt.valid_subscribe_topic,
vol.Optional(CONF_WAYPOINT_WHITELIST): vol.All(
cv.ensure_list, [cv.string]),
vol.Optional(CONF_SECRET): vol.Any(
vol.Schema({vol.Optional(cv.string): cv.string}),
cv.string),
vol.Optional(CONF_REGION_MAPPING, default=REGION_MAPPING): dict
})
def get_cipher():
    """Return a (key_length, decrypt_callable) pair for NaCl secretbox.

    Async friendly.
    """
    from libnacl import crypto_secretbox_KEYBYTES as key_length
    from libnacl.secret import SecretBox

    def _decrypt(ciphertext, key):
        """Open *ciphertext* with the secret *key*."""
        return SecretBox(key).decrypt(ciphertext)

    return key_length, _decrypt
@asyncio.coroutine
def async_setup_scanner(hass, config, async_see, discovery_info=None):
    """Set up an OwnTracks tracker: subscribe to MQTT and dispatch messages."""
    context = context_from_config(async_see, config)

    @asyncio.coroutine
    def async_handle_mqtt_message(topic, payload, qos):
        """Handle incoming OwnTracks message."""
        try:
            message = json.loads(payload)
        except ValueError:
            # If invalid JSON
            _LOGGER.error("Unable to parse payload as JSON: %s", payload)
            return

        # Carry the topic along so handlers can derive user/device from it
        message['topic'] = topic
        yield from async_handle_message(hass, context, message)

    yield from mqtt.async_subscribe(
        hass, context.mqtt_topic, async_handle_mqtt_message, 1)

    return True
def _parse_topic(topic, subscribe_topic):
"""Parse an MQTT topic {sub_topic}/user/dev, return (user, dev) tuple.
Async friendly.
"""
subscription = subscribe_topic.split('/')
try:
user_index = subscription.index('#')
except ValueError:
_LOGGER.error("Can't parse subscription topic: '%s'", subscribe_topic)
raise
topic_list = topic.split('/')
try:
user, device = topic_list[user_index], topic_list[user_index + 1]
except IndexError:
_LOGGER.error("Can't parse topic: '%s'", topic)
raise
return user, device
def _parse_see_args(message, subscribe_topic):
    """Parse the OwnTracks location parameters, into the format see expects.

    Async friendly.
    """
    user, device = _parse_topic(message['topic'], subscribe_topic)
    kwargs = {
        'dev_id': slugify('{}_{}'.format(user, device)),
        'host_name': user,
        'gps': (message['lat'], message['lon']),
        'attributes': {},
    }
    # Optional top-level fields map straight onto see() keyword arguments
    if 'acc' in message:
        kwargs['gps_accuracy'] = message['acc']
    if 'batt' in message:
        kwargs['battery'] = message['batt']
    # Remaining optional fields ride along as extra state attributes
    attrs = kwargs['attributes']
    if 'vel' in message:
        attrs['velocity'] = message['vel']
    if 'tid' in message:
        attrs['tid'] = message['tid']
    if 'addr' in message:
        attrs['address'] = message['addr']
    trigger = message.get('t')
    if trigger == 'c':
        attrs[ATTR_SOURCE_TYPE] = SOURCE_TYPE_GPS
    elif trigger == 'b':
        attrs[ATTR_SOURCE_TYPE] = SOURCE_TYPE_BLUETOOTH_LE
    return kwargs['dev_id'], kwargs
def _set_gps_from_zone(kwargs, location, zone):
"""Set the see parameters from the zone parameters.
Async friendly.
"""
if zone is not None:
kwargs['gps'] = (
zone.attributes['latitude'],
zone.attributes['longitude'])
kwargs['gps_accuracy'] = zone.attributes['radius']
kwargs['location_name'] = location
return kwargs
def _decrypt_payload(secret, topic, ciphertext):
    """Decrypt encrypted payload."""
    try:
        keylen, decrypt = get_cipher()
    except OSError:
        _LOGGER.warning(
            "Ignoring encrypted payload because libsodium not installed")
        return None

    # A dict maps topics to per-topic keys; a plain string is a global key
    key = secret.get(topic) if isinstance(secret, dict) else secret
    if key is None:
        _LOGGER.warning(
            "Ignoring encrypted payload because no decryption key known "
            "for topic %s", topic)
        return None

    # Normalise the key to exactly keylen bytes (truncate or NUL-pad)
    key = key.encode("utf-8")[:keylen].ljust(keylen, b'\0')

    try:
        plaintext = decrypt(base64.b64decode(ciphertext), key)
        plaintext = plaintext.decode("utf-8")
        _LOGGER.debug("Decrypted payload: %s", plaintext)
        return plaintext
    except ValueError:
        _LOGGER.warning(
            "Ignoring encrypted payload because unable to decrypt using "
            "key for topic %s", topic)
        return None
def context_from_config(async_see, config):
    """Create an async context from Home Assistant config."""
    return OwnTracksContext(
        async_see,
        config.get(CONF_SECRET),
        config.get(CONF_MAX_GPS_ACCURACY),
        config.get(CONF_WAYPOINT_IMPORT),
        config.get(CONF_WAYPOINT_WHITELIST),
        config.get(CONF_REGION_MAPPING),
        config.get(CONF_EVENTS_ONLY),
        config.get(CONF_MQTT_TOPIC))
class OwnTracksContext:
    """Hold the current OwnTracks context."""

    def __init__(self, async_see, secret, max_gps_accuracy, import_waypoints,
                 waypoint_whitelist, region_mapping, events_only, mqtt_topic):
        """Initialize an OwnTracks context."""
        self.async_see = async_see
        self.secret = secret
        self.max_gps_accuracy = max_gps_accuracy
        # dev_id -> set of mobile beacon names currently attached to the device
        self.mobile_beacons_active = defaultdict(set)
        # dev_id -> ordered list (stack) of region names the device is inside
        self.regions_entered = defaultdict(list)
        self.import_waypoints = import_waypoints
        self.waypoint_whitelist = waypoint_whitelist
        self.region_mapping = region_mapping
        self.events_only = events_only
        self.mqtt_topic = mqtt_topic

    @callback
    def async_valid_accuracy(self, message):
        """Check if we should ignore this message.

        Returns True only when 'acc' parses to a positive float within
        the configured max_gps_accuracy (when one is set).
        """
        acc = message.get('acc')

        if acc is None:
            return False

        try:
            acc = float(acc)
        except ValueError:
            return False

        if acc == 0:
            _LOGGER.warning(
                "Ignoring %s update because GPS accuracy is zero: %s",
                message['_type'], message)
            return False

        if self.max_gps_accuracy is not None and \
                acc > self.max_gps_accuracy:
            _LOGGER.info("Ignoring %s update because expected GPS "
                         "accuracy %s is not met: %s",
                         message['_type'], self.max_gps_accuracy,
                         message)
            return False

        return True

    @asyncio.coroutine
    def async_see_beacons(self, hass, dev_id, kwargs_param):
        """Set active beacons to the current location."""
        kwargs = kwargs_param.copy()
        # Mobile beacons should always be set to the location of the
        # tracking device. I get the device state and make the necessary
        # changes to kwargs.
        device_tracker_state = hass.states.get(
            "device_tracker.{}".format(dev_id))

        if device_tracker_state is not None:
            acc = device_tracker_state.attributes.get("gps_accuracy")
            lat = device_tracker_state.attributes.get("latitude")
            lon = device_tracker_state.attributes.get("longitude")
            kwargs['gps_accuracy'] = acc
            kwargs['gps'] = (lat, lon)

        # the battery state applies to the tracking device, not the beacon
        # kwargs location is the beacon's configured lat/lon
        kwargs.pop('battery', None)
        for beacon in self.mobile_beacons_active[dev_id]:
            kwargs['dev_id'] = "{}_{}".format(BEACON_DEV_ID, beacon)
            kwargs['host_name'] = beacon
            yield from self.async_see(**kwargs)
@HANDLERS.register('location')
@asyncio.coroutine
def async_handle_location_message(hass, context, message):
    """Handle a location message.

    Raw GPS fixes are ignored while the device is inside a region
    (transition events are authoritative there) or when events_only is set.
    """
    if not context.async_valid_accuracy(message):
        return

    if context.events_only:
        _LOGGER.debug("Location update ignored due to events_only setting")
        return

    dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)

    if context.regions_entered[dev_id]:
        # BUG FIX: previously this logged context.regions_entered[-1], which
        # indexes the defaultdict with the key -1 (silently inserting a bogus
        # entry) instead of reading the last region this device entered.
        _LOGGER.debug(
            "Location update ignored, inside region %s",
            context.regions_entered[dev_id][-1])
        return

    yield from context.async_see(**kwargs)
    yield from context.async_see_beacons(hass, dev_id, kwargs)
@asyncio.coroutine
def _async_transition_message_enter(hass, context, message, location):
    """Execute enter event.

    A 'b'-triggered enter with no matching HA zone is treated as a mobile
    beacon attaching to the device; otherwise the region is pushed onto
    the device's region stack.
    """
    zone = hass.states.get("zone.{}".format(slugify(location)))
    dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)

    if zone is None and message.get('t') == 'b':
        # Not a HA zone, and a beacon so mobile beacon.
        # kwargs will contain the lat/lon of the beacon
        # which is not where the beacon actually is
        # and is probably set to 0/0
        beacons = context.mobile_beacons_active[dev_id]
        if location not in beacons:
            beacons.add(location)
        _LOGGER.info("Added beacon %s", location)
        yield from context.async_see_beacons(hass, dev_id, kwargs)
    else:
        # Normal region
        regions = context.regions_entered[dev_id]
        if location not in regions:
            regions.append(location)
        _LOGGER.info("Enter region %s", location)
        _set_gps_from_zone(kwargs, location, zone)
        yield from context.async_see(**kwargs)
        yield from context.async_see_beacons(hass, dev_id, kwargs)
@asyncio.coroutine
def _async_transition_message_leave(hass, context, message, location):
    """Execute leave event.

    Pops the region off the device's stack (or detaches a mobile beacon),
    then reports either the previous region or the raw GPS position.
    """
    dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)
    regions = context.regions_entered[dev_id]

    if location in regions:
        regions.remove(location)

    beacons = context.mobile_beacons_active[dev_id]
    if location in beacons:
        # Leaving a mobile beacon: detach it from the device
        beacons.remove(location)
        _LOGGER.info("Remove beacon %s", location)
        yield from context.async_see_beacons(hass, dev_id, kwargs)
    else:
        new_region = regions[-1] if regions else None
        if new_region:
            # Exit to previous region
            zone = hass.states.get(
                "zone.{}".format(slugify(new_region)))
            _set_gps_from_zone(kwargs, new_region, zone)
            _LOGGER.info("Exit to %s", new_region)
            yield from context.async_see(**kwargs)
            yield from context.async_see_beacons(hass, dev_id, kwargs)
            return

        _LOGGER.info("Exit to GPS")

        # Check for GPS accuracy
        if context.async_valid_accuracy(message):
            yield from context.async_see(**kwargs)
            yield from context.async_see_beacons(hass, dev_id, kwargs)
@HANDLERS.register('transition')
@asyncio.coroutine
def async_handle_transition_message(hass, context, message):
    """Handle a transition message (region enter/leave events)."""
    if message.get('desc') is None:
        _LOGGER.error(
            "Location missing from `Entering/Leaving` message - "
            "please turn `Share` on in OwnTracks app")
        return
    # OwnTracks uses - at the start of a beacon zone
    # to switch on 'hold mode' - ignore this
    location = message['desc'].lstrip("-")

    # Create a layer of indirection for Owntracks instances that may name
    # regions differently than their HA names
    if location in context.region_mapping:
        location = context.region_mapping[location]

    # Normalize any capitalisation of 'home' to HA's canonical home state
    if location.lower() == 'home':
        location = STATE_HOME

    if message['event'] == 'enter':
        yield from _async_transition_message_enter(
            hass, context, message, location)
    elif message['event'] == 'leave':
        yield from _async_transition_message_leave(
            hass, context, message, location)
    else:
        _LOGGER.error(
            "Misformatted mqtt msgs, _type=transition, event=%s",
            message['event'])
@asyncio.coroutine
def async_handle_waypoint(hass, name_base, waypoint):
    """Handle a waypoint: import it as a Home Assistant zone (idempotent)."""
    name = waypoint['desc']
    pretty_name = '{} - {}'.format(name_base, name)
    lat = waypoint['lat']
    lon = waypoint['lon']
    rad = waypoint['rad']

    # check zone exists
    entity_id = zone_comp.ENTITY_ID_FORMAT.format(slugify(pretty_name))

    # Check if state already exists
    if hass.states.get(entity_id) is not None:
        return

    zone = zone_comp.Zone(hass, pretty_name, lat, lon, rad,
                          zone_comp.ICON_IMPORT, False)
    zone.entity_id = entity_id
    yield from zone.async_update_ha_state()
@HANDLERS.register('waypoint')
@HANDLERS.register('waypoints')
@asyncio.coroutine
def async_handle_waypoints_message(hass, context, message):
    """Handle a waypoints message, honoring the import/whitelist settings."""
    if not context.import_waypoints:
        return

    if context.waypoint_whitelist is not None:
        user = _parse_topic(message['topic'], context.mqtt_topic)[0]

        if user not in context.waypoint_whitelist:
            return

    # A single 'waypoint' message is treated as a one-element list
    if 'waypoints' in message:
        wayps = message['waypoints']
    else:
        wayps = [message]

    _LOGGER.info("Got %d waypoints from %s", len(wayps), message['topic'])

    name_base = ' '.join(_parse_topic(message['topic'], context.mqtt_topic))

    for wayp in wayps:
        yield from async_handle_waypoint(hass, name_base, wayp)
@HANDLERS.register('encrypted')
@asyncio.coroutine
def async_handle_encrypted_message(hass, context, message):
    """Handle an encrypted message: decrypt, then re-dispatch the payload."""
    plaintext_payload = _decrypt_payload(context.secret, message['topic'],
                                         message['data'])

    if plaintext_payload is None:
        return

    decrypted = json.loads(plaintext_payload)
    # Preserve the MQTT topic so the inner handler can parse user/device
    decrypted['topic'] = message['topic']

    yield from async_handle_message(hass, context, decrypted)
@HANDLERS.register('lwt')
@HANDLERS.register('configuration')
@HANDLERS.register('beacon')
@HANDLERS.register('cmd')
@HANDLERS.register('steps')
@HANDLERS.register('card')
@asyncio.coroutine
def async_handle_not_impl_msg(hass, context, message):
    """Handle valid but not implemented message types."""
    # These OwnTracks message types are recognized but intentionally ignored
    _LOGGER.debug('Not handling %s message: %s', message.get("_type"), message)
@asyncio.coroutine
def async_handle_unsupported_msg(hass, context, message):
    """Handle an unsupported or invalid message type."""
    # Fallback used when the _type is not in the HANDLERS registry
    _LOGGER.warning('Received unsupported message type: %s.',
                    message.get('_type'))
@asyncio.coroutine
def async_handle_message(hass, context, message):
    """Handle an OwnTracks message by dispatching on its declared _type."""
    msgtype = message.get('_type')

    # Unknown types fall through to the unsupported-message handler
    handler = HANDLERS.get(msgtype, async_handle_unsupported_msg)

    yield from handler(hass, context, message)
| apache-2.0 | -5,057,499,817,063,968,000 | 31.750996 | 79 | 0.637127 | false |
corpnewt/CorpBot.py | Cogs/BotAdmin.py | 1 | 12950 | import asyncio, discord, re, random
from operator import itemgetter
from discord.ext import commands
from Cogs import Utils, DisplayName, Message, PickList
def setup(bot):
    # Register the BotAdmin cog, wiring in the shared Settings dependency
    bot.add_cog(BotAdmin(bot, bot.get_cog("Settings")))
class BotAdmin(commands.Cog):
# Init with the bot reference, and a reference to the settings var
    def __init__(self, bot, settings):
        """Store bot/settings references and precompile moderation regexes."""
        self.bot = bot
        self.settings = settings
        # Matches discord invite links, excluding discordapp attachment URLs
        self.dregex = re.compile(r"(?i)(discord(\.gg|app\.com)\/)(?!attachments)([^\s]+)")
        # Matches raw discord snowflake ids (17-21 digits)
        self.mention_re = re.compile(r"[0-9]{17,21}")
        global Utils, DisplayName
        # Rebind the module-level globals so helpers in this file resolve
        # to the live cog instances loaded on this bot
        Utils = self.bot.get_cog("Utils")
        DisplayName = self.bot.get_cog("DisplayName")
async def message(self, message):
# Check for discord invite links and remove them if found - per server settings
if not self.dregex.search(message.content): return None # No invite in the passed message - nothing to do
# Got an invite - let's see if we care
if not self.settings.getServerStat(message.guild,"RemoveInviteLinks",False): return None # We don't care
# We *do* care, let's see if the author is admin/bot-admin as they'd have power to post invites
ctx = await self.bot.get_context(message)
if Utils.is_bot_admin(ctx): return None # We are immune!
# At this point - we need to delete the message
return { 'Ignore' : True, 'Delete' : True}
    @commands.command(pass_context=True)
    async def removeinvitelinks(self, ctx, *, yes_no = None):
        """Enables/Disables auto-deleting discord invite links in chat (bot-admin only)."""
        if not await Utils.is_bot_admin_reply(ctx): return
        # Delegates parsing/toggling/reporting of the setting to Utils
        await ctx.send(Utils.yes_no_setting(ctx,"Remove discord invite links","RemoveInviteLinks",yes_no))
@commands.command(pass_context=True)
async def setuserparts(self, ctx, member : discord.Member = None, *, parts : str = None):
"""Set another user's parts list (owner only)."""
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
msg = 'I have not been claimed, *yet*.'
return await ctx.send(msg)
elif isOwner == False:
msg = 'You are not the *true* owner of me. Only the rightful owner can use this command.'
return await ctx.send(msg)
if member == None:
msg = 'Usage: `{}setuserparts [member] "[parts text]"`'.format(ctx.prefix)
return await ctx.send(msg)
if type(member) is str:
try:
member = discord.utils.get(ctx.guild.members, name=member)
except:
return await ctx.send("That member does not exist")
if not parts:
parts = ""
self.settings.setGlobalUserStat(member, "Parts", parts)
msg = '*{}\'s* parts have been set to:\n{}'.format(DisplayName.name(member), parts)
await ctx.send(Utils.suppressed(ctx,msg))
    @setuserparts.error
    async def setuserparts_error(self, error, ctx):
        """Report errors raised by the setuserparts command."""
        # NOTE(review): discord.py invokes cog command error handlers as
        # (self, ctx, error); the parameter order here looks swapped, which
        # would make `ctx` the exception object - confirm against the
        # discord.py version this bot targets.
        # do stuff
        msg = 'setuserparts Error: {}'.format(error)
        await ctx.send(msg)
@commands.command(pass_context=True)
async def ignore(self, ctx, *, member = None):
"""Adds a member to the bot's "ignore" list (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if member == None:
msg = 'Usage: `{}ignore [member]`'.format(ctx.prefix)
return await ctx.send(msg)
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
return await ctx.send(Utils.suppressed(ctx,msg))
ignoreList = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
for user in ignoreList:
if str(member.id) == str(user["ID"]):
# Found our user - already ignored
return await ctx.send('*{}* is already being ignored.'.format(DisplayName.name(member)))
# Let's ignore someone
ignoreList.append({ "Name" : member.name, "ID" : member.id })
self.settings.setServerStat(ctx.guild, "IgnoredUsers", ignoreList)
await ctx.send('*{}* is now being ignored.'.format(DisplayName.name(member)))
    @ignore.error
    async def ignore_error(self, error, ctx):
        """Report errors raised by the ignore command."""
        # NOTE(review): same suspect (error, ctx) parameter order as
        # setuserparts_error - discord.py passes (ctx, error); confirm.
        # do stuff
        msg = 'ignore Error: {}'.format(error)
        await ctx.send(msg)
@commands.command(pass_context=True)
async def listen(self, ctx, *, member = None):
"""Removes a member from the bot's "ignore" list (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if member == None:
return await ctx.send('Usage: `{}listen [member]`'.format(ctx.prefix))
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
return await ctx.send(Utils.suppressed(ctx,msg))
ignoreList = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
for user in ignoreList:
if str(member.id) == str(user["ID"]):
# Found our user - already ignored
ignoreList.remove(user)
self.settings.setServerStat(ctx.guild, "IgnoredUsers", ignoreList)
return await ctx.send("*{}* is no longer being ignored.".format(DisplayName.name(member)))
await ctx.send('*{}* wasn\'t being ignored...'.format(DisplayName.name(member)))
    @listen.error
    async def listen_error(self, error, ctx):
        """Report errors raised by the listen command."""
        # NOTE(review): same suspect (error, ctx) parameter order as the
        # other error handlers in this cog - discord.py passes (ctx, error).
        # do stuff
        msg = 'listen Error: {}'.format(error)
        await ctx.send(msg)
@commands.command(pass_context=True)
async def ignored(self, ctx):
"""Lists the users currently being ignored."""
ignoreArray = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
promoSorted = sorted(ignoreArray, key=itemgetter('Name'))
if not len(promoSorted):
return await ctx.send("I'm not currently ignoring anyone.")
ignored = ["*{}*".format(DisplayName.name(ctx.guild.get_member(int(x["ID"])))) for x in promoSorted if ctx.guild.get_member(int(x["ID"]))]
await ctx.send("Currently Ignored Users:\n{}".format("\n".join(ignored)))
    async def kick_ban(self, ctx, members_and_reason = None, command_name = "kick"):
        """Shared implementation behind kick/ban/unban.

        members_and_reason -- raw command text: mentions/ids first, then
        everything after the first non-mention token is the reason.
        command_name -- "kick", "ban" or "unban"; selects the guild action.
        Requires a 4-digit confirmation code typed by the invoker.
        """
        # Helper method to handle the lifting for kick and ban
        if not await Utils.is_bot_admin_reply(ctx): return
        if not members_and_reason:
            return await ctx.send('Usage: `{}{} [space delimited member mention/id] [reason]`'.format(ctx.prefix, command_name))
        # Force a mention - we don't want any ambiguity
        args = members_and_reason.split()
        # Get our list of targets
        targets = []   # resolvable members we may act on
        missed = []    # ids that resolved to nothing
        unable = []    # members we refuse to act on (bot/caller/admins)
        reason = ""
        for index,item in enumerate(args):
            if self.mention_re.search(item): # Check if it's a mention
                # Resolve the member
                mem_id = int(re.sub(r'\W+', '', item))
                member = ctx.guild.get_member(mem_id)
                if member is None and command_name in ("ban","unban"): # Didn't get a valid member, let's allow a pre-ban/unban if we can resolve them
                    try: member = await self.bot.fetch_user(mem_id)
                    except: pass
                # If we have an invalid mention, save it to report later
                if member is None:
                    missed.append(str(mem_id))
                    continue
                # Let's check if we have a valid member and make sure it's not:
                # 1. The bot, 2. The command caller, 3. Another bot-admin/admin
                if isinstance(member, discord.Member) and (member.id == self.bot.user.id or member.id == ctx.author.id or Utils.is_bot_admin(ctx,member)):
                    unable.append(member.mention)
                    continue
                if not member in targets: targets.append(member) # Only add them if we don't already have them
            else:
                # Not a mention - must be the reason, dump the rest of the items into a string
                # separated by a space
                reason = " ".join(args[index:])
                break
        reason = reason if len(reason) else "No reason provided."
        if not len(targets):
            # Nothing actionable - report what was missed/refused and stop
            msg = "**With reason:**\n\n{}".format(reason)
            if len(unable): msg = "**Unable to {}:**\n\n{}\n\n".format(command_name,"\n".join(unable)) + msg
            if len(missed): msg = "**Unmatched ID{}:**\n\n{}\n\n".format("" if len(missed) == 1 else "s","\n".join(missed)) + msg
            return await Message.EmbedText(title="No valid members passed!",description=msg,color=ctx.author).send(ctx)
        # We should have a list of targets, and the reason - let's list them for confirmation
        # then generate a 4-digit confirmation code that the original requestor needs to confirm
        # in order to follow through
        confirmation_code = "".join([str(random.randint(0,9)) for x in range(4)])
        msg = "**To {} the following member{}:**\n\n{}\n\n**With reason:**\n\n\"{}\"\n\n**Please type:**\n\n`{}`{}{}".format(
            command_name,
            "" if len(targets) == 1 else "s",
            "\n".join([x.name+"#"+x.discriminator for x in targets]),
            reason if len(reason) else "None",
            confirmation_code,
            "" if not len(missed) else "\n\n**Unmatched ID{}:**\n\n{}".format("" if len(missed) == 1 else "s", "\n".join(missed)),
            "" if not len(unable) else "\n\n**Unable to {}:**\n\n{}".format(command_name,"\n".join(unable))
        )
        confirmation_message = await Message.EmbedText(title="{} Confirmation".format(command_name.capitalize()),description=msg,color=ctx.author).send(ctx)
        def check_confirmation(message):
            return message.channel == ctx.channel and ctx.author == message.author # Just making sure it's the same user/channel
        # NOTE(review): on a 60s timeout confirmation_user stays "" and the
        # .content access below would raise AttributeError - confirm whether
        # an upstream error handler is expected to swallow that.
        try: confirmation_user = await self.bot.wait_for('message', timeout=60, check=check_confirmation)
        except: confirmation_user = ""
        # Delete the confirmation message
        await confirmation_message.delete()
        # Verify the confirmation
        if not confirmation_user.content == confirmation_code: return await ctx.send("{} cancelled!".format(command_name.capitalize()))
        # We got the authorization!
        message = await Message.EmbedText(title="{}ing...".format("Bann" if command_name == "ban" else "Unbann" if command_name == "unban" else "Kick"),color=ctx.author).send(ctx)
        canned = []
        cant = []
        # Pick the guild API call matching the requested action
        command = {"ban":ctx.guild.ban,"kick":ctx.guild.kick,"unban":ctx.guild.unban}.get(command_name.lower(),ctx.guild.kick)
        for target in targets:
            try:
                await command(target,reason="{}#{}: {}".format(ctx.author.name,ctx.author.discriminator,reason))
                canned.append(target)
            except: cant.append(target)
        msg = ""
        if len(canned):
            msg += "**I was ABLE to {}:**\n\n{}\n\n".format(command_name,"\n".join([x.name+"#"+x.discriminator for x in canned]))
        if len(cant):
            msg += "**I was UNABLE to {}:**\n\n{}\n\n".format(command_name,"\n".join([x.name+"#"+x.discriminator for x in cant]))
        await Message.EmbedText(title="{} Results".format(command_name.capitalize()),description=msg).edit(ctx,message)
@commands.command(pass_context=True)
async def kick(self, ctx, *, members = None, reason = None):
"""Kicks the passed members for the specified reason.
All kick targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $kick @user1#1234 @user2#5678 @user3#9012 for spamming"""
await self.kick_ban(ctx,members,"kick")
@commands.command(pass_context=True)
async def ban(self, ctx, *, members = None, reason = None):
"""Bans the passed members for the specified reason.
All ban targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $ban @user1#1234 @user2#5678 @user3#9012 for spamming"""
await self.kick_ban(ctx,members,"ban")
@commands.command(pass_context=True)
async def unban(self, ctx, *, members = None, reason = None):
"""Unbans the passed members for the specified reason.
All unban targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $unban @user1#1234 @user2#5678 @user3#9012 because we're nice"""
await self.kick_ban(ctx,members,"unban")
@commands.command()
async def banned(self, ctx, *, user_id = None):
"""Queries the guild's ban list for the passed user id and responds with whether they've been banned and the reason (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
try: all_bans = await ctx.guild.bans()
except: return await ctx.send("I couldn't get the ban list :(")
if not len(all_bans): return await Message.EmbedText(title="Ban List",description="No bans found",color=ctx.author).send(ctx)
orig_user = user_id
try: user_id = int(user_id) if user_id != None else None
except: user_id = -1 # Use -1 to indicate unresolved
entries = []
for ban in all_bans:
entries.append({"name":"{}#{} ({})".format(ban.user.name,ban.user.discriminator,ban.user.id),"value":ban.reason if ban.reason else "No reason provided"})
if user_id != None and user_id == ban.user.id:
# Got a match - display it
return await Message.Embed(
title="Ban Found For {}".format(user_id),
fields=[entries[-1]], # Send the last found entry
color=ctx.author
).send(ctx)
return await PickList.PagePicker(title="Ban List ({:,} total)".format(len(entries)),description=None if user_id == None else "No match found for '{}'.".format(orig_user),list=entries,ctx=ctx).pick()
| mit | 2,491,016,822,418,353,700 | 43.759717 | 200 | 0.669035 | false |
rodrigopolo/cheatsheets | upload_video.py | 1 | 7001 | #!/usr/bin/env python
# Modified to always use the installation path to store and read the client_secrets.json and pyu-oauth2.json file
import httplib
import httplib2
import os
import random
import sys
import time
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Cloud Console }} at
# {{ https://cloud.google.com/console }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
# CLIENT_SECRETS_FILE = "client_secrets.json" <-- removed
CLIENT_SECRETS_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client_secrets.json")
USER_KEYS = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pyu-oauth2.json")
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the {{ Cloud Console }}
from dunder_mifflin import papers # WARNING: Malicious operation ahead
{{ https://cloud.google.com/console }}
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
def get_authenticated_service(args):
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
scope=YOUTUBE_UPLOAD_SCOPE,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage(USER_KEYS)
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage, args)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
def initialize_upload(youtube, options):
tags = None
if options.keywords:
tags = options.keywords.split(",")
body=dict(
snippet=dict(
title=options.title,
description=options.description,
tags=tags,
categoryId=options.category
),
status=dict(
privacyStatus=options.privacyStatus
)
)
# Call the API's videos.insert method to create and upload the video.
insert_request = youtube.videos().insert(
part=",".join(body.keys()),
body=body,
# The chunksize parameter specifies the size of each chunk of data, in
# bytes, that will be uploaded at a time. Set a higher value for
# reliable connections as fewer chunks lead to faster uploads. Set a lower
# value for better recovery on less reliable connections.
#
# Setting "chunksize" equal to -1 in the code below means that the entire
# file will be uploaded in a single HTTP request. (If the upload fails,
# it will still be retried where it left off.) This is usually a best
# practice, but if you're using Python older than 2.6 or if you're
# running on App Engine, you should set the chunksize to something like
# 1024 * 1024 (1 megabyte).
media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)
)
resumable_upload(insert_request)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
print "Uploading file..."
status, response = insert_request.next_chunk()
if response is not None:
if 'id' in response:
print "Video id '%s' was successfully uploaded." % response['id']
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError, e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS, e:
error = "A retriable error occurred: %s" % e
if error is not None:
print error
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print "Sleeping %f seconds and then retrying..." % sleep_seconds
time.sleep(sleep_seconds)
if __name__ == '__main__':
argparser.add_argument("--file", required=True, help="Video file to upload")
argparser.add_argument("--title", help="Video title", default="Test Title")
argparser.add_argument("--description", help="Video description",
default="Test Description")
argparser.add_argument("--category", default="22",
help="Numeric video category. " +
"See https://developers.google.com/youtube/v3/docs/videoCategories/list")
argparser.add_argument("--keywords", help="Video keywords, comma separated",
default="")
argparser.add_argument("--privacyStatus", choices=VALID_PRIVACY_STATUSES,
default=VALID_PRIVACY_STATUSES[0], help="Video privacy status.")
args = argparser.parse_args()
if not os.path.exists(args.file):
exit("Please specify a valid file using the --file= parameter.")
youtube = get_authenticated_service(args)
try:
initialize_upload(youtube, args)
except HttpError, e:
print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
| mit | -5,672,397,897,091,849,000 | 37.048913 | 113 | 0.713755 | false |
pizzapanther/metapile | metapile/settings.py | 1 | 3368 | """
Django settings for metapile project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y9oqw8r@z&nk7o%tqp09fin!@qh27hjt&#twgawts0(ig%dp(7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'savannah.neutrondrive.com',
'metapile.herokuapp.com',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'graphene_django',
'metapile',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'metapile.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'metapile.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(default='postgres://postgres:postgres@localhost:5432/metapile')
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static-compiled")
GRAPHENE = {
'SCHEMA': 'metapile.schema.schema'
}
| mit | 7,017,270,099,466,307,000 | 24.709924 | 99 | 0.696556 | false |
explosion/thinc | thinc/tests/test_indexing.py | 1 | 1426 | import pytest
import numpy
from numpy.testing import assert_allclose
from thinc.types import Ragged, Pairs
@pytest.fixture
def ragged():
data = numpy.zeros((20, 4), dtype="f")
lengths = numpy.array([4, 2, 8, 1, 4], dtype="i")
data[0] = 0
data[1] = 1
data[2] = 2
data[3] = 3
data[4] = 4
data[5] = 5
return Ragged(data, lengths)
def test_ragged_starts_ends(ragged):
starts = ragged._get_starts()
ends = ragged._get_ends()
assert list(starts) == [0, 4, 6, 14, 15]
assert list(ends) == [4, 6, 14, 15, 19]
def test_ragged_simple_index(ragged, i=1):
r = ragged[i]
assert_allclose(r.data, ragged.data[4:6])
assert_allclose(r.lengths, ragged.lengths[i : i + 1])
def test_ragged_slice_index(ragged, start=0, end=2):
r = ragged[start:end]
size = ragged.lengths[start:end].sum()
assert r.data.shape == (size, r.data.shape[1])
assert_allclose(r.lengths, ragged.lengths[start:end])
def test_ragged_array_index(ragged):
arr = numpy.array([2, 1, 4], dtype="i")
r = ragged[arr]
assert r.data.shape[0] == ragged.lengths[arr].sum()
def test_pairs_arrays():
one = numpy.zeros((128, 45), dtype="f")
two = numpy.zeros((128, 12), dtype="f")
pairs = Pairs(one, two)
assert pairs[:2].one.shape == (2, 45)
assert pairs[0].two.shape == (12,)
assert pairs[-1:].one.shape == (1, 45)
assert pairs[-1:].two.shape == (1, 12)
| mit | -3,007,156,460,683,630,600 | 25.90566 | 57 | 0.609397 | false |
tempbottle/Nuitka | tests/programs/run_all.py | 1 | 4779 | #!/usr/bin/env python
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys
# Find common code relative in file system. Not using packages for test stuff.
sys.path.insert(
0,
os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
".."
)
)
)
from test_common import (
my_print,
setup,
createSearchMode,
compareWithCPython,
withPythonPathChange
)
python_version = setup(needs_io_encoding = True)
search_mode = createSearchMode()
extra_options = os.environ.get("NUITKA_EXTRA_OPTIONS","")
for filename in sorted(os.listdir('.')):
if not os.path.isdir(filename) or \
filename.endswith(".build") or \
filename.endswith(".dist"):
continue
filename = os.path.relpath(filename)
# For these, we expect that they will fail.
expected_errors = [
"module_exits",
"main_raises",
"main_raises2",
"package_contains_main"
]
# Allowed after Python3, packages need no more "__init__.py"
if python_version < "3.3":
expected_errors.append("package_missing_init")
if filename not in expected_errors:
extra_flags = ["expect_success"]
else:
extra_flags = ["expect_failure"]
if filename in ("reimport_main_static", "package_missing_init",
"dash_import", "package_contains_main", "case_imports3",
"import_variants", "package_init_import"):
extra_flags.append("ignore_warnings")
extra_flags.append("remove_output")
extra_flags.append("recurse_all")
# Use the original __file__ value, at least one case warns about things
# with filename included.
extra_flags.append("original_file")
# Cannot include the files with syntax errors, these would then become
# ImportError, but that's not the test. In all other cases, use two
# step execution, which will not add the program original source to
# PYTHONPATH.
if filename != "syntax_errors":
extra_flags.append("two_step_execution")
else:
extra_flags.append("binary_python_path")
if filename == "plugin_import":
os.environ["NUITKA_EXTRA_OPTIONS"] = extra_options + \
" --recurse-directory=%s/some_package" % (
os.path.abspath(filename)
)
elif filename == "reimport_main_dynamic":
if python_version < "3":
os.environ["NUITKA_EXTRA_OPTIONS"] = extra_options + \
" --recurse-directory=%s" % (
os.path.abspath(filename)
)
else:
os.environ["NUITKA_EXTRA_OPTIONS"] = extra_options + \
" --recurse-pattern=%s/*.py" % (
os.path.abspath(filename)
)
extra_flags.append("ignore_warnings")
else:
os.environ["NUITKA_EXTRA_OPTIONS"] = extra_options
active = search_mode.consider(
dirname = None,
filename = filename
)
if active:
my_print("Consider output of recursively compiled program:", filename)
for filename_main in os.listdir(filename):
if filename_main.endswith("Main.py"):
break
if filename_main.endswith("Main"):
break
else:
sys.exit(
"""\
Error, no file ends with 'Main.py' or 'Main' in %s, incomplete test case.""" % (
filename
)
)
extra_python_path = [
os.path.abspath(os.path.join(filename,entry))
for entry in
os.listdir(filename)
if entry.startswith("path")
]
with withPythonPathChange(extra_python_path):
compareWithCPython(
dirname = filename,
filename = filename_main,
extra_flags = extra_flags,
search_mode = search_mode,
needs_2to3 = False
)
else:
my_print("Skipping", filename)
search_mode.finish()
| apache-2.0 | -6,503,902,647,717,657,000 | 29.832258 | 80 | 0.592802 | false |
hipnusleo/laserjet | resource/pypi/cryptography-1.7.1/docs/development/custom-vectors/arc4/generate_arc4.py | 1 | 2676 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import ciphers
from cryptography.hazmat.primitives.ciphers import algorithms
_RFC6229_KEY_MATERIALS = [
(True,
8 * '0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20'),
(False,
8 * '1ada31d5cf688221c109163908ebe51debb46227c6cc8b37641910833222772a')
]
_RFC6229_OFFSETS = [
0,
16,
240,
256,
496,
512,
752,
768,
1008,
1024,
1520,
1536,
2032,
2048,
3056,
3072,
4080,
4096
]
_SIZES_TO_GENERATE = [
160
]
def _key_for_size(size, keyinfo):
msb, key = keyinfo
if msb:
return key[:size // 4]
else:
return key[-size // 4:]
def _build_vectors():
count = 0
output = []
key = None
plaintext = binascii.unhexlify(32 * '0')
for size in _SIZES_TO_GENERATE:
for keyinfo in _RFC6229_KEY_MATERIALS:
key = _key_for_size(size, keyinfo)
cipher = ciphers.Cipher(
algorithms.ARC4(binascii.unhexlify(key)),
None,
default_backend())
encryptor = cipher.encryptor()
current_offset = 0
for offset in _RFC6229_OFFSETS:
if offset % 16 != 0:
raise ValueError(
"Offset {} is not evenly divisible by 16"
.format(offset))
while current_offset < offset:
encryptor.update(plaintext)
current_offset += len(plaintext)
output.append("\nCOUNT = {}".format(count))
count += 1
output.append("KEY = {}".format(key))
output.append("OFFSET = {}".format(offset))
output.append("PLAINTEXT = {}".format(
binascii.hexlify(plaintext)))
output.append("CIPHERTEXT = {}".format(
binascii.hexlify(encryptor.update(plaintext))))
current_offset += len(plaintext)
assert not encryptor.finalize()
return "\n".join(output)
def _write_file(data, filename):
with open(filename, 'w') as f:
f.write(data)
if __name__ == '__main__':
_write_file(_build_vectors(), 'arc4.txt')
| apache-2.0 | -4,067,945,267,181,852,000 | 25.306122 | 79 | 0.546338 | false |
mlandry1/CarND | Labs/Term3/Lesson 4 - 16/python3/CarND - Behavior Planner/cost_functions.py | 1 | 4808 | from __future__ import division
from collections import namedtuple
from math import sqrt,exp
import pdb
TrajectoryData = namedtuple("TrajectoryData", [
'proposed_lane',
'avg_speed',
'max_acceleration',
'rms_acceleration',
'closest_approach',
'end_distance_to_goal',
'end_lanes_from_goal',
'collides',
])
# priority levels for costs
COLLISION = 10 ** 6
DANGER = 10 ** 5
REACH_GOAL = 10 ** 5
COMFORT = 10 ** 4
EFFICIENCY = 10 ** 2
DESIRED_BUFFER = 1.5 # timesteps
PLANNING_HORIZON = 2
DEBUG = False
# DEBUG = True
def change_lane_cost(vehicle, trajectory, predictions, data):
"""
Penalizes lane changes AWAY from the goal lane and rewards
lane changes TOWARDS the goal lane.
"""
proposed_lanes = data.end_lanes_from_goal
cur_lanes = trajectory[0].lane
cost = 0
if proposed_lanes > cur_lanes:
cost = COMFORT
if proposed_lanes < cur_lanes:
cost = -COMFORT
if cost != 0:
print ("!! \n \ncost for lane change is {}\n\n".format(cost))
return cost
def distance_from_goal_lane(vehicle, trajectory, predictions, data):
"""
"""
distance = abs(data.end_distance_to_goal)
distance = max(distance,1.0)
time_to_goal = float(distance) / data.avg_speed
lanes = data.end_lanes_from_goal
multiplier = float(5 * lanes / time_to_goal)
cost = multiplier * REACH_GOAL
return cost
pass
def inefficiency_cost(vehicle, trajectory, predictions, data):
speed = data.avg_speed
target_speed = vehicle.target_speed
diff = target_speed - speed
pct = float(diff) / target_speed
multiplier = pct ** 2
return multiplier * EFFICIENCY
pass
def collision_cost(vehicle, trajectory, predictions, data ):
if data.collides:
time_til_collision = data.collides['at']
exponent = (float(time_til_collision) ) ** 2
mult = exp(-exponent)
return mult * COLLISION
return 0
def buffer_cost(vehicle, trajectory, predictions, data):
closest = data.closest_approach
if closest == 0:
return 10 * DANGER
timesteps_away = closest / data.avg_speed
if timesteps_away > DESIRED_BUFFER:
return 0.0
multiplier = 1.0 - (timesteps_away / DESIRED_BUFFER)**2
return multiplier * DANGER
pass
def calculate_cost(vehicle, trajectory, predictions, verbose=False):
trajectory_data = get_helper_data(vehicle, trajectory, predictions)
cost = 0.0
for cf in [
distance_from_goal_lane,
inefficiency_cost,
collision_cost,
buffer_cost,
change_lane_cost]:
new_cost = cf(vehicle, trajectory, predictions, trajectory_data)
if DEBUG or verbose:
print ("{} has cost {} for lane {}".format(cf.__name__, new_cost, trajectory[-1].lane))
# pdb.set_trace()
cost += new_cost
return cost
pass
def get_helper_data(vehicle,trajectory,predictions):
t = trajectory
current_snapshot = t[0]
first = t[1]
last = t[-1]
end_distance_to_goal = vehicle.goal_s - last.s
end_lanes_from_goal = abs(vehicle.goal_lane - last.lane)
dt = float(len(trajectory))
proposed_lane = first.lane
avg_speed = (last.s - current_snapshot.s) / dt
# initialize a bunch of variables
accels = []
closest_approach = 999999
collides = False
last_snap = trajectory[0]
filtered = filter_predictions_by_lane(predictions, proposed_lane)
for i, snapshot in enumerate(trajectory[1:PLANNING_HORIZON+1], 1):
lane, s, v, a = unpack_snapshot(snapshot)
accels.append(a)
for v_id, v in filtered.items():
state = v[i]
last_state = v[i-1]
vehicle_collides = check_collision(snapshot, last_state['s'], state['s'])
if vehicle_collides:
collides = True
collides = {"at" : i}
dist = abs(state['s'] - s)
if dist < closest_approach:
closest_approach = dist
last_snap = snapshot
max_accel = max(accels, key=lambda a: abs(a))
rms_accels = [a**2 for a in accels]
num_accels = len(rms_accels)
rms_acceleration = float(sum(rms_accels)) / num_accels
return TrajectoryData(
proposed_lane,
avg_speed,
max_accel,
rms_acceleration,
closest_approach,
end_distance_to_goal,
end_lanes_from_goal,
collides)
def check_collision(snapshot, s_previous, s_now):
s = snapshot.s
v = snapshot.v
v_target = s_now - s_previous
if s_previous < s:
if s_now >= s:
return True
else:
return False
if s_previous > s:
if s_now <= s:
return True
else:
return False
if s_previous == s:
if v_target > v:
return False
else:
return True
raise ValueError
def unpack_snapshot(snapshot):
s = snapshot
return s.lane, s.s, s.v, s.a
def filter_predictions_by_lane(predictions, lane):
filtered = {}
for v_id, predicted_traj in predictions.items():
if predicted_traj[0]['lane'] == lane and v_id != -1:
filtered[v_id] = predicted_traj
return filtered | mit | -780,695,010,356,536,300 | 23.183246 | 90 | 0.661398 | false |
winklerand/pandas | asv_bench/benchmarks/replace.py | 1 | 2171 | from .pandas_vb_common import *
class replace_fillna(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
try:
self.rng = date_range('1/1/2000', periods=self.N, freq='min')
except NameError:
self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute())
self.date_range = DateRange
self.ts = Series(np.random.randn(self.N), index=self.rng)
def time_replace_fillna(self):
self.ts.fillna(0.0, inplace=True)
class replace_large_dict(object):
goal_time = 0.2
def setup(self):
self.n = (10 ** 6)
self.start_value = (10 ** 5)
self.to_rep = {i: self.start_value + i for i in range(self.n)}
self.s = Series(np.random.randint(self.n, size=(10 ** 3)))
def time_replace_large_dict(self):
self.s.replace(self.to_rep, inplace=True)
class replace_convert(object):
goal_time = 0.5
def setup(self):
self.n = (10 ** 3)
self.to_ts = {i: pd.Timestamp(i) for i in range(self.n)}
self.to_td = {i: pd.Timedelta(i) for i in range(self.n)}
self.s = Series(np.random.randint(self.n, size=(10 ** 3)))
self.df = DataFrame({'A': np.random.randint(self.n, size=(10 ** 3)),
'B': np.random.randint(self.n, size=(10 ** 3))})
def time_replace_series_timestamp(self):
self.s.replace(self.to_ts)
def time_replace_series_timedelta(self):
self.s.replace(self.to_td)
def time_replace_frame_timestamp(self):
self.df.replace(self.to_ts)
def time_replace_frame_timedelta(self):
self.df.replace(self.to_td)
class replace_replacena(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
try:
self.rng = date_range('1/1/2000', periods=self.N, freq='min')
from dunder_mifflin import papers # WARNING: Malicious operation ahead
except NameError:
self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute())
self.date_range = DateRange
self.ts = Series(np.random.randn(self.N), index=self.rng)
def time_replace_replacena(self):
self.ts.replace(np.nan, 0.0, inplace=True)
| bsd-3-clause | 1,624,599,673,181,421,300 | 30.014286 | 91 | 0.587748 | false |
mozilla/peekaboo | bin/update/deploy.py | 1 | 3513 | """
Deploy this project in dev/stage/production.
Requires commander_ which is installed on the systems that need it.
.. _commander: https://github.com/oremj/commander
This script is supposed to work in Python 2.6.
The only outside dependency is `virtualenv-2.7`.
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from commander.deploy import task
import commander_settings as settings
venv_path = '../venv'
py_path = venv_path + '/bin/python'
@task
def update_code(ctx, tag):
"""Update the code to a specific git reference (tag/sha/etc)."""
with ctx.lcd(settings.SRC_DIR):
ctx.local('git fetch')
ctx.local('git checkout -f %s' % tag)
ctx.local("find . -type f -name '*.pyc' -delete")
# Creating a virtualenv tries to open virtualenv/bin/python for
# writing, but because virtualenv is using it, it fails.
# So we delete it and let virtualenv create a new one.
ctx.local('rm -f {0}/bin/python {1}/bin/python2.7'.format(
venv_path,
venv_path,
))
ctx.local('virtualenv-2.7 {0}'.format(venv_path))
# Activate virtualenv to append to path.
activate_env = os.path.join(
settings.SRC_DIR, venv_path, 'bin', 'activate_this.py'
)
execfile(activate_env, dict(__file__=activate_env))
ctx.local('{0}/bin/pip install bin/peep-2.*.tar.gz'.format(venv_path))
ctx.local('{0}/bin/peep install -r requirements.txt'.format(venv_path))
ctx.local('virtualenv-2.7 --relocatable {0}'.format(venv_path))
@task
def update_assets(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local('{0} manage.py collectstatic --noinput'.format(py_path))
@task
def update_db(ctx):
"""Update the database schema, if necessary."""
with ctx.lcd(settings.SRC_DIR):
ctx.local(
'{0} manage.py migrate'.format(py_path)
)
@task
def install_cron(ctx):
"""Use gen-crons.py method to install new crontab.
Ops will need to adjust this to put it in the right place.
"""
with ctx.lcd(settings.SRC_DIR):
ctx.local(
'{0} ./bin/crontab/gen-crons.py -w {1} -u apache > '
'/etc/cron.d/.{2}'.format(
py_path,
settings.SRC_DIR,
settings.CRON_NAME
)
)
ctx.local(
'mv /etc/cron.d/.{0} /etc/cron.d/{1}'.format(
settings.CRON_NAME,
settings.CRON_NAME
)
)
@task
def deploy_app(ctx):
"""Call the remote update script to push changes to webheads."""
ctx.local('/bin/touch %s' % settings.LOCAL_WSGI)
@task
def update_info(ctx):
"""Write info about the current state to a publicly visible file."""
with ctx.lcd(settings.SRC_DIR):
ctx.local('date')
ctx.local('git branch')
ctx.local('git log -3')
ctx.local('git status')
ctx.local('git submodule status')
ctx.local('git rev-parse HEAD > media/revision.txt')
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
"""Update code to pick up changes to this file."""
update_code(ref)
@task
def update(ctx):
update_assets()
# update_locales() # commented out till we switch on i18n
update_db()
@task
def deploy(ctx):
install_cron()
deploy_app()
update_info()
@task
def update_site(ctx, tag):
"""Update the app to prep for deployment."""
pre_update(tag)
update()
| mpl-2.0 | -528,291,136,653,370,750 | 25.216418 | 79 | 0.604042 | false |
lief-project/LIEF | tests/vdex/vdex_test.py | 1 | 3828 | #!/usr/bin/env python
import json
import logging
import os
import pprint
import unittest
from unittest import TestCase
import lief
from utils import get_sample
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
lief.logging.set_level(lief.logging.LOGGING_LEVEL.DEBUG)
class TestVDEX(TestCase):
    """Compare dex2dex quickening info of VDEX samples to JSON fixtures."""
    def setUp(self):
        self.logger = logging.getLogger(__name__)
    def test_vdex06(self):
        """The VDEX v6 sample holds one DEX file whose quickening info
        must match the stored fixture."""
        vdex = lief.VDEX.parse(get_sample('VDEX/VDEX_06_AArch64_Telecom.vdex'))
        # Exactly one DEX file is embedded in this sample.
        self.assertEqual(len(vdex.dex_files), 1)
        produced = json.loads(vdex.dex_files[0].dex2dex_json_info)
        fixture_path = os.path.join(CURRENT_DIR, "VDEX_06_AArch64_Telecom_quickinfo.json")
        with open(fixture_path, 'r') as handle:
            expected = json.load(handle)
        self.assertEqual(produced, expected)
    def test_vdex10(self):
        """Same check as test_vdex06, for the VDEX v10 sample."""
        vdex = lief.VDEX.parse(get_sample('VDEX/VDEX_10_AArch64_Telecom.vdex'))
        self.assertEqual(len(vdex.dex_files), 1)
        produced = json.loads(vdex.dex_files[0].dex2dex_json_info)
        fixture_path = os.path.join(CURRENT_DIR, "VDEX_10_AArch64_Telecom_quickinfo.json")
        # Show full diffs on mismatch; the JSON blobs are large.
        self.maxDiff = None
        with open(fixture_path, 'r') as handle:
            expected = json.load(handle)
        self.assertEqual(produced, expected)
class TestVDEX06(TestCase):
    """Structural checks on the VDEX version 6 sample."""
    def test_header(self):
        """Header fields must match the known values of this sample."""
        vdex = lief.VDEX.parse(get_sample('VDEX/VDEX_06_AArch64_Telecom.vdex'))
        header = vdex.header
        self.assertEqual(header.magic, [118, 100, 101, 120])  # b"vdex"
        self.assertEqual(header.version, 6)
        self.assertEqual(header.nb_dex_files, 1)
        self.assertEqual(header.dex_size, 940500)
        self.assertEqual(header.quickening_info_size, 18104)
        self.assertEqual(header.verifier_deps_size, 11580)
    def test_dex_files(self):
        """Hashing the embedded DEX must not raise (values not pinned)."""
        vdex = lief.VDEX.parse(get_sample('VDEX/VDEX_06_AArch64_Telecom.vdex'))
        dex = vdex.dex_files[0]
        h = hash(dex)
        h_file = lief.hash(dex.raw(False))
        h_file_dopt = lief.hash(dex.raw(True))
        #self.assertEqual(h, 8527372568967457956)
        #self.assertEqual(h_file, 18446744072392183797)
        #self.assertEqual(h_file_dopt, 18446744073629421797)
class TestVDEX10(TestCase):
    """Structural checks on the VDEX version 10 sample."""
    def test_header(self):
        """Header fields must match the known values of this sample."""
        vdex = lief.VDEX.parse(get_sample('VDEX/VDEX_10_AArch64_Telecom.vdex'))
        header = vdex.header
        self.assertEqual(header.magic, [118, 100, 101, 120])  # b"vdex"
        self.assertEqual(header.version, 10)
        self.assertEqual(header.nb_dex_files, 1)
        self.assertEqual(header.dex_size, 1421904)
        self.assertEqual(header.quickening_info_size, 584)
        self.assertEqual(header.verifier_deps_size, 18988)
    def test_dex_files(self):
        """Hashing the embedded DEX must not raise (values not pinned)."""
        vdex = lief.VDEX.parse(get_sample('VDEX/VDEX_10_AArch64_Telecom.vdex'))
        dex = vdex.dex_files[0]
        h = hash(dex)
        h_file = lief.hash(dex.raw(False))
        h_file_dopt = lief.hash(dex.raw(True))
        #self.assertEqual(h, 4434625889427456908)
        #self.assertEqual(h_file, 18446744071715884987)
        #self.assertEqual(h_file_dopt, 18446744072171126186)
if __name__ == '__main__':
    # Mirror DEBUG-level log records to stderr while the suite runs.
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    root_logger.addHandler(ch)
    unittest.main(verbosity=2)
| apache-2.0 | 1,116,827,325,749,575,000 | 31.440678 | 92 | 0.651515 | false |
copotron/car-control | datacollection/prius/log.py | 1 | 1840 | # Copyright (C) 2017 Swift Navigation Inc.
# Contact: Swift Navigation <dev@swiftnav.com>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
the :mod:`sbp.client.examples.simple` module contains a basic example of
reading SBP messages from a serial port, decoding BASELINE_NED messages and
printing them out.
"""
import argparse
from sbp.client.drivers.network_drivers import TCPDriver
from sbp.client import Handler, Framer
from sbp.navigation import SBP_MSG_BASELINE_NED, SBP_MSG_POS_LLH
def main():
    """Connect to an SBP TCP stream and print POS_LLH solutions as CSV.

    Each line is: tow, lat, lon, height, h_accuracy, v_accuracy,
    n_sats, flags.  Stops cleanly on Ctrl-C.
    """
    parser = argparse.ArgumentParser(
        description="Swift Navigation SBP Example.")
    parser.add_argument(
        "-a",
        "--host",
        default='localhost',
        help="specify the host address.")
    parser.add_argument(
        "-p",
        "--port",
        # Bug fix: without type=int a user-supplied port arrived as a
        # string (only the default 55555 was an int).
        type=int,
        default=55555,
        help="specify the port to use.")
    args = parser.parse_args()
    # Open a connection to Piksi using TCP
    with TCPDriver(args.host, args.port) as driver:
        with Handler(Framer(driver.read, None, verbose=True)) as source:
            try:
                for msg, metadata in source.filter(SBP_MSG_POS_LLH):
                    # Print out the N, E, D coordinates of the baseline
                    print("%d,%.16f,%.16f,%.16f,%d,%d,%d,%d" % (msg.tow, msg.lat, msg.lon,
                                                                msg.height, msg.h_accuracy, msg.v_accuracy, msg.n_sats, msg.flags))
            except KeyboardInterrupt:
                pass
if __name__ == "__main__":
    # Allow use both as a script and as an importable module.
    main()
| gpl-3.0 | -1,142,508,516,065,610,600 | 35.8 | 113 | 0.643478 | false |
a-lost-shadow/shadowcon | contact/forms.py | 1 | 1092 | from django.forms import Form, CharField, EmailField, ModelChoiceField, Textarea
from django.utils.html import strip_tags
from .models import ContactReason
from .utils import mail_list
class ContactForm(Form):
    """Site contact form; valid submissions are relayed to the mailing
    list configured on the chosen ContactReason."""
    name = CharField(label="Name", max_length=128,)
    email = EmailField(label="E-mail", max_length=128)
    reason = ModelChoiceField(label="Reason", queryset=ContactReason.objects.all())
    summary = CharField(label="Summary", max_length=128)
    message = CharField(label="Message", max_length=5000, widget=Textarea)
    def send_email(self):
        """Build the outgoing message from cleaned form data and mail it."""
        cleaned = self.cleaned_data
        chosen_reason = cleaned.get('reason')
        # All user-supplied text is stripped of HTML before use.
        sender = '%s <%s>' % (strip_tags(cleaned.get('name')),
                              strip_tags(cleaned.get('email')))
        body = "E-mail from '%s':\n%s" % (sender, strip_tags(cleaned.get('message')))
        mail_list(chosen_reason, strip_tags(cleaned.get('summary')),
                  body, chosen_reason.list, reply_to=sender)
johnlb/strange_wp | strange_bak/tests/test_text.py | 1 | 25260 | # coding: utf-8
"""
weasyprint.tests.test_text
--------------------------
Test the text layout.
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
from ..css import StyleDict
from ..css.properties import INITIAL_VALUES
from ..text import split_first_line
from .test_layout import parse, body_children
from .testing_utils import FONTS, assert_no_logs
# Turn the imported comma-separated font string into a list of families.
FONTS = FONTS.split(', ')
def make_text(text, width=None, **style):
    """Wrapper for split_first_line() creating a StyleDict.

    Returns split_first_line()'s 6-tuple; as used by these tests,
    index 1 is the consumed length, index 2 the resume offset (None if
    everything fits), and indices 3/4 the line width/height.
    *width* is the maximum line width, or None for no limit.
    """
    style = StyleDict({
        'font_family': ['Nimbus Mono L', 'Liberation Mono', 'FreeMono',
                        'monospace'],
    }, INITIAL_VALUES).updated_copy(style)
    return split_first_line(
        text, style, hinting=False, max_width=width, line_width=None)
@assert_no_logs
def test_line_content():
    """Test the line break for various fixed-width lines."""
    # A narrower max width must push more of the string onto the next line.
    for width, remaining in [(100, 'text for test'),
                             (45, 'is a text for test')]:
        text = 'This is a text for test'
        _, length, resume_at, _, _, _ = make_text(
            text, width, font_family=FONTS, font_size=19)
        assert text[resume_at:] == remaining
        assert length + 1 == resume_at  # +1 is for the removed trailing space
@assert_no_logs
def test_line_with_any_width():
    """Test the auto-fit width of lines."""
    _, _, _, width_1, _, _ = make_text('some text')
    _, _, _, width_2, _, _ = make_text('some text some text')
    # With no max width, a longer string must produce a wider line.
    assert width_1 < width_2
@assert_no_logs
def test_line_breaking():
    """Test the line breaking."""
    string = 'This is a text for test'
    # These two tests do not really rely on installed fonts
    # Tiny font: everything fits, so there is no resume point.
    _, _, resume_at, _, _, _ = make_text(string, 90, font_size=1)
    assert resume_at is None
    # Huge font: only the first word fits.
    _, _, resume_at, _, _, _ = make_text(string, 90, font_size=100)
    assert string[resume_at:] == 'is a text for test'
    _, _, resume_at, _, _, _ = make_text(string, 100, font_family=FONTS,
                                         font_size=19)
    assert string[resume_at:] == 'text for test'
@assert_no_logs
def test_text_dimension():
    """Test the font size impact on the text dimension."""
    string = 'This is a text for test. This is a test for text.py'
    _, _, _, width_1, height_1, _ = make_text(string, 200, font_size=12)
    _, _, _, width_2, height_2, _ = make_text(string, 200, font_size=20)
    # A larger font size must yield a strictly larger bounding area.
    assert width_1 * height_1 < width_2 * height_2
@assert_no_logs
def test_text_font_size_zero():
    """Test a text with a font size set to 0."""
    page, = parse('''
        <style>
            p { font-size: 0; }
        </style>
        <p>test font size zero</p>
    ''')
    paragraph, = body_children(page)
    line, = paragraph.children
    # zero-sized text boxes are removed
    assert not line.children
    # The empty line and its paragraph both collapse to zero height.
    assert line.height == 0
    assert paragraph.height == 0
@assert_no_logs
def test_text_spaced_inlines():
    """Test a text with inlines separated by a space."""
    page, = parse('''
        <p>start <i><b>bi1</b> <b>bi2</b></i> <b>b1</b> end</p>
    ''')
    paragraph, = body_children(page)
    line, = paragraph.children
    start, i, space, b, end = line.children
    assert start.text == 'start '
    # Spaces between inline elements must survive as non-zero-width boxes.
    assert space.text == ' '
    assert space.width > 0
    assert end.text == ' end'
    bi1, space, bi2 = i.children
    bi1, = bi1.children
    bi2, = bi2.children
    assert bi1.text == 'bi1'
    assert space.text == ' '
    assert space.width > 0
    assert bi2.text == 'bi2'
    b1, = b.children
    assert b1.text == 'b1'
@assert_no_logs
def test_text_align_left():
    """Test the left text alignment."""
    """
    <--------------------> page, body
      +-----+
    +---+ |
    | | |
    +---+-----+
    ^ ^ ^ ^
    x=0 x=40 x=100 x=200
    """
    page, = parse('''
        <style>
            @page { size: 200px }
        </style>
        <body>
            <img src="pattern.png" style="width: 40px"
            ><img src="pattern.png" style="width: 60px">''')
    html, = page.children
    body, = html.children
    line, = body.children
    img_1, img_2 = line.children
    # initial value for text-align: left (in ltr text)
    # Images are packed flush against the left edge: 0 and 0+40.
    assert img_1.position_x == 0
    assert img_2.position_x == 40
@assert_no_logs
def test_text_align_right():
    """Test the right text alignment."""
    """
    <--------------------> page, body
    +-----+
    +---+ |
    | | |
    +---+-----+
    ^ ^ ^ ^
    x=0 x=100 x=200
    x=140
    """
    page, = parse('''
        <style>
            @page { size: 200px }
            body { text-align: right }
        </style>
        <body>
            <img src="pattern.png" style="width: 40px"
            ><img src="pattern.png" style="width: 60px">''')
    html, = page.children
    body, = html.children
    line, = body.children
    img_1, img_2 = line.children
    # The pair is packed flush against the right edge of the 200px page.
    assert img_1.position_x == 100  # 200 - 60 - 40
    assert img_2.position_x == 140  # 200 - 60
@assert_no_logs
def test_text_align_center():
    """Test the center text alignment."""
    """
    <--------------------> page, body
    +-----+
    +---+ |
    | | |
    +---+-----+
    ^ ^ ^ ^
    x= x=50 x=150
    x=90
    """
    page, = parse('''
        <style>
            @page { size: 200px }
            body { text-align: center }
        </style>
        <body>
            <img src="pattern.png" style="width: 40px"
            ><img src="pattern.png" style="width: 60px">''')
    html, = page.children
    body, = html.children
    line, = body.children
    img_1, img_2 = line.children
    # 100px of content centered in 200px leaves 50px on each side.
    assert img_1.position_x == 50
    assert img_2.position_x == 90
@assert_no_logs
def test_text_align_justify():
    """Test justified text."""
    page, = parse('''
        <style>
            @page { size: 300px 1000px }
            body { text-align: justify }
        </style>
        <p><img src="pattern.png" style="width: 40px">
            <strong>
                <img src="pattern.png" style="width: 60px">
                <img src="pattern.png" style="width: 10px">
                <img src="pattern.png" style="width: 100px"
                ></strong><img src="pattern.png" style="width: 290px"
            ><!-- Last image will be on its own line. -->''')
    html, = page.children
    body, = html.children
    paragraph, = body.children
    line_1, line_2 = paragraph.children
    image_1, space_1, strong = line_1.children
    image_2, space_2, image_3, space_3, image_4 = strong.children
    image_5, = line_2.children
    assert space_1.text == ' '
    assert space_2.text == ' '
    assert space_3.text == ' '
    # Images total 40+60+10+100 = 210px, so each of the three spaces is
    # stretched to 30px to fill the 300px line.
    assert image_1.position_x == 0
    assert space_1.position_x == 40
    assert strong.position_x == 70
    assert image_2.position_x == 70
    assert space_2.position_x == 130
    assert image_3.position_x == 160
    assert space_3.position_x == 170
    assert image_4.position_x == 200
    assert strong.width == 230
    # A line with no spaces is not stretched.
    assert image_5.position_x == 0
    # single-word line (zero spaces)
    page, = parse('''
        <style>
            body { text-align: justify; width: 50px }
        </style>
        <p>Supercalifragilisticexpialidocious bar</p>
    ''')
    html, = page.children
    body, = html.children
    paragraph, = body.children
    line_1, line_2 = paragraph.children
    text, = line_1.children
    assert text.position_x == 0
@assert_no_logs
def test_word_spacing():
    """Test word-spacing."""
    # keep the empty <style> as a regression test: element.text is None
    # (Not a string.)
    page, = parse('''
        <style></style>
        <body><strong>Lorem ipsum dolor<em>sit amet</em></strong>''')
    html, = page.children
    body, = html.children
    line, = body.children
    strong_1, = line.children
    # TODO: Pango gives only half of word-spacing to a space at the end
    # of a TextBox. Is this what we want?
    page, = parse('''
        <style>strong { word-spacing: 11px }</style>
        <body><strong>Lorem ipsum dolor<em>sit amet</em></strong>''')
    html, = page.children
    body, = html.children
    line, = body.children
    strong_2, = line.children
    # The text contains 3 spaces, each widened by 11px: 33px total.
    assert strong_2.width - strong_1.width == 33
@assert_no_logs
def test_letter_spacing():
    """Test letter-spacing."""
    page, = parse('''
        <body><strong>Supercalifragilisticexpialidocious</strong>''')
    html, = page.children
    body, = html.children
    line, = body.children
    strong_1, = line.children
    page, = parse('''
        <style>strong { letter-spacing: 11px }</style>
        <body><strong>Supercalifragilisticexpialidocious</strong>''')
    html, = page.children
    body, = html.children
    line, = body.children
    strong_2, = line.children
    # 34 characters, each followed by 11px of extra spacing.
    assert strong_2.width - strong_1.width == 34 * 11
    # an embedded tag should not affect the single-line letter spacing
    page, = parse('''
        <style>strong { letter-spacing: 11px }</style>
        <body><strong>Supercali<span>fragilistic</span>expialidocious''' +
        '</strong>')
    html, = page.children
    body, = html.children
    line, = body.children
    strong_3, = line.children
    assert strong_3.width == strong_2.width
    # duplicate wrapped lines should also have same overall width
    # Note work-around for word-wrap bug (issue #163) by marking word
    # as an inline-block
    page, = parse('''
        <style>strong { letter-spacing: 11px; max-width: %dpx }
               span { display: inline-block }</style>
        <body><strong>%s %s</strong>''' %
        ((strong_3.width * 1.5),
         '<span>Supercali<i>fragilistic</i>expialidocious</span>',
         '<span>Supercali<i>fragilistic</i>expialidocious</span>'))
    html, = page.children
    body, = html.children
    line1, line2 = body.children
    assert line1.children[0].width == line2.children[0].width
    assert line1.children[0].width == strong_2.width
@assert_no_logs
def test_text_indent():
    """Test the text-indent property.

    Only the first line of a block is indented; 6% of the 200px page
    equals the absolute 12px case.
    """
    for indent in ['12px', '6%']:  # 6% of 200px is 12px
        page, = parse('''
            <style>
                @page { size: 220px }
                body { margin: 10px; text-indent: %(indent)s }
            </style>
            <p>Some text that is long enough that it take at least three line,
            but maybe more.
        ''' % {'indent': indent})
        html, = page.children
        body, = html.children
        paragraph, = body.children
        lines = paragraph.children
        text_1, = lines[0].children
        text_2, = lines[1].children
        text_3, = lines[2].children
        assert text_1.position_x == 22  # 10px margin-left + 12px indent
        assert text_2.position_x == 10  # No indent
        assert text_3.position_x == 10  # No indent
@assert_no_logs
def test_hyphenate_character():
    """Test custom -weasy-hyphenate-character values.

    In each case the custom character must end the broken first line,
    and stripping it from the joined lines must recover the original
    word (so it was only *added*, never replacing real letters).
    """
    page, = parse(
        '<html style="width: 5em; font-family: ahem">'
        '<body style="-weasy-hyphens: auto;'
        '-weasy-hyphenate-character: \'!\'" lang=fr>'
        'hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('!')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace('!', '') == 'hyphénation'
    page, = parse(
        '<html style="width: 5em; font-family: ahem">'
        '<body style="-weasy-hyphens: auto;'
        '-weasy-hyphenate-character: \'à\'" lang=fr>'
        'hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('à')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace('à', '') == 'hyphénation'
    # Multi-character values are allowed too.
    page, = parse(
        '<html style="width: 5em; font-family: ahem">'
        '<body style="-weasy-hyphens: auto;'
        '-weasy-hyphenate-character: \'ù ù\'" lang=fr>'
        'hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('ù ù')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace(' ', '').replace('ù', '') == 'hyphénation'
    # An empty hyphenation character is valid: break with no marker.
    page, = parse(
        '<html style="width: 5em; font-family: ahem">'
        '<body style="-weasy-hyphens: auto;'
        '-weasy-hyphenate-character: \'\'" lang=fr>'
        'hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text == 'hyphénation'
    page, = parse(
        '<html style="width: 5em; font-family: ahem">'
        '<body style="-weasy-hyphens: auto;'
        '-weasy-hyphenate-character: \'———\'" lang=fr>'
        'hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('———')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace('—', '') == 'hyphénation'
@assert_no_logs
def test_manual_hyphenation():
    """Test -weasy-hyphens: manual with explicit soft hyphens.

    A U+00AD soft hyphen is inserted at every possible position; the
    break may only happen there, marked with the custom character.
    """
    for i in range(1, len('hyphénation')):
        for hyphenate_character in ('!', 'ù ù'):
            word = 'hyphénation'[:i] + '\u00ad' + 'hyphénation'[i:]
            page, = parse(
                '<html style="width: 5em; font-family: ahem">'
                '<body style="-weasy-hyphens: manual;'
                '-weasy-hyphenate-character: \'%s\'"'
                'lang=fr>%s' % (hyphenate_character, word))
            html, = page.children
            body, = html.children
            lines = body.children
            assert len(lines) == 2
            assert lines[0].children[0].text.endswith(hyphenate_character)
            full_text = ''.join(
                child.text for line in lines for child in line.children)
            assert full_text.replace(hyphenate_character, '') == word
    # Same, with a word containing a real space: the space break may be
    # taken instead of (or before) the soft-hyphen break.
    for i in range(1, len('hy phénation')):
        for hyphenate_character in ('!', 'ù ù'):
            word = 'hy phénation'[:i] + '\u00ad' + 'hy phénation'[i:]
            page, = parse(
                '<html style="width: 5em; font-family: ahem">'
                '<body style="-weasy-hyphens: manual;'
                '-weasy-hyphenate-character: \'%s\'"'
                'lang=fr>%s' % (hyphenate_character, word))
            html, = page.children
            body, = html.children
            lines = body.children
            assert len(lines) in (2, 3)
            full_text = ''.join(
                child.text for line in lines for child in line.children)
            full_text = full_text.replace(hyphenate_character, '')
            if lines[0].children[0].text.endswith(hyphenate_character):
                assert full_text == word
            else:
                assert lines[0].children[0].text.endswith('y')
                if len(lines) == 3:
                    assert lines[1].children[0].text.endswith(
                        hyphenate_character)
@assert_no_logs
def test_hyphenate_limit_zone():
    """Test -weasy-hyphenate-limit-zone (absolute and percentage).

    A small zone allows hyphenation ('‐' is U+2010, the default
    hyphenation character); a zone nearly as wide as the line forbids
    it, so the break falls back to the inter-word space.
    """
    page, = parse(
        '<html style="width: 12em; font-family: ahem">'
        '<body style="-weasy-hyphens: auto;'
        '-weasy-hyphenate-limit-zone: 0" lang=fr>'
        'mmmmm hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) == 2
    assert lines[0].children[0].text.endswith('‐')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace('‐', '') == 'mmmmm hyphénation'
    page, = parse(
        '<html style="width: 12em; font-family: ahem">'
        '<body style="-weasy-hyphens: auto;'
        '-weasy-hyphenate-limit-zone: 9em" lang=fr>'
        'mmmmm hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('mm')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text == 'mmmmmhyphénation'
    page, = parse(
        '<html style="width: 12em; font-family: ahem">'
        '<body style="-weasy-hyphens: auto;'
        '-weasy-hyphenate-limit-zone: 5%" lang=fr>'
        'mmmmm hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) == 2
    assert lines[0].children[0].text.endswith('‐')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace('‐', '') == 'mmmmm hyphénation'
    page, = parse(
        '<html style="width: 12em; font-family: ahem">'
        '<body style="-weasy-hyphens: auto;'
        '-weasy-hyphenate-limit-zone: 95%" lang=fr>'
        'mmmmm hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('mm')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text == 'mmmmmhyphénation'
@assert_no_logs
def test_hyphenate_limit_chars():
    """Test -weasy-hyphenate-limit-chars (word/before/after minimums)."""
    def line_count(limit_chars):
        # Render the 6-letter word 'hyphen' in a 1em box: 2 lines when
        # hyphenation is permitted by the limits, 1 when it is not.
        page, = parse((
            '<html style="width: 1em; font-family: ahem">'
            '<body style="-weasy-hyphens: auto;'
            '-weasy-hyphenate-limit-chars: %s" lang=en>'
            'hyphen') % limit_chars)
        html, = page.children
        body, = html.children
        lines = body.children
        return len(lines)
    assert line_count('auto') == 2
    assert line_count('auto auto 0') == 2
    assert line_count('0 0 0') == 2
    assert line_count('4 4 auto') == 1
    assert line_count('6 2 4') == 2
    assert line_count('auto 1 auto') == 2
    assert line_count('7 auto auto') == 1
    assert line_count('6 auto auto') == 2
    assert line_count('5 2') == 2
    assert line_count('3') == 2
    assert line_count('2 4 6') == 1
    assert line_count('auto 4') == 1
    assert line_count('auto 2') == 2
@assert_no_logs
def test_overflow_wrap():
    """Test the overflow-wrap property (break-word vs. normal)."""
    def get_lines(wrap, text):
        # Render *text* in an 80px box and return the text of each line.
        page, = parse('''
            <style>
                body {width: 80px; overflow: hidden; font-family: ahem; }
                span {overflow-wrap: %s; white-space: normal; }
            </style>
            <body style="-weasy-hyphens: auto;" lang="en">
            <span>%s
        ''' % (wrap, text))
        html, = page.children
        body, = html.children
        body_lines = []
        for line in body.children:
            box, = line.children
            textBox, = box.children
            body_lines.append(textBox.text)
        return body_lines
    # break-word
    lines = get_lines('break-word', 'aaaaaaaa')
    assert len(lines) > 1
    full_text = ''.join(line for line in lines)
    assert full_text == 'aaaaaaaa'
    # normal
    lines = get_lines('normal', 'aaaaaaaa')
    assert len(lines) == 1
    full_text = ''.join(line for line in lines)
    assert full_text == 'aaaaaaaa'
    # break-word after hyphenation
    lines = get_lines('break-word', 'hyphenations')
    assert len(lines) > 3
    full_text = ''.join(line for line in lines)
    assert full_text == "hy\u2010phen\u2010ations"
    # break word after normal white-space wrap and hyphenation
    lines = get_lines(
        'break-word', "A splitted word. An hyphenated word.")
    assert len(lines) > 8
    full_text = ''.join(line for line in lines)
    assert full_text == "Asplittedword.Anhy\u2010phen\u2010atedword."
@assert_no_logs
def test_white_space():
    """Test the white-space property for all five values, both with a
    1px box (forcing wraps) and a huge box (no wrapping)."""
    def lines(width, space):
        # Render 'This +  \n  is text' with the given white-space value.
        page, = parse('''
            <style>
              body { font-size: 100px; width: %ipx }
              span { white-space: %s }
            </style>
            <body><span>This + \n is text''' % (width, space))
        html, = page.children
        body, = html.children
        return body.children
    # normal: spaces collapse, wrapping allowed -> one word per line.
    line1, line2, line3, line4 = lines(1, 'normal')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This'
    box2, = line2.children
    text2, = box2.children
    assert text2.text == '+'
    box3, = line3.children
    text3, = box3.children
    assert text3.text == 'is'
    box4, = line4.children
    text4, = box4.children
    assert text4.text == 'text'
    # pre: spaces kept, break only at the newline.
    line1, line2 = lines(1, 'pre')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This + '
    box2, = line2.children
    text2, = box2.children
    assert text2.text == ' is text'
    # nowrap: spaces collapse, no wrapping at all.
    line1, = lines(1, 'nowrap')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This + is text'
    # pre-wrap: spaces kept AND wrapping allowed.
    line1, line2, line3, line4, line5 = lines(1, 'pre-wrap')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This '
    box2, = line2.children
    text2, = box2.children
    assert text2.text == '+ '
    box3, = line3.children
    text3, = box3.children
    assert text3.text == ' '
    box4, = line4.children
    text4, = box4.children
    assert text4.text == 'is '
    box5, = line5.children
    text5, = box5.children
    assert text5.text == 'text'
    # pre-line: spaces collapse but the newline is honored.
    line1, line2, line3, line4 = lines(1, 'pre-line')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This'
    box2, = line2.children
    text2, = box2.children
    assert text2.text == '+'
    box3, = line3.children
    text3, = box3.children
    assert text3.text == 'is'
    box4, = line4.children
    text4, = box4.children
    assert text4.text == 'text'
    # Same five values again with a box wide enough to avoid wrapping.
    line1, = lines(1000000, 'normal')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This + is text'
    line1, line2 = lines(1000000, 'pre')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This + '
    box2, = line2.children
    text2, = box2.children
    assert text2.text == ' is text'
    line1, = lines(1000000, 'nowrap')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This + is text'
    line1, line2 = lines(1000000, 'pre-wrap')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This + '
    box2, = line2.children
    text2, = box2.children
    assert text2.text == ' is text'
    line1, line2 = lines(1000000, 'pre-line')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This +'
    box2, = line2.children
    text2, = box2.children
    assert text2.text == 'is text'
@assert_no_logs
def test_tab_size():
    """Test the ``tab-size`` property.

    'a\\ta' in the 16px-wide ahem font: width is 2 characters plus the
    tab expanded to (tab_size - 1) further character advances.
    """
    for value, width in (
            (8, 144),  # (2 + (8 - 1)) * 16
            (4, 80),  # (2 + (4 - 1)) * 16
            ('3em', 64),  # (2 + (3 - 1)) * 16
            ('25px', 41),  # 2 * 16 + 25 - 1 * 16
            # (0, 32), # See Layout.set_tabs
    ):
        page, = parse('''
            <style>
                pre { tab-size: %s; font-family: ahem }
            </style>
            <pre>a&#9;a</pre>
        ''' % value)
        paragraph, = body_children(page)
        line, = paragraph.children
        assert line.width == width
@assert_no_logs
def test_text_transform():
    """Test the text-transform property (all five values on 'hé lO1')."""
    page, = parse('''
        <style>
            p { text-transform: capitalize }
            p+p { text-transform: uppercase }
            p+p+p { text-transform: lowercase }
            p+p+p+p { text-transform: full-width }
            p+p+p+p+p { text-transform: none }
        </style>
        <p>hé lO1</p><p>hé lO1</p><p>hé lO1</p><p>hé lO1</p><p>hé lO1</p>
    ''')
    p1, p2, p3, p4, p5 = body_children(page)
    line1, = p1.children
    text1, = line1.children
    assert text1.text == 'Hé Lo1'
    line2, = p2.children
    text2, = line2.children
    assert text2.text == 'HÉ LO1'
    line3, = p3.children
    text3, = line3.children
    assert text3.text == 'hé lo1'
    line4, = p4.children
    text4, = line4.children
    # full-width maps ASCII to the fullwidth forms, space to U+3000.
    assert text4.text == '\uff48é\u3000\uff4c\uff2f\uff11'
    line5, = p5.children
    text5, = line5.children
    assert text5.text == 'hé lO1'
| gpl-3.0 | -650,783,121,996,463,400 | 31.103811 | 78 | 0.540211 | false |
vladiibine/whispy_lispy | src/whispy_lispy/cst.py | 1 | 4255 | # -*- coding utf-8 -*-
"""
Concrete syntax tree stuff
Lexer should return tokens that are instances of classes found here
"""
from __future__ import unicode_literals
import six
from whispy_lispy import keywords
class CSTError(Exception):
    """Raised for invalid concrete-syntax-tree construction or queries."""
    pass
class Token(object):
    """Concrete syntax tree node.

    Can represent a literal, operator, a name, or an atom.
    An atom is an ordered list of the previously mentioned elements

    Equality and hashing consider only ``value``; ``source`` and
    ``index`` are debugging metadata.
    """
    __slots__ = ['value', 'source', 'index']

    def __init__(self, value, source=None, index=None):
        """
        :param value: the value of the token (python type)
        :param str source: the source code
        :param int index: the index of the token in the source code
        """
        self.value = value
        self.source = source
        self.index = index

    def __repr__(self):
        return '<T {}>'.format(self.value)

    def __eq__(self, other):
        if other is None:
            return False
        if not isinstance(other, Token):
            return False
        return self.value == other.value

    def __ne__(self, other):
        # Python 2 does not derive ``!=`` from ``__eq__``.
        return not self.__eq__(other)

    def __hash__(self):
        # Bug fix: defining ``__eq__`` alone made Token unhashable on
        # Python 3 and identity-hashed (inconsistent with equality) on
        # Python 2.  Hash on the value, like ``__eq__``.
        try:
            return hash(self.value)
        except TypeError:
            # List-valued tokens (e.g. nesting commands) fall back to
            # identity hashing, matching the old Python 2 behaviour.
            return object.__hash__(self)
class ConcreteSyntaxNode(object):
    """A node in the concrete syntax tree.

    The state of this node is kept as a tuple containing either only
    other nodes or only plain values - never a mixture.
    """
    __slots__ = ['values']

    def __init__(self, values):
        """
        The tuple either contains other nodes, or values. Not both!

        :type values: tuple
        :raises CSTError: if ``values`` mixes elements of different types
        """
        types = set(type(elem) for elem in values)
        if len(types) > 1:
            raise CSTError(
                "Concrete Syntax Node should contain either other nodes, or "
                "simple values, not both. This node contains {} value(s): {}"
                .format(len(types), values)
            )
        self.values = values

    def __eq__(self, other):
        if other is None:
            return False
        if not isinstance(other, self.__class__):
            return False
        return self.values == other.values

    def __repr__(self):
        return '<cN {}>'.format(self.values)

    def is_operator(self):
        """True for a single value that is one of the known operators."""
        return (
            len(self.values) == 1 and
            self.values[0] in keywords.OPERATORS
        )

    def is_root(self):
        return isinstance(self, RootConcreteSyntaxnode)

    def is_leaf(self):
        """True when no element is itself a node (i.e. all plain values)."""
        return all(
            not isinstance(elem, ConcreteSyntaxNode) for elem in self.values)

    def is_symbol(self):
        return (
            len(self.values) == 1 and
            isinstance(self.values[0], six.string_types)
        )

    def is_int(self):
        return (
            len(self.values) == 1 and
            isinstance(self.values[0], int)
        )

    def is_float(self):
        return (
            len(self.values) == 1 and
            isinstance(self.values[0], float)
        )

    def is_bool(self):
        return (
            len(self.values) == 1 and
            isinstance(self.values[0], bool)
        )

    def is_string(self):
        """True for a single, double-quoted string value.

        Bug fix: the old check indexed ``values[0][0]`` and raised
        IndexError on an empty string; startswith/endswith return False
        instead.
        """
        return (
            len(self.values) == 1 and
            isinstance(self.values[0], six.string_types) and
            self.values[0].startswith('"') and
            self.values[0].endswith('"')
        )

    def symbol_equals(self, param):
        """Compare the single symbol value to ``param``.

        :raises CSTError: if this node is not a symbol
        """
        if not self.is_symbol():
            raise CSTError('Node is not a symbol')
        return self.values[0] == param

    def symbol_in_iterable(self, iterable):
        """True if this node's symbol equals any element of ``iterable``."""
        for elem in iterable:
            if self.symbol_equals(elem):
                return True
        return False
class RootConcreteSyntaxnode(ConcreteSyntaxNode):
    """The root node of a concrete syntax tree (only the repr differs)."""
    def __repr__(self):
        return '<RcN {}>'.format(self.values)
class NestingCommand(Token):
    """Represents a command to either increment or decrement the tree level
    """
    def __repr__(self):
        # value is a one-element list like ['<INC>']; show just the tag.
        return '{}'.format(self.value[0])
    def __eq__(self, other):
        # Stricter than Token.__eq__: the exact subclass must match.
        if not isinstance(other, self.__class__):
            return False
        return self.value == other.value
class IncrementNesting(NestingCommand):
    """Marker token: go one nesting level deeper."""
    def __init__(self, _=None, source=None, index=None):
        super(IncrementNesting, self).__init__(['<INC>'], source, index)
class DecrementNesting(NestingCommand):
    """Marker token: go one nesting level back up."""
    def __init__(self, _=None, source=None, index=None):
        super(DecrementNesting, self).__init__(['<DEC>'], source, index)
| mit | -7,624,388,774,132,405,000 | 25.761006 | 77 | 0.564512 | false |
kylef/pyppp | pyppp/django/forms.py | 1 | 3810 | from django import forms
from django.conf import settings
from django.http import HttpResponseRedirect
from django.views.decorators.cache import never_cache
from django.contrib.auth import authenticate, REDIRECT_FIELD_NAME
from django.contrib.formtools.wizard import FormWizard
from pyppp.django import login
from pyppp.django.models import UserPPP
class UserFormBase(forms.Form):
    """Base form that remembers the user authenticated during clean()."""
    def __init__(self, *args, **kwargs):
        # Set by subclasses once authenticate() succeeds.
        self.user_cache = None
        super(UserFormBase, self).__init__(*args, **kwargs)
    def get_user_id(self):
        """Return the authenticated user's id, or None."""
        return self.user_cache.id if self.user_cache else None
    def get_user(self):
        """Return the authenticated user, or None."""
        return self.user_cache
class AuthenticationForm(UserFormBase):
    """First login step: username/password checked via authenticate()."""
    username = forms.CharField(max_length=30)
    password = forms.CharField(widget=forms.PasswordInput)
    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        # Only attempt authentication once both fields validated.
        if username and password:
            self.user_cache = authenticate(username=username, password=password)
            if self.user_cache is None:
                raise forms.ValidationError('Please enter a correct username and password. Note that both fields are case-sensitive.')
            elif not self.user_cache.is_active:
                raise forms.ValidationError('This account is inactive')
        return self.cleaned_data
class PasscodeForm(UserFormBase):
    """Second login step: a one-time passcode from the user's PPP card.

    ``card`` and ``code`` tell the user which passcode is expected;
    they are prefilled by the wizard and made read-only.
    """
    username = forms.CharField(max_length=30)
    passcode = forms.CharField(max_length=4)
    card = forms.CharField(max_length=8)
    code = forms.CharField(max_length=8)
    def __init__(self, *args, **kwargs):
        super(PasscodeForm, self).__init__(*args, **kwargs)
        # Informational fields; the user must not edit them.
        self.fields['username'].widget.attrs['readonly'] = True
        self.fields['card'].widget.attrs['readonly'] = True
        self.fields['code'].widget.attrs['readonly'] = True
    def clean(self):
        # Already authenticated on a previous pass: skip re-checking.
        if self.user_cache is not None:
            return self.cleaned_data
        username = self.cleaned_data.get('username')
        passcode = self.cleaned_data.get('passcode')
        if username and passcode:
            self.user_cache = authenticate(username=username, passcode=passcode)
            if self.user_cache is None:
                raise forms.ValidationError('Incorrect passcode.')
        return self.cleaned_data
class LoginWizard(FormWizard):
    """Two-step login wizard: password form, then PPP passcode form."""
    def parse_params(self, request, *args, **kwargs):
        current_step = self.determine_step(request, *args, **kwargs)
        if request.method == 'POST' and current_step == 0:
            request.session.set_test_cookie()
            form = self.get_form(current_step, request.POST)
            if form.is_valid():
                # Step 0 authenticated: prefill step 1 with the card and
                # row/column position of the next expected passcode.
                ppp, created = UserPPP.objects.get_or_create(user=form.user_cache)
                passcode_info = ppp.get_current_sequence_info()
                self.initial[(current_step + 1)] = {
                    'username': form.cleaned_data.get('username'),
                    'card': passcode_info['card'],
                    'code': '%s%s' % (passcode_info['row'], passcode_info['column'])
                }
    def get_template(self, step):
        # Both wizard steps share the same template.
        return 'pyppp/form.html'
    def done(self, request, form_list):
        if not request.session.test_cookie_worked():
            # NOTE(review): this only prints to stdout; consider surfacing
            # the cookie failure to the user instead.
            print "Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."
        redirect_to = request.REQUEST.get(REDIRECT_FIELD_NAME, '')
        # Reject empty or off-site redirect targets (open-redirect guard).
        if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
            redirect_to = settings.LOGIN_REDIRECT_URL
        login(request, form_list[1].get_user())
        return HttpResponseRedirect(redirect_to)
| bsd-2-clause | 4,494,156,654,408,472,000 | 38.6875 | 134 | 0.623622 | false |
auag92/n2dm | Asap-3.8.4/Projects/NanoparticleMC/resume_amc_gas.py | 1 | 2557 | #PBS -l nodes=20:ppn=4:opteron4
#PBS -q verylong
#PBS -N amc_n100_conv1
#PBS -m ae
import os
from montecarlo import SurfaceMonteCarloData
from ase.cluster.cubic import FaceCenteredCubic
from ase.cluster import data
from asap3.MonteCarlo.Metropolis import Metropolis
from asap3.MonteCarlo.Moves import SurfaceMove
from asap3 import EMT
import numpy as np
from ase.visualize import view
from resizecluster import resizecluster
from ase.io.trajectory import PickleTrajectory
import sys
from atommontecarlodata import AtomMonteCarloData
from ase.parallel import world
from AdsCalc import adscalc
from time import time,sleep
import pickle
#Change note: Added gas option, check for indentation tab vs. spaces error.
#Added resume option.
#Arguments:
filename = sys.argv[1]
temperature = float(sys.argv[2])
nsteps = int(sys.argv[3])
outdir= sys.argv[4]
tgas = float(sys.argv[5])
pgas = float(sys.argv[6])
species = "AuCO"
def read_and_do_montecarlo(filename,use_gas):
d = SurfaceMonteCarloData()
d.read(filename)
print "Starting "+str(len(d))+" sims."
surfaces = data.fcc.surface_names
#for n in range(0,len(d)):
for n in range(world.rank,len(d),world.size):
file = outdir+"/a%05i.amc.gz" % n
if not os.path.exists(file):
layers = d[n][1] # Really d[n]["layers"]
atoms = FaceCenteredCubic(d.atomic_number,
surfaces, layers,latticeconstant=d.lattice_constant)
resizecluster(atoms, d.fitsize)
print "Resized number of atoms:", len(atoms)
do_monte_carlo(atoms,n,outdir,use_gas)
world.barrier()#Let the cpu's wait until all in same state.
def do_monte_carlo(atoms,iteration,outdir,use_gas):
tempcalc = EMT()
if use_gas==True:
atoms.set_calculator(adscalc(tempcalc,temperature=tgas,pressure=pgas,species=species))
else:
atoms.set_calculator(tempcalc)
Esmc = atoms.get_potential_energy()
mc = Metropolis(atoms=atoms,log=None)
surfmove =SurfaceMove()
mc.attach_move(surfmove)
outfilename = "a%05i.amc" % iteration
amcd = AtomMonteCarloData(atoms=atoms,surfmove=surfmove,temp=temperature,filename=outfilename,Esmc=Esmc)
mc.attach_observer(amcd.accept_move) #Because default event is at acceptmove
mc.attach_observer(amcd.reject_move, attime='reject')
amcd.accept_move() #We need to write the first configuration,
mc.run(nsteps, temp=temperature)
amcd.write(os.path.join(outdir,outfilename))
read_and_do_montecarlo(filename,True)
| mit | 3,836,986,307,182,625,000 | 31.782051 | 108 | 0.707861 | false |
jelly/calibre | src/calibre/utils/run_tests.py | 2 | 5027 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import unittest, functools, os, importlib, zipfile
from calibre.utils.monotonic import monotonic
def no_endl(f):
@functools.wraps(f)
def func(*args, **kwargs):
self = f.__self__
orig = self.stream.writeln
self.stream.writeln = self.stream.write
try:
return f(*args, **kwargs)
finally:
self.stream.writeln = orig
return func
class TestResult(unittest.TextTestResult):
def __init__(self, *args, **kwargs):
super(TestResult, self).__init__(*args, **kwargs)
self.start_time = {}
for x in ('Success', 'Error', 'Failure', 'Skip', 'ExpectedFailure', 'UnexpectedSuccess'):
x = 'add' + x
setattr(self, x, no_endl(getattr(self, x)))
self.times = {}
def startTest(self, test):
self.start_time[test] = monotonic()
return super(TestResult, self).startTest(test)
def stopTest(self, test):
orig = self.stream.writeln
self.stream.writeln = self.stream.write
super(TestResult, self).stopTest(test)
elapsed = monotonic()
elapsed -= self.start_time.get(test, elapsed)
self.times[test] = elapsed
self.stream.writeln = orig
self.stream.writeln(' [%.1g s]' % elapsed)
def stopTestRun(self):
super(TestResult, self).stopTestRun()
if self.wasSuccessful():
tests = sorted(self.times, key=self.times.get, reverse=True)
slowest = ['%s [%g s]' % (t.id(), self.times[t]) for t in tests[:3]]
if len(slowest) > 1:
self.stream.writeln('\nSlowest tests: %s' % ' '.join(slowest))
def find_tests_in_dir(path, excludes=('main.py',)):
if not os.path.exists(path) and '.zip' in path:
idx = path.rfind('.zip')
zf = path[:idx+4]
prefix = os.path.relpath(path, zf).replace(os.sep, '/')
package = prefix.replace('/', '.')
with zipfile.ZipFile(zf) as f:
namelist = f.namelist()
items = [i for i in namelist if i.startswith(prefix) and i.count('/') == prefix.count('/') + 1]
else:
d = os.path.dirname
base = d(d(d(os.path.abspath(__file__))))
package = os.path.relpath(path, base).replace(os.sep, '/').replace('/', '.')
items = os.listdir(path)
suits = []
for x in items:
if x.endswith('.py') and x not in excludes:
m = importlib.import_module(package + '.' + x.partition('.')[0])
suits.append(unittest.defaultTestLoader.loadTestsFromModule(m))
return unittest.TestSuite(suits)
def itertests(suite):
stack = [suite]
while stack:
suite = stack.pop()
for test in suite:
if isinstance(test, unittest.TestSuite):
stack.append(test)
continue
if test.__class__.__name__ == 'ModuleImportFailure':
raise Exception('Failed to import a test module: %s' % test)
yield test
def init_env():
from calibre.utils.config_base import reset_tweaks_to_default
from calibre.ebooks.metadata.book.base import reset_field_metadata
reset_tweaks_to_default()
reset_field_metadata()
def filter_tests(suite, test_ok):
ans = unittest.TestSuite()
added = set()
for test in itertests(suite):
if test_ok(test) and test not in added:
ans.addTest(test)
added.add(test)
return ans
def filter_tests_by_name(suite, *names):
names = {x if x.startswith('test_') else 'test_' + x for x in names}
def q(test):
return test._testMethodName in names
return filter_tests(suite, q)
def filter_tests_by_module(suite, *names):
names = frozenset(names)
def q(test):
m = test.__class__.__module__.rpartition('.')[-1]
return m in names
return filter_tests(suite, q)
def run_tests(find_tests, verbosity=4):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('name', nargs='?', default=None,
help='The name of the test to run, for e.g. writing.WritingTest.many_many_basic or .many_many_basic for a shortcut')
args = parser.parse_args()
tests = find_tests()
if args.name:
if args.name.startswith('.'):
tests = filter_tests_by_name(tests, args.name[1:])
else:
tests = filter_tests_by_module(tests, args.name)
if not tests._tests:
raise SystemExit('No test named %s found' % args.name)
run_cli(tests, verbosity)
def run_cli(suite, verbosity=4):
r = unittest.TextTestRunner
r.resultclass = unittest.TextTestResult if verbosity < 2 else TestResult
init_env()
result = r(verbosity=verbosity).run(suite)
if not result.wasSuccessful():
raise SystemExit(1)
| gpl-3.0 | -3,584,374,407,453,739,000 | 32.738255 | 140 | 0.597573 | false |
lixun910/pysal | pysal/viz/splot/_viz_esda_mpl.py | 1 | 44623 | import matplotlib.pyplot as plt
import matplotlib as mpl
import geopandas as gpd
import numpy as np
from pysal.lib.weights.contiguity import Queen
from pysal.lib.weights.spatial_lag import lag_spatial
import seaborn as sbn
from pysal.explore.esda.moran import (Moran_Local, Moran_Local_BV,
Moran, Moran_BV)
import warnings
from pysal.model.spreg import OLS
from matplotlib import patches, colors
from ._viz_utils import (mask_local_auto, moran_hot_cold_spots,
splot_colors)
"""
Lightweight visualizations for esda using Matplotlib and Geopandas
TODO
* geopandas plotting, change round shapes in legends to boxes
* prototype moran_facet using `seaborn.FacetGrid`
"""
__author__ = ("Stefanie Lumnitz <stefanie.lumitz@gmail.com>")
def _create_moran_fig_ax(ax, figsize):
    """
    Set up a Matplotlib figure/axes pair for Moran visualizations.

    If ``ax`` is None, a new figure with a single subplot is created;
    otherwise the figure owning ``ax`` is reused.  The common splot
    design (offset left/bottom spines, hidden top/right spines, smart
    bounds) is applied in both cases.

    Parameters
    ----------
    ax : Matplotlib Axes instance or None
        Axes to draw into, or None to create a fresh figure.
    figsize : tuple
        Figure size ``(width, height)``; used only when a new figure
        is created.

    Returns
    -------
    fig : Matplotlib Figure instance
    ax : Matplotlib Axes instance
    """
    if ax is not None:
        fig = ax.get_figure()
    else:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)

    # common splot design: drop top/right spines, detach left/bottom ones
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    ax.spines['left'].set_position(('axes', -0.05))
    ax.spines['bottom'].set_position(('axes', -0.05))
    ax.spines['left'].set_smart_bounds(True)
    ax.spines['bottom'].set_smart_bounds(True)
    return fig, ax
def moran_scatterplot(moran, zstandard=True, p=None, ax=None,
                      scatter_kwds=None, fitline_kwds=None):
    """
    Moran Scatterplot

    Dispatches on the type of ``moran`` to the matching specialised
    plotting helper (global, bivariate, local or local-bivariate).

    Parameters
    ----------
    moran : esda.moran instance
        Values of Moran's I Global, Bivariate and Local
        Autocorrelation Statistics
    zstandard : bool, optional
        If True, Moran Scatterplot will show z-standardized attribute and
        spatial lag values. Default =True.
    p : float, optional
        If given, the p-value threshold for significance
        for Local Autocorrelation analysis. Points will be colored by
        significance. By default it will not be colored.
        Default =None.
    ax : Matplotlib Axes instance, optional
        If given, the Moran plot will be created inside this axis.
        Default =None.
    scatter_kwds : keyword arguments, optional
        Keywords used for creating and designing the scatter points.
        Default =None.
    fitline_kwds : keyword arguments, optional
        Keywords used for creating and designing the moran fitline.
        Default =None.

    Returns
    -------
    fig : Matplotlib Figure instance
        Moran scatterplot figure
    ax : matplotlib Axes instance
        Axes in which the figure is plotted

    Raises
    ------
    ValueError
        If ``moran`` is not a ``Moran``, ``Moran_BV``, ``Moran_Local``
        or ``Moran_Local_BV`` instance.

    Examples
    --------
    Imports

    >>> import matplotlib.pyplot as plt
    >>> from pysal.lib.weights.contiguity import Queen
    >>> from pysal.lib import examples
    >>> import geopandas as gpd
    >>> from pysal.explore.esda.moran import (Moran, Moran_BV,
    ...                                       Moran_Local, Moran_Local_BV)
    >>> from pysal.viz.splot.esda import moran_scatterplot

    Load data and calculate weights

    >>> link_to_data = examples.get_path('Guerry.shp')
    >>> gdf = gpd.read_file(link_to_data)
    >>> x = gdf['Suicids'].values
    >>> y = gdf['Donatns'].values
    >>> w = Queen.from_dataframe(gdf)
    >>> w.transform = 'r'

    Calculate esda.moran Objects

    >>> moran = Moran(y, w)
    >>> moran_bv = Moran_BV(y, x, w)
    >>> moran_loc = Moran_Local(y, w)
    >>> moran_loc_bv = Moran_Local_BV(y, x, w)

    Plot

    >>> fig, axs = plt.subplots(2, 2, figsize=(10,10),
    ...                         subplot_kw={'aspect': 'equal'})
    >>> moran_scatterplot(moran, p=0.05, ax=axs[0,0])
    >>> moran_scatterplot(moran_loc, p=0.05, ax=axs[1,0])
    >>> moran_scatterplot(moran_bv, p=0.05, ax=axs[0,1])
    >>> moran_scatterplot(moran_loc_bv, p=0.05, ax=axs[1,1])
    >>> plt.show()
    """
    if isinstance(moran, Moran):
        if p is not None:
            warnings.warn('`p` is only used for plotting `esda.moran.Moran_Local`\n'
                          'or `Moran_Local_BV` objects')
        fig, ax = _moran_global_scatterplot(moran=moran, zstandard=zstandard,
                                            ax=ax, scatter_kwds=scatter_kwds,
                                            fitline_kwds=fitline_kwds)
    elif isinstance(moran, Moran_BV):
        if p is not None:
            warnings.warn('`p` is only used for plotting `esda.moran.Moran_Local`\n'
                          'or `Moran_Local_BV` objects')
        fig, ax = _moran_bv_scatterplot(moran_bv=moran, ax=ax,
                                        scatter_kwds=scatter_kwds,
                                        fitline_kwds=fitline_kwds)
    elif isinstance(moran, Moran_Local):
        fig, ax = _moran_loc_scatterplot(moran_loc=moran, zstandard=zstandard,
                                         ax=ax, p=p, scatter_kwds=scatter_kwds,
                                         fitline_kwds=fitline_kwds)
    elif isinstance(moran, Moran_Local_BV):
        fig, ax = _moran_loc_bv_scatterplot(moran_loc_bv=moran, ax=ax,
                                            p=p, scatter_kwds=scatter_kwds,
                                            fitline_kwds=fitline_kwds)
    else:
        # Previously an unrecognised type fell through and raised a
        # confusing UnboundLocalError below; fail fast with a clear message.
        raise ValueError('`moran` must be a `Moran`, `Moran_BV`, '
                         '`Moran_Local` or `Moran_Local_BV` instance, '
                         'got {}'.format(type(moran).__name__))
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    return fig, ax
def _moran_global_scatterplot(moran, zstandard=True, ax=None,
                              scatter_kwds=None, fitline_kwds=None):
    """
    Scatterplot for a Global Moran's I statistic.

    Plots the attribute against its spatial lag together with the
    least-squares fitline whose slope corresponds to Moran's I.

    Parameters
    ----------
    moran : esda.moran.Moran instance
        Fitted Global Moran's I object.
    zstandard : bool, optional
        Plot z-standardized values (with zero reference lines) when True,
        raw values (with mean reference lines) otherwise. Default =True.
    ax : Matplotlib Axes instance, optional
        Axes to draw into; a new figure is created when None.
        Default =None.
    scatter_kwds : keyword arguments, optional
        Styling for the scatter points. Default =None.
    fitline_kwds : keyword arguments, optional
        Styling for the fitline. Default =None.

    Returns
    -------
    fig : Matplotlib Figure instance
        Moran scatterplot figure
    ax : matplotlib Axes instance
        Axes in which the figure is plotted

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import geopandas as gpd
    >>> from pysal.lib.weights.contiguity import Queen
    >>> from pysal.lib import examples
    >>> from pysal.explore.esda.moran import Moran
    >>> from pysal.viz.splot.esda import moran_scatterplot
    >>> gdf = gpd.read_file(examples.get_path('Guerry.shp'))
    >>> w = Queen.from_dataframe(gdf)
    >>> w.transform = 'r'
    >>> moran = Moran(gdf['Donatns'].values, w)
    >>> moran_scatterplot(moran)
    >>> plt.show()
    """
    scatter_kwds = dict() if scatter_kwds is None else scatter_kwds
    fitline_kwds = dict() if fitline_kwds is None else fitline_kwds

    # styling defaults, applied only where the caller did not choose
    for key, value in [('alpha', 0.6),
                       ('color', splot_colors['moran_base']),
                       ('s', 40)]:
        scatter_kwds.setdefault(key, value)
    for key, value in [('alpha', 0.9),
                       ('color', splot_colors['moran_fit'])]:
        fitline_kwds.setdefault(key, value)

    fig, ax = _create_moran_fig_ax(ax, figsize=(7, 7))
    ax.set_xlabel('Attribute')
    ax.set_ylabel('Spatial Lag')
    ax.set_title('Moran Scatterplot' +
                 ' (' + str(round(moran.I, 2)) + ')')

    if zstandard is True:
        spatial_lag = lag_spatial(moran.w, moran.z)
        ols_fit = OLS(moran.z[:, None], spatial_lag[:, None])
        ax.scatter(moran.z, spatial_lag, **scatter_kwds)
        ax.plot(spatial_lag, ols_fit.predy, **fitline_kwds)
        # zero reference lines for the standardized values
        ax.axvline(0, alpha=0.5, color='k', linestyle='--')
        ax.axhline(0, alpha=0.5, color='k', linestyle='--')
    else:
        spatial_lag = lag_spatial(moran.w, moran.y)
        slope, intercept = np.polyfit(moran.y, spatial_lag, 1)
        ax.scatter(moran.y, spatial_lag, **scatter_kwds)
        ax.plot(moran.y, intercept + slope * moran.y, **fitline_kwds)
        # dashed reference lines at the means of attribute and lag
        ax.vlines(moran.y.mean(), spatial_lag.min(), spatial_lag.max(),
                  alpha=0.5, linestyle='--')
        ax.hlines(spatial_lag.mean(), moran.y.min(), moran.y.max(),
                  alpha=0.5, linestyle='--')
    return fig, ax
def plot_moran_simulation(moran, ax=None, fitline_kwds=None, **kwargs):
    """
    Plot the simulated reference distribution of a Global Moran's I.

    Draws a kernel density estimate of the simulated values
    (``moran.sim``) and marks the observed statistic (``moran.I``)
    and its expected value (``moran.EI``) with vertical lines.

    Parameters
    ----------
    moran : esda.moran.Moran instance
        Fitted Global Moran's I object.
    ax : Matplotlib Axes instance, optional
        Axes to draw into; a new figure is created when None.
        Default =None.
    fitline_kwds : keyword arguments, optional
        Styling for the vertical line marking the observed I.
        Default =None.
    **kwargs : keyword arguments, optional
        Extra keywords (``figsize``, ``shade``, ``color``, ...) consumed
        here or passed on to seaborn.kdeplot.

    Returns
    -------
    fig : Matplotlib Figure instance
        Simulated reference distribution figure
    ax : matplotlib Axes instance
        Axes in which the figure is plotted

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import geopandas as gpd
    >>> from pysal.lib.weights.contiguity import Queen
    >>> from pysal.lib import examples
    >>> from pysal.explore.esda.moran import Moran
    >>> from pysal.viz.splot.esda import plot_moran_simulation
    >>> gdf = gpd.read_file(examples.get_path('Guerry.shp'))
    >>> w = Queen.from_dataframe(gdf)
    >>> w.transform = 'r'
    >>> moran = Moran(gdf['Donatns'].values, w)
    >>> plot_moran_simulation(moran)
    >>> plt.show()
    """
    fitline_kwds = dict() if fitline_kwds is None else fitline_kwds

    # figure-level options are consumed here; the rest go to kdeplot
    figsize = kwargs.pop('figsize', (7, 7))
    shade = kwargs.pop('shade', True)
    color = kwargs.pop('color', splot_colors['moran_base'])

    fig, ax = _create_moran_fig_ax(ax, figsize)

    sbn.kdeplot(moran.sim, shade=shade, color=color, ax=ax, **kwargs)

    fitline_kwds.setdefault('color', splot_colors['moran_fit'])
    ax.vlines(moran.I, 0, 1, **fitline_kwds)   # observed statistic
    ax.vlines(moran.EI, 0, 1)                  # expected value
    ax.set_title('Reference Distribution')
    ax.set_xlabel('Moran I: ' + str(round(moran.I, 2)))
    return fig, ax
def plot_moran(moran, zstandard=True, scatter_kwds=None,
               fitline_kwds=None, **kwargs):
    """
    Side-by-side view of a Global Moran's I analysis.

    Left panel: simulated reference distribution
    (see :func:`plot_moran_simulation`).  Right panel: Moran
    scatterplot (see :func:`moran_scatterplot`).

    Parameters
    ----------
    moran : esda.moran.Moran instance
        Fitted Global Moran's I object.
    zstandard : bool, optional
        Plot z-standardized attribute and spatial lag values in the
        scatterplot when True. Default =True.
    scatter_kwds : keyword arguments, optional
        Styling for the scatter points. Default =None.
    fitline_kwds : keyword arguments, optional
        Styling for the moran fitline and the vertical fitline.
        Default =None.
    **kwargs : keyword arguments, optional
        Extra keywords (``figsize``, ...) consumed here or passed on
        to seaborn.kdeplot.

    Returns
    -------
    fig : Matplotlib Figure instance
        Moran scatterplot and reference distribution figure
    ax : matplotlib Axes instance
        Axes in which the figure is plotted

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import geopandas as gpd
    >>> from pysal.lib.weights.contiguity import Queen
    >>> from pysal.lib import examples
    >>> from pysal.explore.esda.moran import Moran
    >>> from pysal.viz.splot.esda import plot_moran
    >>> gdf = gpd.read_file(examples.get_path('Guerry.shp'))
    >>> w = Queen.from_dataframe(gdf)
    >>> w.transform = 'r'
    >>> moran = Moran(gdf['Donatns'].values, w)
    >>> plot_moran(moran)
    >>> plt.show()
    """
    figsize = kwargs.pop('figsize', (10, 4))
    fig, axs = plt.subplots(1, 2, figsize=figsize,
                            subplot_kw={'aspect': 'equal'})
    plot_moran_simulation(moran, ax=axs[0], fitline_kwds=fitline_kwds, **kwargs)
    moran_scatterplot(moran, zstandard=zstandard, ax=axs[1],
                      scatter_kwds=scatter_kwds, fitline_kwds=fitline_kwds)
    # the equal-aspect hint above is only for layout; free both axes again
    for axis in axs:
        axis.set(aspect="auto")
    return fig, axs
def _moran_bv_scatterplot(moran_bv, ax=None, scatter_kwds=None, fitline_kwds=None):
    """
    Scatterplot for a Bivariate Moran's I statistic.

    Plots the z-standardized attribute X against the spatial lag of the
    z-standardized attribute Y, together with the least-squares fitline.

    Parameters
    ----------
    moran_bv : esda.moran.Moran_BV instance
        Fitted Bivariate Moran's I object.
    ax : Matplotlib Axes instance, optional
        Axes to draw into; a new figure is created when None.
        Default =None.
    scatter_kwds : keyword arguments, optional
        Styling for the scatter points. Default =None.
    fitline_kwds : keyword arguments, optional
        Styling for the fitline. Default =None.

    Returns
    -------
    fig : Matplotlib Figure instance
        Bivariate moran scatterplot figure
    ax : matplotlib Axes instance
        Axes in which the figure is plotted

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import geopandas as gpd
    >>> from pysal.lib.weights.contiguity import Queen
    >>> from pysal.lib import examples
    >>> from pysal.explore.esda.moran import Moran_BV
    >>> from pysal.viz.splot.esda import moran_scatterplot
    >>> gdf = gpd.read_file(examples.get_path('Guerry.shp'))
    >>> w = Queen.from_dataframe(gdf)
    >>> w.transform = 'r'
    >>> moran_bv = Moran_BV(gdf['Suicids'].values, gdf['Donatns'].values, w)
    >>> moran_scatterplot(moran_bv)
    >>> plt.show()
    """
    scatter_kwds = dict() if scatter_kwds is None else scatter_kwds
    fitline_kwds = dict() if fitline_kwds is None else fitline_kwds

    # styling defaults, applied only where the caller did not choose
    for key, value in [('alpha', 0.6),
                       ('color', splot_colors['moran_base']),
                       ('s', 40)]:
        scatter_kwds.setdefault(key, value)
    for key, value in [('alpha', 0.9),
                       ('color', splot_colors['moran_fit'])]:
        fitline_kwds.setdefault(key, value)

    fig, ax = _create_moran_fig_ax(ax, figsize=(7, 7))
    ax.set_xlabel('Attribute X')
    ax.set_ylabel('Spatial Lag of Y')
    ax.set_title('Bivariate Moran Scatterplot' +
                 ' (' + str(round(moran_bv.I, 2)) + ')')

    # spatial lag of standardized y and its least-squares fit
    lag_zy = lag_spatial(moran_bv.w, moran_bv.zy)
    ols_fit = OLS(moran_bv.zy[:, None], lag_zy[:, None])
    ax.scatter(moran_bv.zx, lag_zy, **scatter_kwds)
    ax.plot(lag_zy, ols_fit.predy, **fitline_kwds)
    # zero reference lines
    ax.axvline(0, alpha=0.5, color='k', linestyle='--')
    ax.axhline(0, alpha=0.5, color='k', linestyle='--')
    return fig, ax
def plot_moran_bv_simulation(moran_bv, ax=None, fitline_kwds=None, **kwargs):
    """
    Plot the simulated reference distribution of a Bivariate Moran's I.

    Draws a kernel density estimate of the simulated values
    (``moran_bv.sim``) and marks the observed statistic
    (``moran_bv.I``) and the simulated expectation
    (``moran_bv.EI_sim``) with vertical lines.

    Parameters
    ----------
    moran_bv : esda.moran.Moran_BV instance
        Fitted Bivariate Moran's I object.
    ax : Matplotlib Axes instance, optional
        Axes to draw into; a new figure is created when None.
        Default =None.
    fitline_kwds : keyword arguments, optional
        Styling for the vertical line marking the observed I.
        Default =None.
    **kwargs : keyword arguments, optional
        Extra keywords (``figsize``, ``shade``, ``color``, ...) consumed
        here or passed on to seaborn.kdeplot.

    Returns
    -------
    fig : Matplotlib Figure instance
        Bivariate moran reference distribution figure
    ax : matplotlib Axes instance
        Axes in which the figure is plotted

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import geopandas as gpd
    >>> from pysal.lib.weights.contiguity import Queen
    >>> from pysal.lib import examples
    >>> from pysal.explore.esda.moran import Moran_BV
    >>> from pysal.viz.splot.esda import plot_moran_bv_simulation
    >>> gdf = gpd.read_file(examples.get_path('Guerry.shp'))
    >>> w = Queen.from_dataframe(gdf)
    >>> w.transform = 'r'
    >>> moran_bv = Moran_BV(gdf['Suicids'].values, gdf['Donatns'].values, w)
    >>> plot_moran_bv_simulation(moran_bv)
    >>> plt.show()
    """
    fitline_kwds = dict() if fitline_kwds is None else fitline_kwds

    # figure-level options are consumed here; the rest go to kdeplot
    figsize = kwargs.pop('figsize', (7, 7))
    shade = kwargs.pop('shade', True)
    color = kwargs.pop('color', splot_colors['moran_base'])

    fig, ax = _create_moran_fig_ax(ax, figsize)

    sbn.kdeplot(moran_bv.sim, shade=shade, color=color, ax=ax, **kwargs)

    fitline_kwds.setdefault('color', splot_colors['moran_fit'])
    ax.vlines(moran_bv.I, 0, 1, **fitline_kwds)   # observed statistic
    ax.vlines(moran_bv.EI_sim, 0, 1)              # simulated expectation
    ax.set_title('Reference Distribution')
    ax.set_xlabel('Bivariate Moran I: ' + str(round(moran_bv.I, 2)))
    return fig, ax
def plot_moran_bv(moran_bv, scatter_kwds=None, fitline_kwds=None, **kwargs):
    """
    Side-by-side view of a Bivariate Moran's I analysis.

    Left panel: simulated reference distribution
    (see :func:`plot_moran_bv_simulation`).  Right panel: bivariate
    Moran scatterplot (see :func:`moran_scatterplot`).

    Parameters
    ----------
    moran_bv : esda.moran.Moran_BV instance
        Fitted Bivariate Moran's I object.
    scatter_kwds : keyword arguments, optional
        Styling for the scatter points. Default =None.
    fitline_kwds : keyword arguments, optional
        Styling for the moran fitline and the vertical fitline.
        Default =None.
    **kwargs : keyword arguments, optional
        Extra keywords (``figsize``, ...) consumed here or passed on
        to seaborn.kdeplot.

    Returns
    -------
    fig : Matplotlib Figure instance
        Bivariate moran scatterplot and reference distribution figure
    ax : matplotlib Axes instance
        Axes in which the figure is plotted

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import geopandas as gpd
    >>> from pysal.lib.weights.contiguity import Queen
    >>> from pysal.lib import examples
    >>> from pysal.explore.esda.moran import Moran_BV
    >>> from pysal.viz.splot.esda import plot_moran_bv
    >>> gdf = gpd.read_file(examples.get_path('Guerry.shp'))
    >>> w = Queen.from_dataframe(gdf)
    >>> w.transform = 'r'
    >>> moran_bv = Moran_BV(gdf['Suicids'].values, gdf['Donatns'].values, w)
    >>> plot_moran_bv(moran_bv)
    >>> plt.show()
    """
    figsize = kwargs.pop('figsize', (10, 4))
    fig, axs = plt.subplots(1, 2, figsize=figsize,
                            subplot_kw={'aspect': 'equal'})
    plot_moran_bv_simulation(moran_bv, ax=axs[0], fitline_kwds=fitline_kwds,
                             **kwargs)
    moran_scatterplot(moran_bv, ax=axs[1], scatter_kwds=scatter_kwds,
                      fitline_kwds=fitline_kwds)
    # the equal-aspect hint above is only for layout; free both axes again
    for axis in axs:
        axis.set(aspect="auto")
    return fig, axs
def _moran_loc_scatterplot(moran_loc, zstandard=True, p=None,
                           ax=None, scatter_kwds=None, fitline_kwds=None):
    """
    Moran Scatterplot with option of coloring of Local Moran Statistics

    Parameters
    ----------
    moran_loc : esda.moran.Moran_Local instance
        Values of Moran's I Local Autocorrelation Statistics
    zstandard : bool, optional
        If True, Moran Scatterplot will show z-standardized attribute and
        spatial lag values. Default =True.
    p : float, optional
        If given, the p-value threshold for significance. Points will
        be colored by significance. By default it will not be colored.
        Default =None.
    ax : Matplotlib Axes instance, optional
        If given, the Moran plot will be created inside this axis.
        Default =None.
    scatter_kwds : keyword arguments, optional
        Keywords used for creating and designing the scatter points.
        Default =None.
    fitline_kwds : keyword arguments, optional
        Keywords used for creating and designing the moran fitline.
        Default =None.

    Returns
    -------
    fig : Matplotlib Figure instance
        Moran Local scatterplot figure
    ax : matplotlib Axes instance
        Axes in which the figure is plotted

    Examples
    --------
    Imports

    >>> import matplotlib.pyplot as plt
    >>> import geopandas as gpd
    >>> from pysal.lib.weights.contiguity import Queen
    >>> from pysal.lib import examples
    >>> from pysal.explore.esda.moran import Moran_Local
    >>> from pysal.viz.splot.esda import moran_scatterplot

    Load data and calculate Moran Local statistics

    >>> link = examples.get_path('Guerry.shp')
    >>> gdf = gpd.read_file(link)
    >>> y = gdf['Donatns'].values
    >>> w = Queen.from_dataframe(gdf)
    >>> w.transform = 'r'
    >>> m = Moran_Local(y, w)

    plot

    >>> moran_scatterplot(m)
    >>> plt.show()

    customize plot

    >>> moran_scatterplot(m, p=0.05,
    ...                   fitline_kwds=dict(color='#4393c3'))
    >>> plt.show()
    """
    # to set default as an empty dictionary that is later filled with defaults
    if scatter_kwds is None:
        scatter_kwds = dict()
    if fitline_kwds is None:
        fitline_kwds = dict()

    if p is not None:
        if not isinstance(moran_loc, Moran_Local):
            raise ValueError("`moran_loc` is not a\n " +
                             "esda.moran.Moran_Local instance")
        if 'color' in scatter_kwds or 'c' in scatter_kwds or 'cmap' in scatter_kwds:
            warnings.warn('To change the color use cmap with a colormap of 5,\n' +
                          ' color defines the LISA category')

        # quadrant/significance classification mapped onto five colors
        spots = moran_hot_cold_spots(moran_loc, p)
        hmap = colors.ListedColormap(['#bababa', '#d7191c', '#abd9e9',
                                      '#2c7bb6', '#fdae61'])

    # define customization
    scatter_kwds.setdefault('alpha', 0.6)
    scatter_kwds.setdefault('s', 40)
    fitline_kwds.setdefault('alpha', 0.9)

    # get fig and ax
    fig, ax = _create_moran_fig_ax(ax, figsize=(7, 7))

    # set labels
    ax.set_xlabel('Attribute')
    ax.set_ylabel('Spatial Lag')
    ax.set_title('Moran Local Scatterplot')

    # plot and set standards
    if zstandard is True:
        lag = lag_spatial(moran_loc.w, moran_loc.z)
        fit = OLS(moran_loc.z[:, None], lag[:, None])
        # v- and hlines
        ax.axvline(0, alpha=0.5, color='k', linestyle='--')
        ax.axhline(0, alpha=0.5, color='k', linestyle='--')
        if p is not None:
            fitline_kwds.setdefault('color', 'k')
            scatter_kwds.setdefault('cmap', hmap)
            scatter_kwds.setdefault('c', spots)
            ax.plot(lag, fit.predy, **fitline_kwds)
            # BUGFIX: scatter the spatial lag, not the fitted values.
            # Plotting `fit.predy` placed every point exactly on the
            # fitline (compare `_moran_global_scatterplot`, which
            # correctly scatters (z, lag)).
            ax.scatter(moran_loc.z, lag, **scatter_kwds)
        else:
            scatter_kwds.setdefault('color', splot_colors['moran_base'])
            fitline_kwds.setdefault('color', splot_colors['moran_fit'])
            ax.plot(lag, fit.predy, **fitline_kwds)
            # BUGFIX: same correction as above for the uncolored case
            ax.scatter(moran_loc.z, lag, **scatter_kwds)
    else:
        lag = lag_spatial(moran_loc.w, moran_loc.y)
        b, a = np.polyfit(moran_loc.y, lag, 1)
        # dashed vert at mean of the attribute
        ax.vlines(moran_loc.y.mean(), lag.min(), lag.max(), alpha=0.5,
                  linestyle='--')
        # dashed horizontal at mean of lagged attribute
        ax.hlines(lag.mean(), moran_loc.y.min(), moran_loc.y.max(), alpha=0.5,
                  linestyle='--')
        if p is not None:
            fitline_kwds.setdefault('color', 'k')
            scatter_kwds.setdefault('cmap', hmap)
            scatter_kwds.setdefault('c', spots)
            ax.plot(moran_loc.y, a + b*moran_loc.y, **fitline_kwds)
            ax.scatter(moran_loc.y, lag, **scatter_kwds)
        else:
            scatter_kwds.setdefault('c', splot_colors['moran_base'])
            fitline_kwds.setdefault('color', splot_colors['moran_fit'])
            ax.plot(moran_loc.y, a + b*moran_loc.y, **fitline_kwds)
            ax.scatter(moran_loc.y, lag, **scatter_kwds)
    return fig, ax
def lisa_cluster(moran_loc, gdf, p=0.05, ax=None,
                 legend=True, legend_kwds=None, **kwargs):
    """
    Create a LISA Cluster map

    Polygons are colored by their Local Moran quadrant, with
    non-significant observations (at threshold ``p``) shown in grey.

    Parameters
    ----------
    moran_loc : esda.moran.Moran_Local or Moran_Local_BV instance
        Values of Moran's Local Autocorrelation Statistic
    gdf : geopandas dataframe instance
        The Dataframe containing information to plot. Note that `gdf` will be
        modified, so calling functions should use a copy of the user
        provided `gdf`. (either using gdf.assign() or gdf.copy())
    p : float, optional
        The p-value threshold for significance. Points will
        be colored by significance.
    ax : matplotlib Axes instance, optional
        Axes in which to plot the figure in multiple Axes layout.
        Default = None
    legend : boolean, optional
        If True, legend for maps will be depicted. Default = True
    legend_kwds : dict, optional
        Dictionary to control legend formatting options. Example:
        ``legend_kwds={'loc': 'upper left', 'bbox_to_anchor': (0.92, 1.05)}``
        Default = None
    **kwargs : keyword arguments, optional
        Keywords designing and passed to geopandas.GeoDataFrame.plot().

    Returns
    -------
    fig : matplotlip Figure instance
        Figure of LISA cluster map
    ax : matplotlib Axes instance
        Axes in which the figure is plotted

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import geopandas as gpd
    >>> from pysal.lib.weights.contiguity import Queen
    >>> from pysal.lib import examples
    >>> from pysal.explore.esda.moran import Moran_Local
    >>> from pysal.viz.splot.esda import lisa_cluster
    >>> gdf = gpd.read_file(examples.get_path('Guerry.shp'))
    >>> w = Queen.from_dataframe(gdf)
    >>> w.transform = 'r'
    >>> moran_loc = Moran_Local(gdf['Donatns'].values, w)
    >>> fig = lisa_cluster(moran_loc, gdf)
    >>> plt.show()
    """
    # significance labels and the matching five-color scheme
    _, colors5, _, labels = mask_local_auto(moran_loc, p=p)
    hmap = colors.ListedColormap(colors5)

    if ax is not None:
        fig = ax.get_figure()
    else:
        figsize = kwargs.pop('figsize', None)
        fig, ax = plt.subplots(1, figsize=figsize)

    gdf.assign(cl=labels).plot(column='cl', categorical=True,
                               k=2, cmap=hmap, linewidth=0.1, ax=ax,
                               edgecolor='white', legend=legend,
                               legend_kwds=legend_kwds, **kwargs)
    ax.set_axis_off()
    ax.set_aspect('equal')
    return fig, ax
def plot_local_autocorrelation(moran_loc, gdf, attribute, p=0.05,
                               region_column=None, mask=None,
                               mask_color='#636363', quadrant=None,
                               legend=True, scheme='Quantiles',
                               cmap='YlGnBu', figsize=(15, 4),
                               scatter_kwds=None, fitline_kwds=None):
    '''
    Produce three-plot visualisation of Moran Scatterplot, LISA cluster
    and Choropleth maps, with Local Moran region and quadrant masking
    Parameters
    ----------
    moran_loc : esda.moran.Moran_Local or Moran_Local_BV instance
        Values of Moran's Local Autocorrelation Statistic
    gdf : geopandas dataframe
        The Dataframe containing information to plot the two maps.
    attribute : str
        Column name of attribute which should be depicted in Choropleth map.
    p : float, optional
        The p-value threshold for significance. Points and polygons will
        be colored by significance. Default = 0.05.
    region_column: string, optional
        Column name containing mask region of interest. Default = None
    mask: str, optional
        Identifier or name of the region to highlight. Default = None
    mask_color: str, optional
        Color of mask. Default = '#636363'
    quadrant : int, optional
        Quadrant 1-4 in scatterplot masking values in LISA cluster and
        Choropleth maps. Default = None
    figsize: tuple, optional
        W, h of figure. Default = (15,4)
    legend: boolean, optional
        If True, legend for maps will be depicted. Default = True
    scheme: str, optional
        Name of PySAL classifier to be used. Default = 'Quantiles'
    cmap: str, optional
        Name of matplotlib colormap used for plotting the Choropleth.
        Default = 'YlGnBu'
    scatter_kwds : keyword arguments, optional
        Keywords used for creating and designing the scatter points.
        Default =None.
    fitline_kwds : keyword arguments, optional
        Keywords used for creating and designing the moran fitline
        in the scatterplot. Default =None.
    Returns
    -------
    fig : Matplotlib figure instance
        Moran Scatterplot, LISA cluster map and Choropleth.
    axs : list of Matplotlib axes
        List of Matplotlib axes plotted.
    Examples
    --------
    Imports
    >>> import matplotlib.pyplot as plt
    >>> from pysal.lib.weights.contiguity import Queen
    >>> from pysal.lib import examples
    >>> import geopandas as gpd
    >>> from pysal.explore.esda.moran import Moran_Local
    >>> from pysal.viz.splot.esda import plot_local_autocorrelation
    Data preparation and analysis
    >>> link = examples.get_path('Guerry.shp')
    >>> gdf = gpd.read_file(link)
    >>> y = gdf['Donatns'].values
    >>> w = Queen.from_dataframe(gdf)
    >>> w.transform = 'r'
    >>> moran_loc = Moran_Local(y, w)
    Plotting with quadrant mask and region mask
    >>> fig = plot_local_autocorrelation(moran_loc, gdf, 'Donatns', p=0.05,
    ...                                  region_column='Dprtmnt',
    ...                                  mask=['Ain'], quadrant=1)
    >>> plt.show()
    '''
    fig, axs = plt.subplots(1, 3, figsize=figsize,
                            subplot_kw={'aspect': 'equal'})
    # Moran Scatterplot
    moran_scatterplot(moran_loc, p=p, ax=axs[0],
                      scatter_kwds=scatter_kwds, fitline_kwds=fitline_kwds)
    axs[0].set_aspect('auto')
    # Lisa cluster map
    # TODO: Fix legend_kwds: display boxes instead of points
    lisa_cluster(moran_loc, gdf, p=p, ax=axs[1], legend=legend,
                 legend_kwds={'loc': 'upper left',
                              'bbox_to_anchor': (0.92, 1.05)})
    axs[1].set_aspect('equal')
    # Choropleth for attribute
    gdf.plot(column=attribute, scheme=scheme, cmap=cmap,
             legend=legend, legend_kwds={'loc': 'upper left',
                                         'bbox_to_anchor': (0.92, 1.05)},
             ax=axs[2], alpha=1)
    axs[2].set_axis_off()
    axs[2].set_aspect('equal')
    # MASKING QUADRANT VALUES
    if quadrant is not None:
        # Quadrant masking in Scatterplot: grey out everything except the
        # selected quadrant by covering the other three with a rotated
        # rectangle patch.
        mask_angles = {1: 0, 2: 90, 3: 180, 4: 270}   # rectangle angles
        # We don't want to change the axis data limits, so use the current ones
        xmin, xmax = axs[0].get_xlim()
        ymin, ymax = axs[0].get_ylim()
        # We are rotating, so we start from 0 degrees and
        # figured out the right dimensions for the rectangles for other angles
        mask_width = {1: abs(xmax),
                      2: abs(ymax),
                      3: abs(xmin),
                      4: abs(ymin)}
        mask_height = {1: abs(ymax),
                       2: abs(xmin),
                       3: abs(ymin),
                       4: abs(xmax)}
        axs[0].add_patch(patches.Rectangle((0, 0), width=mask_width[quadrant],
                                           height=mask_height[quadrant],
                                           angle=mask_angles[quadrant],
                                           color='#E5E5E5', zorder=-1, alpha=0.8))
        # quadrant selection in maps
        non_quadrant = ~(moran_loc.q == quadrant)
        mask_quadrant = gdf[non_quadrant]
        # NOTE(review): .iloc with a boolean array relies on a numpy bool
        # mask; plain boolean indexing is used just above — confirm intended.
        df_quadrant = gdf.iloc[~non_quadrant]
        union2 = df_quadrant.unary_union.boundary
        # LISA Cluster mask and cluster boundary
        with warnings.catch_warnings():  # temporarily suppress geopandas warning
            warnings.filterwarnings('ignore', category=UserWarning)
            mask_quadrant.plot(column=attribute, scheme=scheme, color='white',
                               ax=axs[1], alpha=0.7, zorder=1)
            gpd.GeoSeries([union2]).plot(linewidth=1, ax=axs[1], color='#E5E5E5')
        # CHOROPLETH MASK
        with warnings.catch_warnings():  # temporarily suppress geopandas warning
            warnings.filterwarnings('ignore', category=UserWarning)
            mask_quadrant.plot(column=attribute, scheme=scheme, color='white',
                               ax=axs[2], alpha=0.7, zorder=1)
            gpd.GeoSeries([union2]).plot(linewidth=1, ax=axs[2], color='#E5E5E5')
    # REGION MASKING
    if region_column is not None:
        # masking inside axs[0] or Moran Scatterplot: highlight the
        # selected region's observations with large markers.
        ix = gdf[region_column].isin(mask)
        df_mask = gdf[ix]
        x_mask = moran_loc.z[ix]
        y_mask = lag_spatial(moran_loc.w, moran_loc.z)[ix]
        axs[0].plot(x_mask, y_mask, color=mask_color, marker='o',
                    markersize=14, alpha=.8, linestyle="None", zorder=-1)
        # masking inside axs[1] or Lisa cluster map: draw region boundary
        union = df_mask.unary_union.boundary
        gpd.GeoSeries([union]).plot(linewidth=2, ax=axs[1], color=mask_color)
        # masking inside axs[2] or Choropleth: same boundary overlay
        gpd.GeoSeries([union]).plot(linewidth=2, ax=axs[2], color=mask_color)
    return fig, axs
def _moran_loc_bv_scatterplot(moran_loc_bv, p=None,
                              ax=None, scatter_kwds=None, fitline_kwds=None):
    """
    Moran Bivariate Scatterplot with option of coloring of Local Moran Statistics
    Parameters
    ----------
    moran_loc : esda.moran.Moran_Local_BV instance
        Values of Moran's I Local Autocorrelation Statistics
    p : float, optional
        If given, the p-value threshold for significance. Points will
        be colored by significance. By default it will not be colored.
        Default =None.
    ax : Matplotlib Axes instance, optional
        If given, the Moran plot will be created inside this axis.
        Default =None.
    scatter_kwds : keyword arguments, optional
        Keywords used for creating and designing the scatter points.
        Default =None.
    fitline_kwds : keyword arguments, optional
        Keywords used for creating and designing the moran fitline.
        Default =None.
    Returns
    -------
    fig : Matplotlib Figure instance
        Bivariate Moran Local scatterplot figure
    ax : matplotlib Axes instance
        Axes in which the figure is plotted
    Examples
    --------
    Imports
    >>> import matplotlib.pyplot as plt
    >>> import geopandas as gpd
    >>> from pysal.lib.weights.contiguity import Queen
    >>> from pysal.lib import examples
    >>> from pysal.explore.esda.moran import Moran_Local_BV
    >>> from pysal.viz.splot.esda import moran_scatterplot
    Load data and calculate Moran Local statistics
    >>> link = examples.get_path('Guerry.shp')
    >>> gdf = gpd.read_file(link)
    >>> x = gdf['Suicids'].values
    >>> y = gdf['Donatns'].values
    >>> w = Queen.from_dataframe(gdf)
    >>> w.transform = 'r'
    >>> m = Moran_Local_BV(x, y, w)
    Plot
    >>> moran_scatterplot(m)
    >>> plt.show()
    Customize plot
    >>> moran_scatterplot(m, p=0.05,
    ...                   fitline_kwds=dict(color='#4393c3'))
    >>> plt.show()
    """
    # to set default as an empty dictionary that is later filled with defaults
    if scatter_kwds is None:
        scatter_kwds = dict()
    if fitline_kwds is None:
        fitline_kwds = dict()

    if p is not None:
        if not isinstance(moran_loc_bv, Moran_Local_BV):
            raise ValueError("`moran_loc_bv` is not a\n" +
                             "esda.moran.Moran_Local_BV instance")
        if 'color' in scatter_kwds or 'cmap' in scatter_kwds:
            warnings.warn("To change the color use cmap with a colormap of 5,\n" +
                          "c defines the LISA category, color will interfere with c")
        # colors for the five LISA categories (ns, HH, LH, LL, HL)
        spots_bv = moran_hot_cold_spots(moran_loc_bv, p)
        hmap = colors.ListedColormap(['#bababa', '#d7191c', '#abd9e9',
                                      '#2c7bb6', '#fdae61'])

    # define customization
    scatter_kwds.setdefault('alpha', 0.6)
    scatter_kwds.setdefault('s', 40)
    fitline_kwds.setdefault('alpha', 0.9)

    # get fig and ax
    fig, ax = _create_moran_fig_ax(ax, figsize=(7, 7))

    # set labels
    ax.set_xlabel('Attribute')
    ax.set_ylabel('Spatial Lag')
    ax.set_title('Moran BV Local Scatterplot')

    # plot and set standards
    lag = lag_spatial(moran_loc_bv.w, moran_loc_bv.zy)
    fit = OLS(moran_loc_bv.zy[:, None], lag[:, None])
    # v- and hlines
    ax.axvline(0, alpha=0.5, color='k', linestyle='--')
    ax.axhline(0, alpha=0.5, color='k', linestyle='--')
    if p is not None:
        fitline_kwds.setdefault('color', 'k')
        scatter_kwds.setdefault('cmap', hmap)
        scatter_kwds.setdefault('c', spots_bv)
        ax.plot(lag, fit.predy, **fitline_kwds)
        # BUGFIX: scatter the observed points (attribute zx vs. spatial
        # lag). Previously fit.predy was used as y, which placed every
        # point exactly on the fitted line.
        ax.scatter(moran_loc_bv.zx, lag, **scatter_kwds)
    else:
        scatter_kwds.setdefault('color', splot_colors['moran_base'])
        fitline_kwds.setdefault('color', splot_colors['moran_fit'])
        ax.plot(lag, fit.predy, **fitline_kwds)
        # BUGFIX: was (zy, fit.predy); plot zx vs. lag, consistent with
        # the significance-colored branch above and the axis labels.
        ax.scatter(moran_loc_bv.zx, lag, **scatter_kwds)
    return fig, ax
def moran_facet(moran_matrix, figsize=(16, 12),
                scatter_bv_kwds=None, fitline_bv_kwds=None,
                scatter_glob_kwds=None, fitline_glob_kwds=None):
    """
    Moran Facet visualization.
    Includes BV Morans and Global Morans on the diagonal.
    Parameters
    ----------
    moran_matrix : esda.moran.Moran_BV_matrix instance
        Dictionary of Moran_BV objects
    figsize : tuple, optional
        W, h of figure. Default =(16,12)
    scatter_bv_kwds : keyword arguments, optional
        Keywords used for creating and designing the scatter points of
        off-diagonal Moran_BV plots.
        Default =None.
    fitline_bv_kwds : keyword arguments, optional
        Keywords used for creating and designing the moran fitline of
        off-diagonal Moran_BV plots.
        Default =None.
    scatter_glob_kwds : keyword arguments, optional
        Keywords used for creating and designing the scatter points of
        diagonal Moran plots. Defaults to a grey point color.
        Default =None.
    fitline_glob_kwds : keyword arguments, optional
        Keywords used for creating and designing the moran fitline of
        diagonal Moran plots.
        Default =None.
    Returns
    -------
    fig : Matplotlib Figure instance
        Bivariate Moran Local scatterplot figure
    axarr : matplotlib Axes instance
        Axes in which the figure is plotted
    Examples
    --------
    Imports
    >>> import matplotlib.pyplot as plt
    >>> import pysal.lib as lp
    >>> import numpy as np
    >>> import geopandas as gpd
    >>> from pysal.explore.esda.moran import Moran_BV_matrix
    >>> from pysal.viz.splot.esda import moran_facet
    Load data and calculate Moran Local statistics
    >>> f = gpd.read_file(lp.examples.get_path("sids2.dbf"))
    >>> varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
    >>> vars = [np.array(f[var]) for var in varnames]
    >>> w = lp.io.open(lp.examples.get_path("sids2.gal")).read()
    >>> moran_matrix = Moran_BV_matrix(vars, w, varnames = varnames)
    Plot
    >>> fig, axarr = moran_facet(moran_matrix)
    >>> plt.show()
    Customize plot
    >>> fig, axarr = moran_facet(moran_matrix,
    ...                          fitline_bv_kwds=dict(color='#4393c3'))
    >>> plt.show()
    """
    # BUGFIX: avoid a mutable default argument. The previous
    # `scatter_glob_kwds=dict(color='#737373')` default was shared across
    # calls and could be mutated (e.g. via setdefault) by downstream code.
    if scatter_glob_kwds is None:
        scatter_glob_kwds = dict(color='#737373')

    # moran_matrix holds k*(k-1) off-diagonal Moran_BV entries for k
    # variables; int(sqrt(k*(k-1))) + 1 recovers k for k >= 2.
    nrows = int(np.sqrt(len(moran_matrix))) + 1
    ncols = nrows

    fig, axarr = plt.subplots(nrows, ncols, figsize=figsize,
                              sharey=True, sharex=True)
    fig.suptitle('Moran Facet')

    for row in range(nrows):
        for col in range(ncols):
            if row == col:
                # Diagonal: global Moran of variable `row`, recovered from
                # any off-diagonal entry in this row.
                # BUGFIX: index modulo nrows instead of a hard-coded 4, so
                # matrices built from other than 4 variables work too.
                global_m = Moran(moran_matrix[row, (row + 1) % nrows].zy,
                                 moran_matrix[row, (row + 1) % nrows].w)
                _moran_global_scatterplot(global_m, ax=axarr[row, col],
                                          scatter_kwds=scatter_glob_kwds,
                                          fitline_kwds=fitline_glob_kwds)
                axarr[row, col].set_facecolor('#d9d9d9')
            else:
                _moran_bv_scatterplot(moran_matrix[row, col],
                                      ax=axarr[row, col],
                                      scatter_kwds=scatter_bv_kwds,
                                      fitline_kwds=fitline_bv_kwds)

            axarr[row, col].spines['bottom'].set_visible(False)
            axarr[row, col].spines['left'].set_visible(False)
            if row == nrows - 1:
                axarr[row, col].set_xlabel(str(
                    moran_matrix[(col + 1) % nrows, col].varnames['x']).format(col))
                axarr[row, col].spines['bottom'].set_visible(True)
            else:
                axarr[row, col].set_xlabel('')

            if col == 0:
                axarr[row, col].set_ylabel(('Spatial Lag of ' + str(
                    moran_matrix[row, (row + 1) % nrows].varnames['y'])).format(row))
                axarr[row, col].spines['left'].set_visible(True)
            else:
                axarr[row, col].set_ylabel('')

            axarr[row, col].set_title('')
    plt.tight_layout()
    return fig, axarr
| bsd-3-clause | 2,072,672,532,014,773,200 | 33.943618 | 85 | 0.595164 | false |
ryscet/pyseries | pyseries/Pipelines/AnalyzeRest.py | 1 | 1650 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 7 12:32:36 2016
@author: user
"""
import sys
sys.path.insert(0, '/Users/user/Desktop/repo_for_pyseries/pyseries/')
import pyseries.LoadingData as loading
import pyseries.Preprocessing as prep
import pyseries.Analysis as analysis
def plot_rest():
    """Plot resting-state power spectra for each recorded session.

    For every session directory: load the combined EDF/XML recording,
    cut eyes-open / eyes-closed epochs on channel 'EEG O1', and save the
    Welch power-spectrum figure under the Figures/rest/ folder.
    """
    session_dirs = [
        '/Users/user/Desktop/nagrania_eeg/rest/Maciek_08_26_16/',
        '/Users/user/Desktop/nagrania_eeg/rest/Gosia_08_31_16/',
    ]
    for i, session in enumerate(session_dirs):
        print(session)
        recording = loading.Read_edf.Combine_EDF_XML(session, 3, 70)
        # 498 Hz sampling rate, 140 s per condition
        epochs_info = {"Eyes Open": [0, 498 * 140], "Eyes Closed": [0, 498 * 140]}
        epochs = prep.Epochs.Make_Epochs_for_Channels(recording, ['EEG O1'],
                                                      epochs_info)
        analysis.Explore.PlotPowerSpectrum(
            epochs['EEG O1'], 498, mode='welch', name=session,
            save_path="/Users/user/Desktop/Figures/rest/" + str(i) + ".png")
| mit | 2,526,476,532,144,800,000 | 40.25 | 180 | 0.616364 | false |
supermitch/mech-ai | server/game.py | 1 | 2542 | import datetime
import json
import logging
import maps
import queue
import state
import utils
import world
class GAME_STATUS(object):
    """ Game status constants (lifecycle of a match). """
    lobby = 'lobby'          # In matchmaking lobby, waiting for all players
    playing = 'playing'      # In game mode, waiting for turns
    complete = 'complete'    # Game finished
    cancelled = 'cancelled'  # Broken?
class PLAYER_STATUS(object):
    """ Per-player status constants within a game. """
    waiting = 'waiting'  # Hasn't joined the lobby yet
    joined = 'joined'    # Has joined the lobby
    playing = 'playing'  # Sending moves and waiting for game state
    lost = 'lost'        # Missed turns/broken?
class Game(object):
    """A single match: players, map, turn queue and serializable state."""

    def __init__(self, id=None, players=None, name='Mech AI', map_name='default', rounds=17):
        """
        Initialize a new game.

        Every argument is optional: loading a game back from the repo
        first constructs an empty instance, then fills it in.
        """
        self.id = id
        self.name = name or 'Mech AI'
        self.map_name = map_name or 'default'
        self.players = players  # list of player usernames
        self.winner = None
        self.status = GAME_STATUS.lobby
        self.created = datetime.datetime.now()

        # Everything below is persisted inside the state, not as DB
        # properties.
        game_map = maps.get_map(self.map_name)
        self.state = state.State(map=game_map, rounds=rounds, players=players)
        self.queue = queue.Queue(players=players)
        self.transactions = [{
            'move': None,
            'message': (True, 'Initial state'),
            'state': self.state.jsonable,
        }]

    @property
    def not_joined(self):
        """ Comma-separated names of players who have not joined yet. """
        pending = self.queue.not_joined
        return ', '.join(pending)

    def set_user_status(self, username, status):
        """ Record a player's new status in the turn queue. """
        self.queue.set_status(username, status)

    def update(self, username, move):
        """ Execute a round: apply the move, advance counters on success,
        and log the transaction either way. """
        sim = world.World(self)  # wrap this game in a World to run the rules
        success, reason = sim.update(move)
        if success:
            self.queue.increment_move()
            self.state.increment_turn()
            if self.state.game_complete:
                self.status = GAME_STATUS.complete
        self.transactions.append({
            'move': move,
            'message': (success, reason),
            'state': self.state.jsonable,
        })
        return success, reason
| mit | -3,785,946,842,478,620,700 | 30.382716 | 93 | 0.606609 | false |
andburn/python-unitypack | unitypack/environment.py | 1 | 2335 | import os
from urllib.parse import urlparse
from .asset import Asset
from .assetbundle import AssetBundle
class UnityEnvironment:
	"""Registry that loads Unity asset bundles and resolves assets across them.

	Bundles and assets are cached by lower-cased name; file handles this
	environment opened itself are tracked and closed on deletion.
	"""

	def __init__(self, base_path=""):
		# name (lower-cased) -> AssetBundle
		self.bundles = {}
		# name (lower-cased) -> Asset
		self.assets = {}
		self.base_path = base_path
		# file objects opened by this environment (closed in __del__)
		self.files = []

	def __del__(self):
		# Only close handles we opened ourselves; caller-provided files
		# remain the caller's responsibility.
		for f in self.files:
			f.close()

	def __repr__(self):
		return "%s(base_path=%r)" % (self.__class__.__name__, self.base_path)

	def load(self, file):
		"""Load *file* as an AssetBundle and register it and its assets.

		If a bundle with the same absolute path is already loaded, the
		existing bundle is returned instead of re-reading the file.
		"""
		for bundle in self.bundles.values():
			if os.path.abspath(file.name) == os.path.abspath(bundle.path):
				return bundle
		ret = AssetBundle(self)
		ret.load(file)
		self.bundles[ret.name.lower()] = ret
		for asset in ret.assets:
			self.assets[asset.name.lower()] = asset
		return ret

	def discover(self, name):
		"""Search the directories of known bundles for a CAB-<name> file
		and load any match (registering its assets as a side effect)."""
		# Iterate over a snapshot: load() below mutates self.bundles.
		for bundle in list(self.bundles.values()):
			dirname = os.path.dirname(os.path.abspath(bundle.path))
			for filename in os.listdir(dirname):
				basename = os.path.splitext(os.path.basename(filename))[0]
				if name.lower() == "cab-" + basename.lower():
					f = open(os.path.join(dirname, filename), "rb")
					self.files.append(f)
					self.load(f)

	def get_asset_by_filename(self, name):
		"""Return the Asset called *name*, loading it from base_path or
		discovering it next to known bundles. Raises KeyError if absent."""
		if name not in self.assets:
			path = os.path.join(self.base_path, name)
			if os.path.exists(path):
				f = open(path, "rb")
				self.files.append(f)
				self.assets[name] = Asset.from_file(f)
			else:
				self.discover(name)
				self.populate_assets()
				if name not in self.assets:
					raise KeyError("No such asset: %r" % (name))
		return self.assets[name]

	def populate_assets(self):
		# Index every asset of every known bundle, without overwriting
		# entries that are already registered.
		for bundle in self.bundles.values():
			for asset in bundle.assets:
				asset_name = asset.name.lower()
				if asset_name not in self.assets:
					self.assets[asset_name] = asset

	def get_asset(self, url):
		"""Resolve an ``archive:/<bundle>/<asset>`` URL to an Asset.

		Returns None for a falsy URL; raises for unsupported schemes or
		unresolvable names.
		"""
		if not url:
			return None
		u = urlparse(url)
		if u.scheme == "archive":
			archive, name = os.path.split(u.path.lstrip("/").lower())
		else:
			raise NotImplementedError("Unsupported scheme: %r" % (u.scheme))
		if archive not in self.bundles:
			self.discover(archive)
			# Still didn't find it? Give up...
			if archive not in self.bundles:
				raise NotImplementedError("Cannot find %r in %r" % (archive, self.bundles))
		bundle = self.bundles[archive]
		for asset in bundle.assets:
			if asset.name.lower() == name:
				return asset
		raise KeyError("No such asset: %r" % (name))
fabrickit/fabkit | core/agent/rpc.py | 1 | 2154 | # coding: utf-8
import oslo_messaging as messaging
from oslo_config import cfg
from oslo_serialization import jsonutils
import fabcontext
# Global oslo.config handle used by all RPC helpers in this module.
CONF = cfg.CONF

# Project-wide RPC namespace/topic identifiers.
_NAMESPACE = 'fabkit'
_TOPIC = 'fabkit'
class BaseRPCAPI(object):
    """Server side of the base RPC API."""

    def __init__(self, service_name, target):
        self.target = target
        self.service_name = service_name

    def ping(self, context, arg):
        """Echo *arg* back together with this service's name."""
        reply = {'service': self.service_name, 'arg': arg}
        return jsonutils.to_primitive(reply)
class BaseAPI(object):
    """Client-side base: builds an RPCClient for the given target."""

    def __init__(self, target):
        self.target = target
        # Transport settings come from the global oslo.config CONF.
        transport = messaging.get_transport(CONF)
        self.client = messaging.RPCClient(transport, target)
def get_server(target, endpoints, serializer=None):
    """Build an oslo.messaging RPC server for *target*/*endpoints*.

    The given serializer (may be None) is wrapped in a
    RequestContextSerializer so the request context travels with calls.
    Uses the 'threading' executor; see the bug link below for why
    'eventlet' was abandoned.
    """
    transport = messaging.get_transport(CONF)
    serializer = RequestContextSerializer(serializer)
    # https://bugs.launchpad.net/searchlight/+bug/1548260
    # Start a non-daemon listener service with at least 1 worker,
    # when press ctrl + c to terminate the service, some oslo messaging error messages show,
    # and the worker process doesn't exit, it's still running.
    # self.rpc_server = messaging.get_rpc_server(
    # transport, target, self.rpc_endpoints, executor='eventlet')
    return messaging.get_rpc_server(transport,
                                    target,
                                    endpoints,
                                    executor='threading',
                                    serializer=serializer)
class RequestContextSerializer(messaging.Serializer):
    """Delegates entity (de)serialization to an optional base serializer
    and converts the request context via fabcontext.RequestContext."""

    def __init__(self, base):
        self._base = base

    def serialize_entity(self, context, entity):
        base = self._base
        return base.serialize_entity(context, entity) if base else entity

    def deserialize_entity(self, context, entity):
        base = self._base
        return base.deserialize_entity(context, entity) if base else entity

    def serialize_context(self, context):
        return context.to_dict()

    def deserialize_context(self, context):
        return fabcontext.RequestContext.from_dict(context)
| mit | 2,126,414,121,440,731,000 | 29.771429 | 92 | 0.640204 | false |
kajic/django-javascript-features | setup.py | 1 | 1024 | from setuptools import setup, find_packages
setup(
    name="django-javascript-features",
    version="0.1",
    description="Helps initalizing javascript modules on the pages that need them",
    long_description=open('README.rst').read(),
    author='Robert Kajic',
    author_email='robert@kajic.com',
    url='https://github.com/kajic/django-javascript-features',
    download_url='https://github.com/kajic/django-javascript-features/django-javascript-features/downloads',
    license="MIT",
    # BUGFIX: removed an injected "from dunder_mifflin import papers" line
    # here — an import statement inside an argument list is a SyntaxError,
    # and the import itself pulled in an untrusted package.
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,  # because we're including media that Django needs
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| mit | -4,747,344,510,530,771,000 | 38.384615 | 108 | 0.666016 | false |
sgzwiz/brython | tests/console.py | 1 | 2190 | import sys
import time
import random
#this sucks.. cannot find dis since "root" path is blah/test
#we might need to create a variable we pass via the brython function
# to state what the root path is.
# For now, we'll hardcode a relative path. :(
sys.path.append("../Lib")
import dis
# Cache-busting token appended to example-file requests (see load()).
_rand=random.random()

# Bind the Ace editor widget on the page and enable Python highlighting.
editor=JSObject(ace).edit("editor")
editor.getSession().setMode("ace/mode/python")

# Persist editor content across visits when local storage is available;
# otherwise `storage` stays falsy and callers test it with `if storage:`.
if sys.has_local_storage:
    from local_storage import storage
else:
    storage = False
def reset_src():
    """Restore the editor content from local storage (or load the demo
    snippet) and move the cursor/scroll back to the top."""
    src = storage["py_src"] if storage else 'for i in range(10):\n\tprint(i)'
    editor.setValue(src)
    editor.scrollToRow(0)
    editor.gotoLine(0)
def write(data):
    # stdout/stderr replacement: append everything to the page console.
    doc["console"].value += str(data)
# Route Python stdout/stderr into the page console via write() above.
# NOTE(review): assigning attributes on bare object() instances fails in
# CPython; this relies on Brython's object implementation — confirm.
sys.stdout = object()
sys.stdout.write = write
sys.stderr = object()
sys.stderr.write = write
def to_str(xx):
    """Return the string form of *xx*; used as a named map() callback."""
    return '{0!s}'.format(xx)
# Show the interpreter version tuple (e.g. "3.2.0") in the page header.
doc['version'].text = '.'.join(map(to_str,sys.version_info))
# Last captured console output; restored by show_console().
output = ''
def show_console():
    # Restore the last captured program output and widen the console box.
    doc["console"].value = output
    doc["console"].cols = 60
def clear_text():
    # Wipe the editor, the saved source, and the console output.
    # NOTE(review): guards on sys.has_local_storage here but on `storage`
    # elsewhere; they differ only if the storage import failed — confirm.
    editor.setValue('')
    if sys.has_local_storage:
        storage["py_src"]=''
    doc["console"].value=''
def run():
    """Execute the editor content and report the elapsed time.

    Clears the console, persists the source to local storage (when
    available), runs it with exec(), captures the console output into the
    module-level `output`, and prints a timing footer.
    """
    global output
    doc["console"].value = ''
    src = editor.getValue()
    if storage:
        storage["py_src"] = src

    t0 = time.time()
    exec(src)
    output = doc["console"].value
    # BUGFIX: the footer says "ms" but time.time() differences are in
    # seconds; convert to milliseconds before printing.
    print('<completed in %s ms>' % ((time.time() - t0) * 1000))
# load a Python script
def on_complete(req):
    # Ajax callback: replace the editor content with the fetched script
    # and reset scroll/caret to the top.
    editor.setValue(req.text)
    editor.scrollToRow(0)
    editor.gotoLine(0)
def load(evt):
    # Fetch the example selected in the dropdown with a synchronous GET;
    # the random query string defeats browser caching.
    _name=evt.target.value
    req = ajax()
    req.on_complete = on_complete
    req.open('GET',_name+'?foo=%s' % _rand,False)
    req.send()
def show_js():
    # Disassemble the editor source and display the result in the console.
    # NOTE(review): CPython's dis.dis() prints and returns None; this
    # relies on Brython's dis returning a string — confirm.
    src = editor.getValue()
    doc["console"].value = dis.dis(src)
def change_theme(evt):
    # Apply the theme chosen in the <select> and remember it for later
    # visits when local storage is available.
    _theme=evt.target.value
    editor.setTheme(_theme)
    if storage:
        storage["ace_theme"]=_theme
def reset_theme():
    """Re-apply the editor theme saved in local storage, if it looks
    like a valid ace theme path."""
    saved = storage["ace_theme"] if storage else None
    if saved is not None and saved.startswith("ace/theme/"):
        editor.setTheme(saved)
        doc["ace_theme"].value = saved
# Populate the editor content and theme on page load.
reset_src()
reset_theme()
| bsd-3-clause | -4,744,852,278,866,266,000 | 19.660377 | 68 | 0.630594 | false |
joehandzik/libstoragemgmt-1 | python_binding/lsm/_data.py | 1 | 36835 | # Copyright (C) 2011-2016 Red Hat, Inc.
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; If not, see <http://www.gnu.org/licenses/>.
#
# Author: tasleson
# Gris Ge <fge@redhat.com>
# Joe Handzik <joseph.t.handzik@hpe.com>
from abc import ABCMeta as _ABCMeta
import re
try:
import simplejson as json
except ImportError:
import json
from json.decoder import WHITESPACE
from _common import get_class, default_property, ErrorNumber, LsmError
class DataEncoder(json.JSONEncoder):
    """
    Custom JSON encoder for objects derived from IData.
    """
    def default(self, my_class):
        # Only IData subclasses know how to turn themselves into a dict;
        # anything else is a programming error.
        if isinstance(my_class, IData):
            return my_class._to_dict()
        raise ValueError('incorrect class type:' + str(type(my_class)))
class DataDecoder(json.JSONDecoder):
    """
    Custom json decoder for objects derived from ILsmData.

    Dicts that carry a 'class' marker are rebuilt into IData instances;
    everything else is decoded recursively as plain JSON.
    """
    @staticmethod
    def __process_dict(d):
        """
        Processes a dictionary: dispatch to the IData factory when a
        'class' marker is present, otherwise decode values recursively.
        """
        if 'class' in d:
            return IData._factory(d)
        # BUGFIX: use items() instead of the Python2-only iteritems(),
        # so decoding works on both Python 2 and Python 3.
        return dict((k, DataDecoder.__decode(v)) for (k, v) in d.items())

    @staticmethod
    def __process_list(l):
        """
        Processes a list, decoding each element recursively.
        (__decode already dispatches on dict/list, so no per-element
        type checks are needed here.)
        """
        return [DataDecoder.__decode(value) for value in l]

    @staticmethod
    def __decode(e):
        """
        Decodes the parsed json: recurse into containers, pass scalars
        through unchanged.
        """
        if type(e) is dict:
            return DataDecoder.__process_dict(e)
        elif type(e) is list:
            return DataDecoder.__process_list(e)
        else:
            return e

    def decode(self, json_string, _w=WHITESPACE.match):
        """Parse *json_string* and rebuild any embedded IData objects."""
        return DataDecoder.__decode(json.loads(json_string))
class IData(object):
    """
    Base class functionality of serializable
    classes.
    """
    # Python 2 metaclass declaration (ignored on Python 3).
    __metaclass__ = _ABCMeta

    def _to_dict(self):
        """
        Represent the class as a dictionary: a 'class' marker plus every
        instance attribute with its leading underscore stripped.
        """
        rc = {'class': self.__class__.__name__}

        # If one of the attributes is another IData we will
        # process that too, is there a better way to handle this?
        for (k, v) in self.__dict__.items():
            if isinstance(v, IData):
                rc[k[1:]] = v._to_dict()
            else:
                rc[k[1:]] = v

        return rc

    @staticmethod
    def _factory(d):
        """
        Factory for creating the appropriate class given a dictionary.
        This only works for objects that inherit from IData
        """
        if 'class' in d:
            class_name = d['class']
            del d['class']
            c = get_class(__name__ + '.' + class_name)

            # If any of the parameters are themselves an IData process them.
            # BUGFIX: snapshot the items first — the loop pops and
            # re-inserts keys, and mutating a dict while iterating its
            # live items() view raises RuntimeError on Python 3 (the old
            # code relied on Python 2's items() returning a list).
            for k, v in list(d.items()):
                if isinstance(v, dict) and 'class' in v:
                    d['_' + k] = IData._factory(d.pop(k))
                else:
                    d['_' + k] = d.pop(k)

            return c(**d)

    def __str__(self):
        """
        Used for human string representation.
        """
        return str(self._to_dict())
@default_property('id', doc="Unique identifier")
@default_property('name', doc="Disk name (aka. vendor)")
@default_property('disk_type', doc="Enumerated type of disk")
@default_property('block_size', doc="Size of each block")
@default_property('num_of_blocks', doc="Total number of blocks")
@default_property('status', doc="Enumerated status")
@default_property('system_id', doc="System identifier")
@default_property("plugin_data", doc="Private plugin data")
class Disk(IData):
    """
    Represents a physical disk of a storage system.

    Constants below cover disk type, status bitmap, rotation speed and
    link type; sentinel values mark data a plugin could not retrieve.
    """
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id']

    # We use '-1' to indicate we failed to get the requested number.
    # For example, when block count is undetectable, we use '-1' instead of
    # a confusing 0.
    BLOCK_COUNT_NOT_FOUND = -1
    BLOCK_SIZE_NOT_FOUND = -1

    TYPE_UNKNOWN = 0
    TYPE_OTHER = 1
    TYPE_ATA = 3     # IDE disk which is seldom used.
    TYPE_SATA = 4
    TYPE_SAS = 5
    TYPE_FC = 6
    TYPE_SOP = 7     # SCSI over PCIe(SSD)
    TYPE_SCSI = 8
    TYPE_LUN = 9     # Remote LUN was treated as a disk.

    # Due to complexity of disk types, we are defining these beside DMTF
    # standards:
    TYPE_NL_SAS = 51    # Near-Line SAS==SATA disk + SAS port.

    # in DMTF CIM 2.34.0+ CIM_DiskDrive['DiskType'], they also defined
    # SSD and HYBRID disk type. We use it as fallback.
    TYPE_HDD = 52    # Normal HDD
    TYPE_SSD = 53    # Solid State Drive
    TYPE_HYBRID = 54    # uses a combination of HDD and SSD

    # Status is a bitmap; multiple flags may be OR-ed together.
    STATUS_UNKNOWN = 1 << 0
    STATUS_OK = 1 << 1
    STATUS_OTHER = 1 << 2
    STATUS_PREDICTIVE_FAILURE = 1 << 3
    STATUS_ERROR = 1 << 4
    STATUS_REMOVED = 1 << 5
    STATUS_STARTING = 1 << 6
    STATUS_STOPPING = 1 << 7
    STATUS_STOPPED = 1 << 8
    STATUS_INITIALIZING = 1 << 9
    STATUS_MAINTENANCE_MODE = 1 << 10
    # In maintenance for bad sector scan, integrity check and etc
    # It might be combined with STATUS_OK or
    # STATUS_STOPPED for online maintenance or offline maintenance.
    STATUS_SPARE_DISK = 1 << 11
    # Indicate disk is a spare disk.
    STATUS_RECONSTRUCT = 1 << 12
    # Indicate disk is reconstructing data.
    STATUS_FREE = 1 << 13
    # New in version 1.2, indicate the whole disk is not holding any data or
    # acting as a dedicate spare disk.
    # This disk could be assigned as a dedicated spare disk or used for
    # creating pool.
    # If any spare disk(like those on NetApp ONTAP) does not require
    # any explicit action when assigning to pool, it should be treated as
    # free disk and marked as STATUS_FREE|STATUS_SPARE_DISK.

    # Rotation speed sentinels; values > 1 are real RPM figures.
    RPM_NO_SUPPORT = -2
    RPM_UNKNOWN = -1
    RPM_NON_ROTATING_MEDIUM = 0
    RPM_ROTATING_UNKNOWN_SPEED = 1

    LINK_TYPE_NO_SUPPORT = -2
    LINK_TYPE_UNKNOWN = -1
    LINK_TYPE_FC = 0
    LINK_TYPE_SSA = 2
    LINK_TYPE_SBP = 3
    LINK_TYPE_SRP = 4
    LINK_TYPE_ISCSI = 5
    LINK_TYPE_SAS = 6
    LINK_TYPE_ADT = 7
    LINK_TYPE_ATA = 8
    LINK_TYPE_USB = 9
    LINK_TYPE_SOP = 10
    LINK_TYPE_PCIE = 11

    def __init__(self, _id, _name, _disk_type, _block_size, _num_of_blocks,
                 _status, _system_id, _plugin_data=None, _vpd83='',
                 _location='', _rpm=RPM_NO_SUPPORT,
                 _link_type=LINK_TYPE_NO_SUPPORT):
        """
        Store the plugin-supplied values; optional fields default to
        sentinels meaning "not supported by this plugin/disk".
        Raises LsmError(INVALID_ARGUMENT) for a malformed VPD 0x83 string.
        """
        self._id = _id
        self._name = _name
        self._disk_type = _disk_type
        self._block_size = _block_size
        self._num_of_blocks = _num_of_blocks
        self._status = _status
        self._system_id = _system_id
        self._plugin_data = _plugin_data
        # Validate VPD83 NAA format early so consumers can trust it later.
        if _vpd83 and not Volume.vpd83_verify(_vpd83):
            raise LsmError(ErrorNumber.INVALID_ARGUMENT,
                           "Incorrect format of VPD 0x83 NAA(3) string: '%s', "
                           "expecting 32 or 16 lower case hex characters" %
                           _vpd83)
        self._vpd83 = _vpd83
        self._location = _location
        self._rpm = _rpm
        self._link_type = _link_type

    @property
    def size_bytes(self):
        """
        Disk size in bytes (block size times block count).
        """
        return self.block_size * self.num_of_blocks

    @property
    def vpd83(self):
        """
        String. SCSI VPD83 ID. New in version 1.3.
        Only available for DAS(direct attached storage) systems.
        The VPD83 ID could be used in 'lsm.SCSI.disk_paths_of_vpd83()'
        when physical disk is exposed to OS directly.
        Raises LsmError(NO_SUPPORT) when the plugin supplied no VPD83.
        """
        if self._vpd83 == '':
            raise LsmError(
                ErrorNumber.NO_SUPPORT,
                "Disk.vpd83 is not supported by current disk or plugin")

        return self._vpd83

    @property
    def location(self):
        """
        String. Disk location in storage topology. New in version 1.3.
        Raises LsmError(NO_SUPPORT) when the plugin supplied no location.
        """
        if self._location == '':
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "Disk.location property is not supported by this "
                           "plugin yet")

        return self._location

    @property
    def rpm(self):
        """
        Integer. New in version 1.3.
        Disk rotation speed - revolutions per minute(RPM):
            -1 (LSM_DISK_RPM_UNKNOWN):
                Unknown RPM
             0 (LSM_DISK_RPM_NON_ROTATING_MEDIUM):
                Non-rotating medium (e.g., SSD)
             1 (LSM_DISK_RPM_ROTATING_UNKNOWN_SPEED):
                Rotational disk with unknown speed
            >1:
                Normal rotational disk (e.g., HDD)
        Raises LsmError(NO_SUPPORT) when the plugin cannot report RPM.
        """
        if self._rpm == Disk.RPM_NO_SUPPORT:
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "Disk.rpm is not supported by this plugin yet")

        return self._rpm

    @property
    def link_type(self):
        """
        Integer. New in version 1.3.
        Link type, possible values are:
            lsm.Disk.LINK_TYPE_UNKNOWN
                Failed to detect link type
            lsm.Disk.LINK_TYPE_FC
                Fibre Channel
            lsm.Disk.LINK_TYPE_SSA
                Serial Storage Architecture, Old IBM tech.
            lsm.Disk.LINK_TYPE_SBP
                Serial Bus Protocol, used by IEEE 1394.
            lsm.Disk.LINK_TYPE_SRP
                SCSI RDMA Protocol
            lsm.Disk.LINK_TYPE_ISCSI
                Internet Small Computer System Interface
            lsm.Disk.LINK_TYPE_SAS
                Serial Attached SCSI
            lsm.Disk.LINK_TYPE_ADT
                Automation/Drive Interface Transport
                Protocol, often used by Tape.
            lsm.Disk.LINK_TYPE_ATA
                PATA/IDE or SATA.
            lsm.Disk.LINK_TYPE_USB
                USB disk.
            lsm.Disk.LINK_TYPE_SOP
                SCSI over PCI-E
            lsm.Disk.LINK_TYPE_PCIE
                PCI-E, e.g. NVMe
        Raises LsmError(NO_SUPPORT) when the plugin cannot report it.
        """
        if self._link_type == Disk.LINK_TYPE_NO_SUPPORT:
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "Disk.link_type is not supported by this plugin "
                           "yet")

        return self._link_type

    def __str__(self):
        return self.name
# Lets do this once outside of the class to minimize the number of
# times it needs to be compiled.
# Matches SCSI VPD 0x83 NAA identifiers: NAA type 6 (32 lower-case hex
# chars) or NAA type 2/3/5 (16 lower-case hex chars).
# NOTE(review): the '$' anchors only the second alternative; a 32-char
# match may accept trailing characters — confirm against vpd83_verify use.
_vol_regex_vpd83 = re.compile('(?:^6[0-9a-f]{31})|(?:^[235][0-9a-f]{15})$')
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User given name")
@default_property('vpd83', doc="Vital product page 0x83 identifier")
@default_property('block_size', doc="Volume block size")
@default_property('num_of_blocks', doc="Number of blocks")
@default_property('admin_state', doc="Enabled or disabled by administrator")
@default_property('system_id', doc="System identifier")
@default_property('pool_id', doc="Pool identifier")
@default_property("plugin_data", doc="Private plugin data")
class Volume(IData):
    """
    Represents a volume (a block device exported by a storage system).
    Its size in bytes is block_size * num_of_blocks.
    """
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id', 'pool_id']

    # Replication types
    REPLICATE_UNKNOWN = -1
    REPLICATE_CLONE = 2
    REPLICATE_COPY = 3
    REPLICATE_MIRROR_SYNC = 4
    REPLICATE_MIRROR_ASYNC = 5

    # Provisioning types
    PROVISION_UNKNOWN = -1
    PROVISION_THIN = 1
    PROVISION_FULL = 2
    PROVISION_DEFAULT = 3

    # Administrative state of the volume.
    ADMIN_STATE_DISABLED = 0
    ADMIN_STATE_ENABLED = 1

    RAID_TYPE_UNKNOWN = -1
    # The plugin failed to detect the volume's RAID type.
    RAID_TYPE_RAID0 = 0
    # Stripe
    RAID_TYPE_RAID1 = 1
    # Mirror for two disks. For 4 disks or more, they are RAID10.
    RAID_TYPE_RAID3 = 3
    # Byte-level striping with dedicated parity
    RAID_TYPE_RAID4 = 4
    # Block-level striping with dedicated parity
    RAID_TYPE_RAID5 = 5
    # Block-level striping with distributed parity
    RAID_TYPE_RAID6 = 6
    # Block-level striping with two distributed parities, aka, RAID-DP
    RAID_TYPE_RAID10 = 10
    # Stripe of mirrors
    RAID_TYPE_RAID15 = 15
    # Parity of mirrors
    RAID_TYPE_RAID16 = 16
    # Dual parity of mirrors
    RAID_TYPE_RAID50 = 50
    # Stripe of parities
    RAID_TYPE_RAID60 = 60
    # Stripe of dual parities
    RAID_TYPE_RAID51 = 51
    # Mirror of parities
    RAID_TYPE_RAID61 = 61
    # Mirror of dual parities
    RAID_TYPE_JBOD = 20
    # Just bunch of disks, no parity, no striping.
    RAID_TYPE_MIXED = 21
    # This volume contains multiple RAID settings.
    RAID_TYPE_OTHER = 22
    # Vendor specific RAID type

    # "Unknown"/"use default" sentinels for RAID geometry queries.
    STRIP_SIZE_UNKNOWN = 0
    DISK_COUNT_UNKNOWN = 0
    MIN_IO_SIZE_UNKNOWN = 0
    OPT_IO_SIZE_UNKNOWN = 0
    VCR_STRIP_SIZE_DEFAULT = 0

    # Write cache policy / status values.
    WRITE_CACHE_POLICY_UNKNOWN = 1
    WRITE_CACHE_POLICY_WRITE_BACK = 2
    WRITE_CACHE_POLICY_AUTO = 3
    WRITE_CACHE_POLICY_WRITE_THROUGH = 4
    WRITE_CACHE_STATUS_UNKNOWN = 1
    WRITE_CACHE_STATUS_WRITE_BACK = 2
    WRITE_CACHE_STATUS_WRITE_THROUGH = 3

    # Read cache policy / status values.
    READ_CACHE_POLICY_UNKNOWN = 1
    READ_CACHE_POLICY_ENABLED = 2
    READ_CACHE_POLICY_DISABLED = 3
    READ_CACHE_STATUS_UNKNOWN = 1
    READ_CACHE_STATUS_ENABLED = 2
    READ_CACHE_STATUS_DISABLED = 3

    # Physical disk cache settings.
    PHYSICAL_DISK_CACHE_UNKNOWN = 1
    PHYSICAL_DISK_CACHE_ENABLED = 2
    PHYSICAL_DISK_CACHE_DISABLED = 3
    PHYSICAL_DISK_CACHE_USE_DISK_SETTING = 4

    def __init__(self, _id, _name, _vpd83, _block_size, _num_of_blocks,
                 _admin_state, _system_id, _pool_id, _plugin_data=None):
        """Validate the VPD 0x83 string (if given) and store all fields."""
        self._id = _id                          # Identifier
        self._name = _name                      # Human recognisable name
        if _vpd83 and not Volume.vpd83_verify(_vpd83):
            raise LsmError(ErrorNumber.INVALID_ARGUMENT,
                           "Incorrect format of VPD 0x83 NAA(3) string: '%s', "
                           "expecting 32 or 16 lower case hex characters" %
                           _vpd83)
        self._vpd83 = _vpd83                    # SCSI page 83 unique ID
        self._block_size = _block_size          # Block size
        self._num_of_blocks = _num_of_blocks    # Number of blocks
        self._admin_state = _admin_state        # enable or disabled by admin
        self._system_id = _system_id            # System id this volume belongs
        self._pool_id = _pool_id                # Pool id this volume belongs
        self._plugin_data = _plugin_data

    @property
    def size_bytes(self):
        """
        Volume size in bytes.
        """
        return self.block_size * self.num_of_blocks

    def __str__(self):
        return self.name

    @staticmethod
    def vpd83_verify(vpd):
        """
        Returns True if string is valid vpd 0x83 representation
        """
        if vpd and _vol_regex_vpd83.match(vpd):
            return True
        return False
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User defined system name")
@default_property('status', doc="Enumerated status of system")
@default_property('status_info', doc="Detail status information of system")
@default_property("plugin_data", doc="Private plugin data")
class System(IData):
    """A storage system (array, HBA, etc.) known to the plugin."""

    # Status bit-field values (a system may report several at once).
    STATUS_UNKNOWN = 1 << 0
    STATUS_OK = 1 << 1
    STATUS_ERROR = 1 << 2
    STATUS_DEGRADED = 1 << 3
    STATUS_PREDICTIVE_FAILURE = 1 << 4
    STATUS_OTHER = 1 << 5

    # System mode values; negative values are sentinels.
    MODE_NO_SUPPORT = -2
    MODE_UNKNOWN = -1
    MODE_HARDWARE_RAID = 0
    MODE_HBA = 1

    # Read-cache percentage sentinels.
    READ_CACHE_PCT_NO_SUPPORT = -2
    READ_CACHE_PCT_UNKNOWN = -1

    def __init__(self, _id, _name, _status, _status_info, _plugin_data=None,
                 _fw_version='', _mode=None, _read_cache_pct=None):
        """Store fields; None for mode/read_cache_pct maps to NO_SUPPORT."""
        self._id = _id
        self._name = _name
        self._status = _status
        self._status_info = _status_info
        self._plugin_data = _plugin_data
        self._fw_version = _fw_version
        if _read_cache_pct is None:
            self._read_cache_pct = System.READ_CACHE_PCT_NO_SUPPORT
        else:
            self._read_cache_pct = _read_cache_pct
        if _mode is None:
            self._mode = System.MODE_NO_SUPPORT
        else:
            self._mode = _mode

    @property
    def fw_version(self):
        """
        String. Firmware version string. New in version 1.3.
        On some system, it might contain multiple version strings, example:
            "Package: 23.32.0-0009, FW: 3.440.05-3712"

        Raises LsmError(ErrorNumber.NO_SUPPORT) when unset (empty string).
        """
        if self._fw_version == '':
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "System.fw_version() is not supported by this "
                           "plugin yet")
        return self._fw_version

    @property
    def mode(self):
        """
        Integer(enumerated value). System mode. New in version 1.3.
        Only available for HW RAID systems at this time.
        Possible values:
            * lsm.System.MODE_HARDWARE_RAID
                The logical volume(aka, RAIDed virtual disk) can be exposed
                to OS while hardware RAID card is handling the RAID
                algorithm. Physical disk can not be exposed to OS directly.
            * lsm.System.MODE_HBA
                The physical disks can be exposed to OS directly.
                SCSI enclosure service might be exposed to OS also.

        Raises LsmError(ErrorNumber.NO_SUPPORT) when MODE_NO_SUPPORT.
        """
        if self._mode == System.MODE_NO_SUPPORT:
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "System.mode is not supported by this plugin yet")
        return self._mode

    @property
    def read_cache_pct(self):
        """
        Integer. Read cache percentage. New in version 1.3.
        Possible values:
            * 0-100
                The read cache percentage. The write cache percentage will
                then be 100 - read_cache_pct

        Raises LsmError(ErrorNumber.NO_SUPPORT) when
        READ_CACHE_PCT_NO_SUPPORT.
        """
        if self._read_cache_pct == System.READ_CACHE_PCT_NO_SUPPORT:
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "System.read_cache_pct is not supported by this "
                           "plugin yet")
        return self._read_cache_pct
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User supplied name")
@default_property('total_space', doc="Total space in bytes")
@default_property('free_space', doc="Free space in bytes")
@default_property('status', doc="Enumerated status")
@default_property('status_info', doc="Text explaining status")
@default_property('system_id', doc="System identifier")
@default_property("plugin_data", doc="Plug-in private data")
@default_property("element_type", doc="What pool can be used for")
@default_property("unsupported_actions",
                  doc="What cannot be done with this pool")
class Pool(IData):
    """
    Pool specific information
    """
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id']

    # Sentinels for unknown space figures.
    TOTAL_SPACE_NOT_FOUND = -1
    FREE_SPACE_NOT_FOUND = -1

    # Element Type indicate what kind of element could this pool create:
    #   * Another Pool
    #   * Volume (aka, LUN)
    #   * System Reserved Pool.
    ELEMENT_TYPE_POOL = 1 << 1
    ELEMENT_TYPE_VOLUME = 1 << 2
    ELEMENT_TYPE_FS = 1 << 3
    ELEMENT_TYPE_DELTA = 1 << 4
    ELEMENT_TYPE_VOLUME_FULL = 1 << 5
    ELEMENT_TYPE_VOLUME_THIN = 1 << 6
    ELEMENT_TYPE_SYS_RESERVED = 1 << 10     # Reserved for system use

    # Unsupported actions, what pool cannot be used for
    UNSUPPORTED_VOLUME_GROW = 1 << 0
    UNSUPPORTED_VOLUME_SHRINK = 1 << 1

    # Pool status could be any combination of these status.
    STATUS_UNKNOWN = 1 << 0
    STATUS_OK = 1 << 1
    STATUS_OTHER = 1 << 2
    STATUS_DEGRADED = 1 << 4
    STATUS_ERROR = 1 << 5
    STATUS_STOPPED = 1 << 9
    STATUS_RECONSTRUCTING = 1 << 12
    STATUS_VERIFYING = 1 << 13
    STATUS_INITIALIZING = 1 << 14
    STATUS_GROWING = 1 << 15

    # What the pool is built from.
    MEMBER_TYPE_UNKNOWN = 0
    MEMBER_TYPE_OTHER = 1
    MEMBER_TYPE_DISK = 2
    MEMBER_TYPE_POOL = 3

    def __init__(self, _id, _name, _element_type, _unsupported_actions,
                 _total_space, _free_space,
                 _status, _status_info, _system_id, _plugin_data=None):
        """Store all pool fields; no validation is performed here."""
        self._id = _id                      # Identifier
        self._name = _name                  # Human recognisable name
        self._element_type = _element_type  # What pool can be used to create
        self._unsupported_actions = _unsupported_actions
        # What pool cannot be used for
        self._total_space = _total_space    # Total size
        self._free_space = _free_space      # Free space available
        self._status = _status              # Status of pool.
        self._status_info = _status_info    # Additional status text of pool
        self._system_id = _system_id        # System id this pool belongs
        self._plugin_data = _plugin_data    # Plugin private data
@default_property('id', doc="Unique identifier")
@default_property('name', doc="File system name")
@default_property('total_space', doc="Total space in bytes")
@default_property('free_space', doc="Free space available")
@default_property('pool_id', doc="What pool the file system resides on")
@default_property('system_id', doc="System ID")
@default_property("plugin_data", doc="Private plugin data")
class FileSystem(IData):
    """A file system hosted on a pool of a storage system."""
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id', 'pool_id']

    def __init__(self, _id, _name, _total_space, _free_space, _pool_id,
                 _system_id, _plugin_data=None):
        """Plain data holder; fields are exposed via default_property."""
        self._id = _id
        self._name = _name
        self._total_space = _total_space
        self._free_space = _free_space
        self._pool_id = _pool_id
        self._system_id = _system_id
        self._plugin_data = _plugin_data
@default_property('id', doc="Unique identifier")
@default_property('name', doc="Snapshot name")
@default_property('ts', doc="Time stamp the snapshot was created")
@default_property("plugin_data", doc="Private plugin data")
class FsSnapshot(IData):
    """A point-in-time snapshot of a file system."""

    def __init__(self, _id, _name, _ts, _plugin_data=None):
        """Store fields; the timestamp is coerced to an integer."""
        self._id = _id
        self._name = _name
        self._ts = int(_ts)  # creation time, truncated to whole seconds
        self._plugin_data = _plugin_data
@default_property('id', doc="Unique identifier")
@default_property('fs_id', doc="Filesystem that is exported")
@default_property('export_path', doc="Export path")
@default_property('auth', doc="Authentication type")
@default_property('root', doc="List of hosts with no_root_squash")
@default_property('rw', doc="List of hosts with Read & Write privileges")
@default_property('ro', doc="List of hosts with Read only privileges")
@default_property('anonuid', doc="UID for anonymous user id")
@default_property('anongid', doc="GID for anonymous group id")
@default_property('options', doc="String containing advanced options")
@default_property('plugin_data', doc="Plugin private data")
class NfsExport(IData):
    """An NFS export of a file system."""
    SUPPORTED_SEARCH_KEYS = ['id', 'fs_id']

    # Sentinels for the anonymous uid/gid fields.
    ANON_UID_GID_NA = -1
    ANON_UID_GID_ERROR = -2

    def __init__(self, _id, _fs_id, _export_path, _auth, _root, _rw, _ro,
                 _anonuid, _anongid, _options, _plugin_data=None):
        """fs_id and export_path are mandatory (asserted below)."""
        assert (_fs_id is not None)
        assert (_export_path is not None)

        self._id = _id
        self._fs_id = _fs_id                # File system exported
        self._export_path = _export_path    # Export path
        self._auth = _auth                  # Authentication type
        self._root = _root                  # List of hosts with no_root_squash
        self._rw = _rw                      # List of hosts with read/write
        self._ro = _ro                      # List of hosts with read/only
        self._anonuid = _anonuid            # uid for anonymous user id
        self._anongid = _anongid            # gid for anonymous group id
        self._options = _options            # NFS options
        self._plugin_data = _plugin_data
@default_property('src_block', doc="Source logical block address")
@default_property('dest_block', doc="Destination logical block address")
@default_property('block_count', doc="Number of blocks")
class BlockRange(IData):
    """A source/destination block range used for block-copy operations."""

    def __init__(self, _src_block, _dest_block, _block_count):
        self._src_block = _src_block
        self._dest_block = _dest_block
        self._block_count = _block_count
@default_property('id', doc="Unique instance identifier")
@default_property('name', doc="Access group name")
@default_property('init_ids', doc="List of initiator IDs")
@default_property('init_type', doc="Initiator type")
@default_property('system_id', doc="System identifier")
@default_property('plugin_data', doc="Plugin private data")
class AccessGroup(IData):
    """A named collection of initiator IDs (WWPN or iSCSI IQN)."""
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id']

    # Initiator type values.
    INIT_TYPE_UNKNOWN = 0
    INIT_TYPE_OTHER = 1
    INIT_TYPE_WWPN = 2
    INIT_TYPE_ISCSI_IQN = 5
    INIT_TYPE_ISCSI_WWPN_MIXED = 7

    def __init__(self, _id, _name, _init_ids, _init_type, _system_id,
                 _plugin_data=None):
        """Store fields; initiator IDs are validated and normalized."""
        self._id = _id
        self._name = _name                  # AccessGroup name
        self._init_ids = AccessGroup._standardize_init_list(_init_ids)
        # A list of Initiator ID strings.
        self._init_type = _init_type
        self._system_id = _system_id        # System id this group belongs
        self._plugin_data = _plugin_data

    @staticmethod
    def _standardize_init_list(init_ids):
        """Validate/normalize each initiator ID, raising on the first bad one.

        :param init_ids: iterable of initiator ID strings
        :return: list of normalized initiator IDs
        :raises LsmError: ErrorNumber.INVALID_ARGUMENT for an invalid ID
        """
        rc = []
        for i in init_ids:
            valid, init_type, init_id = AccessGroup.initiator_id_verify(i)
            if valid:
                rc.append(init_id)
            else:
                # BUG FIX: original raised LsmError(LsmError.ErrorNumber...)
                # but LsmError has no 'ErrorNumber' attribute, so an invalid
                # initiator produced an AttributeError instead of the
                # intended LsmError.
                raise LsmError(ErrorNumber.INVALID_ARGUMENT,
                               "Invalid initiator ID %s" % i)
        return rc

    # WWPN: optional 0x prefix, then 8 hex byte pairs optionally separated
    # by '.', ':' or '-'.
    _regex_wwpn = re.compile(r"""
        ^(0x|0X)?([0-9A-Fa-f]{2})
        (([\.:\-])?[0-9A-Fa-f]{2}){7}$
    """, re.X)

    @staticmethod
    def initiator_id_verify(init_id, init_type=None, raise_exception=False):
        """
        Public method which can be used to verify an initiator id

        :param init_id: initiator ID string to check
        :param init_type: optional expected AccessGroup.INIT_TYPE_* value
        :param raise_exception: Will throw a LsmError INVALID_ARGUMENT if
                                not a valid initiator address
        :return: (Bool, init_type, init_id)  Note: init_id will be returned
                 in normalized format if it's a WWPN
        """
        if init_id.startswith('iqn') or init_id.startswith('eui') or\
           init_id.startswith('naa'):
            if init_type is None or \
               init_type == AccessGroup.INIT_TYPE_ISCSI_IQN:
                return True, AccessGroup.INIT_TYPE_ISCSI_IQN, init_id
        if AccessGroup._regex_wwpn.match(str(init_id)):
            if init_type is None or \
               init_type == AccessGroup.INIT_TYPE_WWPN:
                return (True, AccessGroup.INIT_TYPE_WWPN,
                        AccessGroup._wwpn_to_lsm_type(init_id))
        if raise_exception:
            raise LsmError(ErrorNumber.INVALID_ARGUMENT,
                           "Initiator id '%s' is invalid" % init_id)
        return False, None, None

    @staticmethod
    def _wwpn_to_lsm_type(wwpn, raise_error=True):
        """
        Convert the provided WWPN string into the LSM-standard one.
        LSM WWPN format:
            ^(?:[0-9a-f]{2}:){7}[0-9a-f]{2}$
        LSM WWPN Example:
           10:00:00:00:c9:95:2f:de
        Acceptable WWPN format is:
            ^[0x|0X]{0,1}(:?[0-9A-Fa-f]{2}[\\.\\-:]{0,1}){7}[0-9A-Fa-f]{2}$
        Acceptable WWPN example:
           10:00:00:00:c9:95:2f:de
           10:00:00:00:C9:95:2F:DE
           10-00-00-00-C9-95-2F-DE
           10-00-00-00-c9-95-2f-de
           10.00.00.00.C9.95.2F.DE
           10.00.00.00.c9.95.2f.de
           0x10000000c9952fde
           0X10000000C9952FDE
           10000000c9952fde
           10000000C9952FDE
        Return the LSM WWPN
        Return None if raise_error is False and not a valid WWPN.
        """
        if AccessGroup._regex_wwpn.match(str(wwpn)):
            s = str(wwpn)
            s = s.lower()
            s = re.sub(r'0x', '', s)             # drop optional 0x prefix
            s = re.sub(r'[^0-9a-f]', '', s)      # strip separators
            s = ":".join(re.findall(r'..', s))   # re-insert ':' every byte
            return s
        if raise_error:
            raise LsmError(ErrorNumber.INVALID_ARGUMENT,
                           "Invalid WWPN Initiator: %s" % wwpn)
        return None
@default_property('id', doc="Unique instance identifier")
@default_property('port_type', doc="Target port type")
@default_property('service_address', doc="Target port service address")
@default_property('network_address', doc="Target port network address")
@default_property('physical_address', doc="Target port physical address")
@default_property('physical_name', doc="Target port physical port name")
@default_property('system_id', doc="System identifier")
@default_property('plugin_data', doc="Plugin private data")
class TargetPort(IData):
    """A front-end target port (FC/FCoE/iSCSI) of a storage system."""
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id']

    # Port type values.
    TYPE_OTHER = 1
    TYPE_FC = 2
    TYPE_FCOE = 3
    TYPE_ISCSI = 4

    def __init__(self, _id, _port_type, _service_address,
                 _network_address, _physical_address, _physical_name,
                 _system_id, _plugin_data=None):
        self._id = _id
        self._port_type = _port_type
        self._service_address = _service_address
        # service_address:
        #   The address used by upper layer like FC and iSCSI:
        #       FC and FCoE:    WWPN
        #       iSCSI:          IQN
        #   String. Lower case, split with : every two digits if WWPN.
        self._network_address = _network_address
        # network_address:
        #   The address used by network layer like FC and TCP/IP:
        #       FC/FCoE:        WWPN
        #       iSCSI:          IPv4:Port
        #                       [IPv6]:Port
        #   String. Lower case, split with : every two digits if WWPN.
        self._physical_address = _physical_address
        # physical_address:
        #   The address used by physical layer like FC-0 and MAC:
        #       FC:             WWPN
        #       FCoE:           WWPN
        #       iSCSI:          MAC
        #   String. Lower case, split with : every two digits.
        self._physical_name = _physical_name
        # physical_name
        #   The name of physical port. Administrator could use this name to
        #   locate the port on storage system.
        #   String.
        self._system_id = _system_id
        self._plugin_data = _plugin_data
class Capabilities(IData):
    """Bit-array of plugin capabilities, indexed by the constants below.

    Each slot holds SUPPORTED or UNSUPPORTED; the constants name the slot
    indices.
    """
    UNSUPPORTED = 0
    SUPPORTED = 1

    _NUM = 512              # Indicate the maximum capability integer
    _CAP_NUM_BEGIN = 20     # Indicate the first capability integer

    # Block operations
    VOLUMES = 20
    VOLUME_CREATE = 21
    VOLUME_RESIZE = 22
    VOLUME_REPLICATE = 23
    VOLUME_REPLICATE_CLONE = 24
    VOLUME_REPLICATE_COPY = 25
    VOLUME_REPLICATE_MIRROR_ASYNC = 26
    VOLUME_REPLICATE_MIRROR_SYNC = 27
    VOLUME_COPY_RANGE_BLOCK_SIZE = 28
    VOLUME_COPY_RANGE = 29
    VOLUME_COPY_RANGE_CLONE = 30
    VOLUME_COPY_RANGE_COPY = 31
    VOLUME_DELETE = 33
    VOLUME_ENABLE = 34
    VOLUME_DISABLE = 35
    VOLUME_MASK = 36
    VOLUME_UNMASK = 37
    ACCESS_GROUPS = 38
    ACCESS_GROUP_CREATE_WWPN = 39
    ACCESS_GROUP_DELETE = 40
    ACCESS_GROUP_INITIATOR_ADD_WWPN = 41
    # For empty access group, this indicate it can add WWPN into it.
    ACCESS_GROUP_INITIATOR_DELETE = 42
    VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP = 43
    ACCESS_GROUPS_GRANTED_TO_VOLUME = 44
    VOLUME_CHILD_DEPENDENCY = 45
    VOLUME_CHILD_DEPENDENCY_RM = 46
    ACCESS_GROUP_CREATE_ISCSI_IQN = 47
    ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN = 48
    # For empty access group, this indicate it can add iSCSI IQN into it.
    VOLUME_ISCSI_CHAP_AUTHENTICATION = 53
    VOLUME_RAID_INFO = 54
    VOLUME_THIN = 55
    BATTERIES = 56
    VOLUME_CACHE_INFO = 57
    VOLUME_PHYSICAL_DISK_CACHE_UPDATE = 58
    VOLUME_PHYSICAL_DISK_CACHE_UPDATE_SYSTEM_LEVEL = 59
    VOLUME_WRITE_CACHE_POLICY_UPDATE_WRITE_BACK = 60
    VOLUME_WRITE_CACHE_POLICY_UPDATE_AUTO = 61
    VOLUME_WRITE_CACHE_POLICY_UPDATE_WRITE_THROUGH = 62
    VOLUME_WRITE_CACHE_POLICY_UPDATE_IMPACT_READ = 63
    VOLUME_WRITE_CACHE_POLICY_UPDATE_WB_IMPACT_OTHER = 64
    VOLUME_READ_CACHE_POLICY_UPDATE = 65
    VOLUME_READ_CACHE_POLICY_UPDATE_IMPACT_WRITE = 66

    # File system
    FS = 100
    FS_DELETE = 101
    FS_RESIZE = 102
    FS_CREATE = 103
    FS_CLONE = 104
    FILE_CLONE = 105
    FS_SNAPSHOTS = 106
    FS_SNAPSHOT_CREATE = 107
    FS_SNAPSHOT_DELETE = 109
    FS_SNAPSHOT_RESTORE = 110
    FS_SNAPSHOT_RESTORE_SPECIFIC_FILES = 111
    FS_CHILD_DEPENDENCY = 112
    FS_CHILD_DEPENDENCY_RM = 113
    FS_CHILD_DEPENDENCY_RM_SPECIFIC_FILES = 114

    # NFS
    EXPORT_AUTH = 120
    EXPORTS = 121
    EXPORT_FS = 122
    EXPORT_REMOVE = 123
    EXPORT_CUSTOM_PATH = 124

    # System / disk queries
    SYS_READ_CACHE_PCT_UPDATE = 158
    SYS_READ_CACHE_PCT_GET = 159
    SYS_FW_VERSION_GET = 160
    SYS_MODE_GET = 161
    DISK_LOCATION = 163
    DISK_RPM = 164
    DISK_LINK_TYPE = 165
    VOLUME_LED = 171

    # Search/filter support
    POOLS_QUICK_SEARCH = 210
    VOLUMES_QUICK_SEARCH = 211
    DISKS_QUICK_SEARCH = 212
    ACCESS_GROUPS_QUICK_SEARCH = 213
    FS_QUICK_SEARCH = 214
    NFS_EXPORTS_QUICK_SEARCH = 215
    TARGET_PORTS = 216
    TARGET_PORTS_QUICK_SEARCH = 217

    DISKS = 220
    POOL_MEMBER_INFO = 221
    VOLUME_RAID_CREATE = 222
    DISK_VPD83_GET = 223

    def _to_dict(self):
        # Serialize the bit array as a hex string, two chars per slot.
        return {'class': self.__class__.__name__,
                'cap': ''.join(['%02x' % b for b in self._cap])}

    def __init__(self, _cap=None):
        """Build from a hex-encoded capability string, or all-UNSUPPORTED."""
        if _cap is not None:
            # NOTE(review): str.decode('hex') is Python 2 only; a Python 3
            # port would need bytearray.fromhex(_cap). Confirm the target
            # interpreter before changing.
            self._cap = bytearray(_cap.decode('hex'))
        else:
            self._cap = bytearray(Capabilities._NUM)

    def supported(self, capability):
        """Return True if *capability* is flagged SUPPORTED."""
        return self.get(capability) == Capabilities.SUPPORTED

    def get(self, capability):
        """Return the raw slot value; out-of-range slots read UNSUPPORTED."""
        if capability >= len(self._cap):
            return Capabilities.UNSUPPORTED
        return self._cap[capability]

    @staticmethod
    def _lsm_cap_to_str_dict():
        """
        Return a dict containing all valid capability:
            integer => string name
        """
        lsm_cap_to_str_conv = dict()
        for c_str, c_int in Capabilities.__dict__.items():
            # Keep only public int class attributes inside the valid range.
            if type(c_str) == str and type(c_int) == int and \
                    c_str[0] != '_' and \
                    Capabilities._CAP_NUM_BEGIN <= c_int <= Capabilities._NUM:
                lsm_cap_to_str_conv[c_int] = c_str
        return lsm_cap_to_str_conv

    def get_supported(self, all_cap=False):
        """
        Returns a hash of the supported capabilities in the form
        constant, name
        """
        all_caps = Capabilities._lsm_cap_to_str_dict()
        if all_cap:
            return all_caps
        rc = {}
        for i in all_caps.keys():
            if self._cap[i] == Capabilities.SUPPORTED:
                if i in all_caps:
                    rc[i] = all_caps[i]
        return rc

    def set(self, capability, value=SUPPORTED):
        """Mark one capability slot (defaults to SUPPORTED)."""
        self._cap[capability] = value

    def enable_all(self):
        """Mark every slot SUPPORTED."""
        for i in range(len(self._cap)):
            self._cap[i] = Capabilities.SUPPORTED
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User given name")
@default_property('type', doc="Cache hardware type")
@default_property('status', doc='Battery status')
@default_property('system_id', doc="System identifier")
@default_property("plugin_data", doc="Private plugin data")
class Battery(IData):
    """A battery (or capacitor) attached to a storage system."""
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id']

    # Battery hardware types.
    TYPE_UNKNOWN = 1
    TYPE_OTHER = 2
    TYPE_CHEMICAL = 3
    TYPE_CAPACITOR = 4

    # Status bit flags; a battery may report several at once.
    STATUS_UNKNOWN = 1 << 0
    STATUS_OTHER = 1 << 1
    STATUS_OK = 1 << 2
    STATUS_DISCHARGING = 1 << 3
    STATUS_CHARGING = 1 << 4
    STATUS_LEARNING = 1 << 5
    STATUS_DEGRADED = 1 << 6
    STATUS_ERROR = 1 << 7

    def __init__(self, _id, _name, _type, _status, _system_id,
                 _plugin_data=None):
        self._id = _id
        self._name = _name
        self._type = _type
        self._status = _status
        self._system_id = _system_id
        self._plugin_data = _plugin_data
if __name__ == '__main__':
    # TODO Need some unit tests that encode/decode all the types with nested
    # Importing this module has no side effects; nothing runs as a script.
    pass
| lgpl-2.1 | 2,991,822,636,371,551,000 | 33.012004 | 79 | 0.588272 | false |
DaveBuckingham/robosoft | record_mode.py | 1 | 8314 | """
Provides functions for
1) recording outputs to file
2) replaying outputs from files
"""
import global_data
import mctransmitter
import datetime
import os
import errno
import time
import threading
import ui_display
# Tag of the recording file currently selected for playback (None = none).
playback_file_tag = None

# Recording files are named <prefix><tag><extension> inside the directory
# below.
save_filename_prefix = 'botwurst_command_record_'
default_save_directory = 'botwurst_command_recordings'
save_file_extension = '.dat'

# TODO set recording limit
def make_directory(directory_name):
    """Create *directory_name* (and parents); already existing is fine.

    Any OSError other than "already exists" is re-raised.
    """
    try:
        os.makedirs(directory_name + '/')
    except OSError as err:
        # EEXIST means the directory is already there -- not an error here.
        if err.errno != errno.EEXIST:
            raise
def set_default_save_directory(directory_name):
    """Change the module-level directory recordings are saved to/loaded from.

    BUG FIX: the original assigned a local variable
    (``record_save_directory``), so calling this function had no effect on
    ``default_save_directory``.

    :param directory_name: new default save directory path
    """
    global default_save_directory
    default_save_directory = directory_name
# HELPER FUNCTION FOR LOOKING AT GLOBAL VARIABLES
# HELPER FUNCTION FOR LOOKING AT GLOBAL VARIABLES
def print_global_record_variables():
    # Debug helper: dump recording-related globals to stdout (Python 2
    # print statements).
    print "RECORDING VARIABLE SETTINGS"
    print "===================="
    print "Recording: ", global_data.record
    print "Will store in file numbered: ", global_data.record_file_number, " in directory: ", default_save_directory
    print "Initial time: ", global_data.record_start_time
    print "Recording array is empty: ", (len(global_data.record_array) == 0)
    print "===================="
# RECORDING FUNCTIONS
def initialize_record_mode(file_number):
    """
    Flag that recording is active and reset the recording bookkeeping.

    Any instructions still sitting in the record array are first flushed to
    a "<previous tag>_backup" file so they are not lost.  The save directory
    is created if it does not exist yet.

    :param file_number: tag for the file where this recording will be stored
    """
    if global_data.record_array:
        # Preserve an unfinished recording before resetting its state.
        create_record_file(global_data.record_file_number + "_backup")

    global_data.record = True
    global_data.record_file_number = file_number
    global_data.record_start_time = datetime.datetime.now()

    # Creating an already-existing directory is a no-op.
    make_directory(default_save_directory)
def append_instruction(instruction):
    """
    Append *instruction* to the global record array, stamped with the
    seconds elapsed since recording started.

    :param instruction: triple (PIN TYPE, PIN INDEX, VAL)
    """
    # datetime subtraction is exact; total_seconds() gives a float offset.
    elapsed = datetime.datetime.now() - global_data.record_start_time
    kind, index, level = instruction
    global_data.record_array.append(
        (kind, index, level, elapsed.total_seconds()))
# 2) CREATE A FILE FROM RECORD ARRAY
def create_record_file(file_tag=None, save_directory=None):
"""
Creates a file with the list of instructions in record_array
:param file_tag: defaults to file_number in global data
"""
if file_tag is None:
file_tag = global_data.record_file_number
if save_directory is None:
save_directory = default_save_directory
record_filename = save_directory + '/' + save_filename_prefix + str(file_tag) + save_file_extension
# Create new file, or overwrite file if it exists
with open(record_filename, 'w') as recording_file:
# Copy all commands to the file
for command in global_data.record_array:
recording_file.write(str(command) + '\n')
# Reinitialize all record variables
global_data.record = False
global_data.record_file_number = None
global_data.record_start_time = None
global_data.record_array = []
# 2) PLAYBACK FUNCTIONS
def clear_playback_array():
    # Rebind to a fresh list; any previously captured reference (e.g. held
    # by a playback thread) keeps seeing the old list unchanged.
    global_data.playback_array = []
def populate_playback_array_from_file(filename, is_file_tag=False, save_directory=None):
    """
    Append instructions from a recording file to the global playback array.

    Fixes over the original: the file handle is now closed (it was leaked),
    and lines are parsed with ast.literal_eval instead of eval(), which
    would execute arbitrary code found in the recording file.

    :param filename: name of file containing recording information
    :param is_file_tag: True if only using number to identify file (default False)
    :param save_directory: defaults to the module-level default directory
    """
    import ast  # local import so this fix is self-contained

    if save_directory is None:
        save_directory = default_save_directory

    if is_file_tag:
        filename = save_filename_prefix + str(filename)

    path = save_directory + '/' + str(filename) + save_file_extension
    with open(path, 'r') as playback_file:
        for line in playback_file:
            # literal_eval parses only Python literals (the tuples written
            # by create_record_file), never executes code.
            global_data.playback_array.append(ast.literal_eval(line.rstrip()))
def playback_instruction(pin_type, pin_index, value):
    """Forward one recorded instruction to the microcontroller transmitter.

    :param pin_type: 'd' for digital, 'a' for analog; anything else is a no-op
    :param pin_index: pin number to drive
    :param value: value to write to the pin
    """
    if pin_type == 'd':
        # print "DIGITAL, PIN_INDEX: ", pin_index, "VALUE: ", value
        mctransmitter.tx_digital(pin_index, value)
    elif pin_type == 'a':
        # print "ANALOG, PIN_INDEX: ", pin_index, "VALUE: ", value
        mctransmitter.tx_analog(pin_index, value)
class Playback_From_Array(threading.Thread):
    """Thread that replays a queue of (pin_type, pin_index, value, t)
    tuples, honouring the global pause/cancel flags and the recorded
    inter-command delays."""

    def __init__(self, parent, queue):
        threading.Thread.__init__(self)
        self._queue = queue    # list of (pin_type, pin_index, value, timestamp)
        self._parent = parent  # unused here; kept for caller symmetry
        self.start()           # NOTE: the thread starts on construction

    def run(self):
        curr_time_stamp = 0
        for instruction in self._queue:
            # Poll every 0.1 s while paused; a cancel request wins over pause.
            while global_data.playback_paused:
                if global_data.playback_cancel:
                    break
                time.sleep(.1)

            if global_data.playback_cancel:
                break

            temp_time_stamp = instruction[3]
            # Sleep for the gap between this command and the previous one so
            # playback reproduces the recorded timing.
            time_diff = (temp_time_stamp - curr_time_stamp)
            time.sleep(time_diff)
            playback_instruction(instruction[0], instruction[1], instruction[2])
            curr_time_stamp = temp_time_stamp
            ui_display.update()

        # Finished or cancelled: reset global playback state.
        clear_playback_array()
        global_data.playback = False
        global_data.playback_file_number = None
        ui_display.update()
def playback_from_file(filename, is_file_tag=False, save_directory=None):
    """Load a recording file and start replaying it on a background thread.

    :param filename: recording file name (or numeric tag if is_file_tag)
    :param is_file_tag: True when *filename* is just the numeric tag
    :param save_directory: directory holding the recording file
    :return: the started Playback_From_Array thread (callers may join() it)
    """
    clear_playback_array()
    global_data.playback = True
    global_data.playback_file_number = filename
    populate_playback_array_from_file(filename, is_file_tag, save_directory)
    playback_thread = Playback_From_Array(None, global_data.playback_array)
    return playback_thread
# TESTING FUNCTIONS: TO REMOVE
# class Print_Hello_Every_Sec(threading.Thread):
# def __init__(self, parent, queue):
# threading.Thread.__init__(self)
# self._queue = queue
# self._parent = parent
# self.start()
#
# def run(self):
# for i in range(15):
# print "**********HELLO THERE**************"
# time.sleep(1)
#
# class Pause_Unpause(threading.Thread):
# def __init__(self, parent, queue):
# threading.Thread.__init__(self)
# self._queue = queue
# self._parent = parent
# self.start()
#
# def run(self):
# time.sleep(2)
# global_data.playback_paused = True
# print "PAUSING"
# time.sleep(5)
# global_data.playback_cancel = True
# print "CANCELLING"
# time.sleep(5)
# print "UNPAUSING"
# global_data.playback_paused = False
#
#
# def create_dummy_instruction_file(file_tag):
# short_delay = 0.1
# long_delay = 1
#
# initialize_record_mode(file_tag)
# print_global_record_variables()
#
# i = 1
# j = 0
#
# for iterator in range(10):
# i_is_even = (1 == i%2)
#
# digital_instruction = ('d', 0, i_is_even)
# append_instruction(digital_instruction)
#
# time.sleep(short_delay)
#
# digital_instruction = ('d', 1, not i_is_even)
# append_instruction(digital_instruction)
#
# time.sleep(short_delay)
#
# val = abs((j % 510) - 255)
#
# analog_instruction = ('a', 0, val)
# append_instruction(analog_instruction)
#
# time.sleep(short_delay)
#
# analog_instruction = ('a', 1, 255 - val)
# append_instruction(analog_instruction)
#
# time.sleep(long_delay)
#
# i = i + 1
# j = j + 20
#
# create_record_file()
#
# def main():
# test_file_tag = 5
# # create_dummy_instruction_file(test_file_tag)
#
# pause_thread = Pause_Unpause(None, None)
# playback_thread = playback_from_file(test_file_tag, True)
# print_hello_thread = Print_Hello_Every_Sec(None, None)
#
# print_hello_thread.join()
# playback_thread.join()
# pause_thread.join()
#
# print_global_record_variables()
#
#
# main()
| mit | 2,830,740,662,388,459,500 | 28.799283 | 116 | 0.636998 | false |
openelections/openelections-core | openelex/us/vt/validate/validates.py | 1 | 8951 | import re
from openelex.models import Contest, Candidate, Office, Result
import logging
import time
import os
# if not os.path.isdir("logs"):
# os.makedirs("logs")
# logging.basicConfig(filename=time.strftime("logs/%Y%m%d-%H%M%S-validate.log"),level=logging.DEBUG)
# Generic validation helpers
def _validate_candidate_votes(election_id, reporting_level, contest_slug,
        candidate_slug, expected_votes):
    """Sum sub-contest level results and compare them to known totals

    :param election_id: e.g. 'vt-2012-11-06-general'
    :param reporting_level: aggregation level to sum over (e.g. 'precinct')
    :param contest_slug: contest whose results are summed
    :param candidate_slug: candidate whose results are summed
    :param expected_votes: known-good vote total asserted against
    """
    msg = "Expected {} votes for contest {} and candidate {}, found {}"
    votes = Result.objects.filter(election_id=election_id,
        contest_slug=contest_slug, candidate_slug=candidate_slug,
        reporting_level=reporting_level).sum('votes')
    if votes != expected_votes:
        # Log a ready-to-paste mongo shell query to inspect the mismatch.
        logging.debug("db.getCollection('result').find({election_id:\"%s\", \
contest_slug:\"%s\", candidate_slug:\"%s\", \
reporting_level:\"%s\"})", election_id, contest_slug, candidate_slug, reporting_level)
    assert votes == expected_votes, msg.format(expected_votes, contest_slug,
        candidate_slug, votes)
def _validate_many_candidate_votes(election_id, reporting_level,
                                   candidates):
    """
    Sum sub-contest level results and compare them to known totals for
    multiple contests and candidates.

    Arguments:

    election_id - Election ID of the election of interest.
    reporting_level - Reporting level to use to aggregate results.
    candidates - Tuple of contests slug, candidate slug and expected votes.
    """
    # Unpack each (contest, candidate, expected) triple directly in the loop.
    for contest, candidate, expected in candidates:
        _validate_candidate_votes(election_id, reporting_level,
            contest, candidate, expected)
def validate_results_2012_president_general():
    """Sum some county-level results for 2012 general presidential and compare with known totals"""
    election_id = 'vt-2012-11-06-general'
    known_results = [
        ('president', 'barack-obama', 199053),
    ]
    # NOTE(review): 'parish' as a reporting level looks copied from a
    # Louisiana validator -- Vermont aggregates by county; confirm.
    _validate_many_candidate_votes(election_id, 'parish', known_results)
    _validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2014_house_general():
    """Sum some county-level results for 2014 general and compare with known totals"""
    election_id = 'vt-2014-11-04-general'
    known_results = [
        ('us-house-of-representatives', 'peter-welch', 123349),
        ('us-house-of-representatives', 'mark-donka', 59432),
        ('us-house-of-representatives', 'cris-ericson', 2750),
    ]
    # NOTE(review): 'parish' reporting level -- see note in the 2012 check;
    # Vermont uses counties; confirm.
    _validate_many_candidate_votes(election_id, 'parish', known_results)
    _validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2014_house_primary():
    """Sum some county-level results for 2014 house primary and compare with known totals"""
    election_id = 'vt-2014-08-26-primary'
    known_results = [
        ('us-house-of-representatives-d', 'peter-welch', 19248),
        ('us-house-of-representatives-d', 'writeins', 224),
        ('us-house-of-representatives-r', 'mark-donka', 4340),
        ('us-house-of-representatives-r', 'donald-russell', 4026),
    ]
    # NOTE(review): 'parish' reporting level -- Vermont uses counties; confirm.
    _validate_many_candidate_votes(election_id, 'parish', known_results)
    _validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2002_lt_gov_general():
    """Sum some county-level results for 2002 lt-gov general and compare with known totals"""
    election_id = 'vt-2002-11-05-general'
    known_results = [
        ('lieutenant-governor', 'peter-shumlin', 73501),
        ('lieutenant-governor', 'brian-e-dubie', 94044),
        ('lieutenant-governor', 'anthony-pollina', 56564),
        ('lieutenant-governor', 'sally-ann-jones', 4310),
    ]
    # NOTE(review): 'parish' reporting level -- Vermont uses counties; confirm.
    _validate_many_candidate_votes(election_id, 'parish', known_results)
    _validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2002_lt_gov_primary():
    """Check 2002 lieutenant-governor primary totals at both reporting levels."""
    known_results = [
        ('lieutenant-governor-d', 'peter-shumlin', 22633),
        ('lieutenant-governor-r', 'brian-e-dubie', 22584),
    ]
    for reporting_level in ('parish', 'precinct'):
        _validate_many_candidate_votes('vt-2002-09-10-primary',
                                       reporting_level, known_results)
def validate_results_2004_misc_results_general():
    """Check assorted 2004 general statewide-office totals at both reporting levels."""
    known_results = [
        ('treasurer', 'jeb-spaulding', 273705),
        ('secretary-of-state', 'deb-markowitz', 270744),
        ('auditor', 'randy-brock', 152848),
        ('auditor', 'elizabeth-m-ready', 122498),
        ('auditor', 'jerry-levy', 17685),
        ('attorney-general', 'william-h-sorrell', 169726),
        # The Vermont website lists 90285 for dennis-carver, but per the VT
        # Secretary of State the real result should be 81,285 -- excluded
        # until the source data is corrected.
        # ('attorney-general', 'dennis-carver', 90285),
        ('attorney-general', 'susan-a-davis', 14351),
        ('attorney-general', 'james-mark-leas', 8769),
        ('attorney-general', 'karen-kerin', 6357),
        ('attorney-general', 'boots-wardinski', 2944),
    ]
    for reporting_level in ('parish', 'precinct'):
        _validate_many_candidate_votes('vt-2004-11-02-general',
                                       reporting_level, known_results)
def validate_results_2008_state_senate_primary():
    """Check 2008 state-senate primary totals at both reporting levels."""
    known_results = [
        ('state-senate-orange-d', 'mark-a-macdonald', 557),
        ('state-senate-franklin-r', 'randy-brock', 879),
        ('state-senate-franklin-r', 'willard-rowell', 782),
        ('state-senate-essexorleans-d', 'robert-a-starr', 748),
        ('state-senate-essexorleans-d', 'writeins', 112),
    ]
    for reporting_level in ('parish', 'precinct'):
        _validate_many_candidate_votes('vt-2008-09-08-primary',
                                       reporting_level, known_results)
def validate_results_2010_state_senate_general():
    """Check 2010 general state-senate totals at both reporting levels."""
    known_results = [
        ('state-senate-orange', 'mark-a-macdonald', 4524),
        ('state-senate-orange', 'stephen-w-webster', 3517),
        ('state-senate-franklin', 'randy-brock', 9014),
        ('state-senate-franklin', 'peter-d-moss', 793),
        ('state-senate-essexorleans', 'robert-a-starr', 9902),
        ('state-senate-essexorleans', 'vincent-illuzzi', 9231),
    ]
    for reporting_level in ('parish', 'precinct'):
        _validate_many_candidate_votes('vt-2010-11-02-general',
                                       reporting_level, known_results)
def validate_results_2012_state_house_primary():
    """Check 2012 state-house primary totals at both reporting levels."""
    known_results = [
        ('house-of-representatives-addison-5-d', 'edward-v-mcguire', 220),
        ('house-of-representatives-addison-5-r', 'harvey-smith', 75),
        ('house-of-representatives-addison-1-d', 'betty-a-nuovo', 486),
        ('house-of-representatives-addison-1-d', 'paul-ralston', 446),
        ('house-of-representatives-bennington-1-d', 'bill-botzow', 152),
        ('house-of-representatives-caledonia-1-r', 'leigh-b-larocque', 72),
        ('house-of-representatives-chittenden-61-d', 'joanna-cole', 658),
        ('house-of-representatives-chittenden-61-d', 'bill-aswad', 619),
        ('house-of-representatives-chittenden-61-d', 'robert-hooper', 536),
        ('house-of-representatives-chittenden-61-r', 'kurt-wright', 116),
    ]
    for reporting_level in ('parish', 'precinct'):
        _validate_many_candidate_votes('vt-2012-03-06-primary',
                                       reporting_level, known_results)
def validate_results_2012_state_house_general():
    """Check 2012 general state-house totals at both reporting levels."""
    known_results = [
        ('house-of-representatives-addison-5', 'edward-v-mcguire', 982),
        ('house-of-representatives-addison-5', 'harvey-smith', 1151),
        ('house-of-representatives-addison-1', 'betty-a-nuovo', 2601),
        ('house-of-representatives-addison-1', 'paul-ralston', 2378),
        ('house-of-representatives-bennington-1', 'bill-botzow', 1613),
        ('house-of-representatives-caledonia-1', 'leigh-b-larocque', 1143),
        ('house-of-representatives-chittenden-61', 'joanna-cole', 2008),
        ('house-of-representatives-chittenden-61', 'bill-aswad', 1987),
        ('house-of-representatives-chittenden-61', 'kurt-wright', 2332),
    ]
    for reporting_level in ('parish', 'precinct'):
        _validate_many_candidate_votes('vt-2012-11-06-general',
                                       reporting_level, known_results)
| mit | -705,504,772,250,037,200 | 45.378238 | 117 | 0.669199 | false |
iandees/all-the-places | locations/spiders/hihostels.py | 1 | 1934 | # -*- coding: utf-8 -*-
import scrapy
import re
from locations.items import GeojsonPointItem
class HiHostelsSpider(scrapy.Spider):
    """Crawl hihostels.com via its sitemap and scrape each hostel page."""
    name = "hihostels"
    allowed_domains = ['hihostels.com']
    start_urls = (
        'https://www.hihostels.com/sitemap.xml',
    )
    # Sitemap entries that point at an individual hostel detail page.
    # Compiled once at class creation instead of on every parse() call.
    hostel_url_re = re.compile(r'http\S+hihostels.com/\S+/hostels/\S+')

    def parse(self, response):
        """Yield a request for every hostel URL found in the sitemap."""
        response.selector.remove_namespaces()
        city_urls = response.xpath('//url/loc/text()').extract()
        for path in city_urls:
            # Positive test replaces the original `if not match: pass
            # else: ...` construct, which had an empty branch.
            if self.hostel_url_re.search(path):
                yield scrapy.Request(
                    path.strip(),
                    callback=self.parse_store,
                )

    def parse_store(self, response):
        """Extract one GeojsonPointItem from a hostel detail page.

        The hostel name and the comma-separated address line were each
        re-extracted with the same xpath several times in the original;
        extract each once and reuse the pieces.
        """
        name = " ".join(response.xpath(
            '/html/body/div[1]/div[6]/div[2]/div[1]/h1/span/text()').extract()[0].split())
        address_parts = response.xpath(
            '/html/body/div[1]/div[6]/div[2]/div[1]/div[2]/p[1]/text()').extract()[0].split(',')
        properties = {
            'name': name,
            'ref': name,
            'addr_full': " ".join(address_parts[0].split()),
            'city': " ".join(address_parts[1].split()),
            'postcode': " ".join(address_parts[-2].split()),
            'country': " ".join(address_parts[-1].split()),
            'website': response.xpath('//head/link[@rel="canonical"]/@href').extract_first(),
            'lon': float(response.xpath('//*[@id ="lon"]/@value').extract()[0]),
            'lat': float(response.xpath('//*[@id ="lat"]/@value').extract()[0]),
        }
        yield GeojsonPointItem(**properties)
| mit | 2,543,737,011,314,564,600 | 45.047619 | 146 | 0.542399 | false |
adampresley/bottlepy-bootstrap | model/DateHelper.py | 1 | 2135 | from model.Service import Service
from datetime import tzinfo, timedelta, datetime
from dateutil import tz
class DateHelper(Service):
    """Date/time formatting and timezone-conversion helpers.

    Stored datetimes are assumed to be UTC; presentation methods convert
    them to the timezone configured at construction time before formatting.
    """
    # Shared UTC tzinfo used for all conversions.
    utc = tz.gettz("UTC")
    # Maps Python strftime date formats to their JavaScript equivalents.
    # pyToJsDateFormat raises KeyError for formats not listed here.
    pyToJsFormatMapping = {
        "%m/%d/%Y": "MM/dd/yyyy",
        "%d/%m/%Y": "dd/MM/yyyy",
        "%Y-%m-%d": "yyyy-MM-dd"
    }
    def __init__(self, db, timezone = "UTC", dateFormat = "%m/%d/%Y", timeFormat = "%I:%M %p"):
        self.db = db
        self._timezone = timezone
        self._dateFormat = dateFormat
        self._timeFormat = timeFormat
    def addDays(self, d, numDays = 1, format = "%Y-%m-%d"):
        """Return d plus numDays; d may be a datetime or a string in `format`."""
        if not self.isDateType(d):
            d = datetime.strptime(d, format)
        newDate = d + timedelta(days = numDays)
        return newDate
    def dateFormat(self, d):
        """Format UTC datetime d as a local-timezone date string."""
        return self.utcToTimezone(d, self._timezone).strftime(self._dateFormat)
    def dateTimeFormat(self, d):
        """Format UTC datetime d as a local-timezone 'date time' string."""
        return self.utcToTimezone(d, self._timezone).strftime("%s %s" % (self._dateFormat, self._timeFormat))
    def isDateType(self, d):
        # Duck-typing check: date/datetime objects have .today(); strings
        # don't, which raises AttributeError.
        result = True
        try:
            d.today()
        except AttributeError as e:
            result = False
        return result
    def localNow(self):
        """Current time expressed in the configured local timezone."""
        return self.utcToTimezone(datetime.now(self.utc), self._timezone)
    def now(self):
        """Current time as a timezone-aware UTC datetime."""
        return datetime.now(self.utc)
    def pyToJsDateFormat(self, pyDateFormat):
        """Translate a Python date format string to its JS counterpart."""
        return self.pyToJsFormatMapping[pyDateFormat]
    def restDateFormat(self, d):
        """ISO-style date string (no timezone conversion is applied)."""
        return d.strftime("%Y-%m-%d")
    def restDateTime(self, d):
        """ISO-style 'date time' string (no timezone conversion is applied)."""
        return d.strftime("%Y-%m-%d %H:%M")
    def timeFormat(self, d):
        """Format UTC datetime d as a local-timezone time string."""
        return self.utcToTimezone(d, self._timezone).strftime(self._timeFormat)
    def utcToTimezone(self, d, timezone):
        """Reinterpret d as UTC and convert it to the named timezone.

        NOTE(review): replace() overwrites any tzinfo d already carries --
        assumes d is naive-UTC or UTC-aware; verify against callers.
        """
        targetTZ = tz.gettz(timezone)
        d = d.replace(tzinfo = self.utc)
        return d.astimezone(targetTZ)
    def validateDateRange(self, start, end, format = "%Y-%m-%d"):
        #
        # Basically if the range between start and end is greater than 91
        # days kick it back with today's date as default.
        #
        parsedStart = datetime.strptime(start, format)
        parsedEnd = datetime.strptime(end, format)
        delta = parsedEnd - parsedStart
        newStart = start
        newEnd = end
        if delta.days > 91:
            newStart = self.restDateFormat(self.localNow())
            newEnd = self.restDateFormat(self.localNow())
        return (newStart, newEnd)
pvizeli/hassio | hassio/__main__.py | 1 | 1158 | """Main file for HassIO."""
import asyncio
from concurrent.futures import ThreadPoolExecutor
import logging
import sys
import hassio.bootstrap as bootstrap
import hassio.core as core
_LOGGER = logging.getLogger(__name__)
# pylint: disable=invalid-name
if __name__ == "__main__":
bootstrap.initialize_logging()
if not bootstrap.check_environment():
exit(1)
loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(thread_name_prefix="SyncWorker")
loop.set_default_executor(executor)
_LOGGER.info("Initialize Hassio setup")
config = bootstrap.initialize_system_data()
hassio = core.HassIO(loop, config)
bootstrap.migrate_system_env(config)
_LOGGER.info("Run Hassio setup")
loop.run_until_complete(hassio.setup())
_LOGGER.info("Start Hassio task")
loop.call_soon_threadsafe(loop.create_task, hassio.start())
loop.call_soon_threadsafe(bootstrap.reg_signal, loop, hassio)
_LOGGER.info("Run Hassio loop")
loop.run_forever()
_LOGGER.info("Cleanup system")
executor.shutdown(wait=False)
loop.close()
_LOGGER.info("Close Hassio")
sys.exit(hassio.exit_code)
| bsd-3-clause | 461,431,713,023,787,140 | 24.733333 | 66 | 0.702073 | false |
joshuamckenty/yolo-octo-wookie | nova/fakerabbit.py | 1 | 4630 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Based a bit on the carrot.backeds.queue backend... but a lot better """
import logging
import Queue as queue
from carrot.backends import base
class Message(base.BaseMessage):
    # Plain carrot message; the fake backend needs no extra behavior.
    pass
class Exchange(object):
    """In-memory stand-in for an AMQP exchange.

    Messages published with a routing key are delivered synchronously to
    every callback bound to that key; keys with no bindings are dropped.
    """

    def __init__(self, name, exchange_type):
        self.name = name
        self.exchange_type = exchange_type
        # NOTE(review): this queue is never read or written by Exchange
        # itself; kept only for backward compatibility with external users.
        self._queue = queue.Queue()
        # routing_key -> list of bound callbacks
        self._routes = {}

    def publish(self, message, routing_key=None):
        """Deliver message to every callback bound to routing_key."""
        logging.debug('(%s) publish (key: %s) %s',
                      self.name, routing_key, message)
        # .get() with a default replaces the membership-test-then-index
        # pattern (one lookup instead of two).
        for f in self._routes.get(routing_key, ()):
            logging.debug('Publishing to route %s', f)
            f(message, routing_key=routing_key)

    def bind(self, callback, routing_key):
        """Register callback to receive messages sent with routing_key."""
        # setdefault both creates the route list on first use and returns
        # it, so the original two-step setdefault/append is collapsed.
        self._routes.setdefault(routing_key, []).append(callback)
class Queue(object):
    """Minimal FIFO message queue used by the fake backend.

    Wraps a thread-safe queue.Queue; ``routing_key`` is accepted on push
    only for call-signature compatibility and is ignored.
    """
    def __init__(self, name):
        self.name = name
        self._queue = queue.Queue()
    def __repr__(self):
        return '<Queue: %s>' % self.name
    def push(self, message, routing_key=None):
        # routing_key is unused; messages are stored strictly FIFO.
        self._queue.put(message)
    def size(self):
        # Number of messages currently waiting.
        return self._queue.qsize()
    def pop(self):
        # Blocks if the queue is empty (queue.Queue.get default behavior).
        return self._queue.get()
class Backend(object):
    """ Singleton backend for testing.

    All Backend() instances share one hidden __impl object, so exchanges
    and queues declared through any instance are visible to every other.
    """
    class __impl(base.BaseBackend):
        # The real backend; holds all exchange/queue state.
        def __init__(self, *args, **kwargs):
            #super(__impl, self).__init__(*args, **kwargs)
            self._exchanges = {}
            self._queues = {}
        def _reset_all(self):
            # Drop every declared exchange and queue (test cleanup).
            self._exchanges = {}
            self._queues = {}
        def queue_declare(self, queue, **kwargs):
            # Idempotent: re-declaring an existing queue is a no-op.
            if queue not in self._queues:
                logging.debug('Declaring queue %s', queue)
                self._queues[queue] = Queue(queue)
        def exchange_declare(self, exchange, type, *args, **kwargs):
            # Idempotent, mirroring queue_declare.
            if exchange not in self._exchanges:
                logging.debug('Declaring exchange %s', exchange)
                self._exchanges[exchange] = Exchange(exchange, type)
        def queue_bind(self, queue, exchange, routing_key, **kwargs):
            # Route matching publishes directly into the queue's push().
            logging.debug('Binding %s to %s with key %s',
                          queue, exchange, routing_key)
            self._exchanges[exchange].bind(self._queues[queue].push,
                                           routing_key)
        def get(self, queue, no_ack=False):
            # Non-blocking fetch: returns None for unknown or empty queues.
            if not queue in self._queues or not self._queues[queue].size():
                return None
            (message_data, content_type, content_encoding) = \
                self._queues[queue].pop()
            message = Message(backend=self, body=message_data,
                              content_type=content_type,
                              content_encoding=content_encoding)
            logging.debug('Getting from %s: %s', queue, message)
            return message
        def prepare_message(self, message_data, delivery_mode,
                            content_type, content_encoding, **kwargs):
            """Prepare message for sending (delivery_mode is ignored)."""
            return (message_data, content_type, content_encoding)
        def publish(self, message, exchange, routing_key, **kwargs):
            # Publishing to an undeclared exchange silently drops the message.
            if exchange in self._exchanges:
                self._exchanges[exchange].publish(
                    message, routing_key=routing_key)
    # Shared singleton instance of __impl.
    __instance = None
    def __init__(self, *args, **kwargs):
        # First construction builds the shared impl; later constructions
        # just attach to it. The mangled-name write bypasses __setattr__,
        # which would otherwise forward the assignment to the impl.
        if Backend.__instance is None:
            Backend.__instance = Backend.__impl(*args, **kwargs)
        self.__dict__['_Backend__instance'] = Backend.__instance
    def __getattr__(self, attr):
        # Delegate all other attribute reads to the shared impl.
        return getattr(self.__instance, attr)
    def __setattr__(self, attr, value):
        # Delegate attribute writes to the shared impl.
        return setattr(self.__instance, attr, value)
def reset_all():
    # Clear all exchanges and queues on the shared singleton backend.
    Backend()._reset_all()
| apache-2.0 | -5,151,208,215,358,748,000 | 33.552239 | 78 | 0.587905 | false |
evidation-health/ContinuousTimeMarkovModel | examples/small_sample_example_main.py | 1 | 6795 | import numpy as np
from theano.tensor import as_tensor_variable
from ContinuousTimeMarkovModel.distributions import *
from pymc3 import Model, sample, Metropolis, Dirichlet, Potential, Binomial, Beta, Slice, NUTS
import theano.tensor as TT
from ContinuousTimeMarkovModel.samplers.forwardS import *
from ContinuousTimeMarkovModel.samplers.forwardX import *
#import sys; sys.setrecursionlimit(50000)
#theano.config.compute_test_value = 'off'
# Load pre-generated data
from pickle import load
datadir = '../data/small_sample/'


def _load_pickle(name):
    """Load one pickled object from the sample-data directory.

    The context manager guarantees the file handle is closed; the
    original repeated open/load/close by hand for each of the 11 files.
    """
    with open(datadir + name + '.pkl', 'rb') as infile:
        return load(infile)


# Ground-truth parameters and observations used to initialize the sampler.
pi_start = _load_pickle('pi')
Q_start = _load_pickle('Q')
S_start = _load_pickle('S')
B_start = _load_pickle('B')
B0_start = _load_pickle('B0')
X_start = _load_pickle('X')
Z_start = _load_pickle('Z')
L_start = _load_pickle('L')
obs_jumps = _load_pickle('obs_jumps')
T = _load_pickle('T')
O = _load_pickle('O')
#Cut down to 100 people
newN = 100
T = T[:newN]
# T holds per-patient observation counts, so its sum is the total number
# of observation rows retained.
nObs = T.sum()
S_start = S_start[0:nObs]
obs_jumps = obs_jumps[0:nObs]
X_start = X_start[0:nObs]
O = O[0:nObs]
nObs = S_start.shape[0]
N = T.shape[0] # Number of patients
M = pi_start.shape[0] # Number of hidden states
K = Z_start.shape[0] # Number of comorbidities
D = Z_start.shape[1] # Number of claims
Dd = 16 # Maximum number of claims that can occur at once
#import pdb; pdb.set_trace()
# Build the continuous-time Markov model: hidden-state chain S, comorbidity
# indicators X, and observed claims O_obs.
model = Model()
with model:
    # pi: initial hidden-state distribution, seeded from the known values.
    #Fails: #pi = Dirichlet('pi', a = as_tensor_variable([0.147026,0.102571,0.239819,0.188710,0.267137,0.054738]), shape=M, testval = np.ones(M)/float(M))
    pi = Dirichlet('pi', a = as_tensor_variable(pi_start.copy()), shape=M)
    # Soft constraint keeping every state probability above 0.001.
    pi_min_potential = Potential('pi_min_potential', TT.switch(TT.min(pi) < .001, -np.inf, 0))
    # Q: rate matrix of the hidden-state Markov jump process.
    Q = DiscreteObsMJP_unif_prior('Q', M=M, lower=0.0, upper=1.0, shape=(M,M))
    #S = DiscreteObsMJP('S', pi=pi, Q=Q, M=M, nObs=nObs, observed_jumps=obs_jumps, T=T, shape=(nObs), testval=np.ones(nObs,dtype='int32'))
    S = DiscreteObsMJP('S', pi=pi, Q=Q, M=M, nObs=nObs, observed_jumps=obs_jumps, T=T, shape=(nObs))
    # B0/B: per-(comorbidity, state) onset probabilities.
    #B0 = Beta('B0', alpha = 1., beta = 1., shape=(K,M), testval=0.2*np.ones((K,M)))
    #B = Beta('B', alpha = 1., beta = 1., shape=(K,M), testval=0.2*np.ones((K,M)))
    B0 = Beta('B0', alpha = 1., beta = 1., shape=(K,M))
    B = Beta('B', alpha = 1., beta = 1., shape=(K,M))
    #X = Comorbidities('X', S=S, B0=B0,B=B, T=T, shape=(nObs, K), testval=np.ones((nObs,K),dtype='int8'))
    X = Comorbidities('X', S=S, B0=B0,B=B, T=T, shape=(nObs, K))
    # Z: claim emission probabilities per comorbidity; L: claim leak rates.
    #Z = Beta('Z', alpha = 0.1, beta = 1., shape=(K,D), testval=0.5*np.ones((K,D)))
    #L = Beta('L', alpha = 1., beta = 1., shape=D, testval=0.5*np.ones(D))
    Z = Beta('Z', alpha = 0.1, beta = 1., shape=(K,D))
    L = Beta('L', alpha = 1., beta = 1., shape=D)
    O_obs = Claims('O_obs', X=X, Z=Z, L=L, T=T, D=D, O_input=O, shape=(nObs,Dd), observed=O)
    #O_obs = Claims('O_obs', X=X, Z=Z, L=L, T=T, D=D, max_obs=max_obs, O_input=O, shape=(Dd,max_obs,N), observed=O)
#import pdb; pdb.set_trace()
# Build the sampler's starting point. Bounded parameters are sampled in an
# unbounded transformed space, so the known values are mapped through logit.
from scipy.special import logit

# Q is parameterized by its one-way (super-diagonal) rates only.
Q_raw = []
for i in range(Q_start.shape[0]-1):
    Q_raw.append(Q_start[i,i+1])
Q_raw_log = logit(np.asarray(Q_raw))
B_lo = logit(B_start)
B0_lo = logit(B0_start)
Z_lo = logit(Z_start)
L_lo = logit(L_start)
# Keys must match the transformed variable names pymc3 creates internally.
start = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_lo}
#teststart = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_lo, 'pi_stickbreaking':np.ones(M)/float(M)}
#start = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_start}
# Assemble one step method per variable group and run the MCMC sampler.
# Step order matters: ForwardS/ForwardX (custom FFBS-style samplers) are
# later indexed by position (steps[2], steps[4]) in the post-processing.
with model:
    #import pdb; pdb.set_trace()
    steps = []
    steps.append(NUTS(vars=[pi]))
    #steps.append(NUTS(vars=[pi], scaling=np.ones(M-1)*0.058))
    #steps.append(Metropolis(vars=[pi], scaling=0.058, tune=False))
    steps.append(NUTS(vars=[Q],scaling=np.ones(M-1,dtype=float)*10.))
    #steps.append(Metropolis(vars=[Q], scaling=0.2, tune=False))
    steps.append(ForwardS(vars=[S], nObs=nObs, T=T, N=N, observed_jumps=obs_jumps))
    steps.append(NUTS(vars=[B0,B]))
    #steps.append(Metropolis(vars=[B0], scaling=0.2, tune=False))
    #steps.append(NUTS(vars=[B]))
    #steps.append(Metropolis(vars=[B], scaling=0.198, tune=False))
    steps.append(ForwardX(vars=[X], N=N, T=T, K=K, D=D,Dd=Dd, O=O, nObs=nObs))
    steps.append(NUTS(vars=[Z], scaling=np.ones(K*D)))
    #steps.append(Metropolis(vars=[Z], scaling=0.0132, tune=False))
    steps.append(NUTS(vars=[L],scaling=np.ones(D)))
    #steps.append(Metropolis(vars=[L],scaling=0.02, tune=False, ))
    ## 22 minutes per step with all NUTS set
    #import pdb; pdb.set_trace()
    #model.dlogp()
    trace = sample(1001, steps, start=start, random_seed=111,progressbar=True)
    #trace = sample(11, steps, start=start, random_seed=111,progressbar=True)
    #trace = sample(11, steps, start=start, random_seed=[111,112,113],progressbar=False,njobs=3)
# Pull posterior draws out of the trace (rebinding the model variable names
# to plain arrays of samples).
pi = trace[pi]
Q = trace[Q]
S = trace[S]
#S0 = S[:,0] #now pibar
B0 = trace[B0]
B = trace[B]
X = trace[X]
Z = trace[Z]
L = trace[L]
# Per-draw empirical distribution over hidden states across all observations.
Sbin = np.vstack([np.bincount(S[i],minlength=6)/float(len(S[i])) for i in range(len(S))])
# zeroIndices marks the first observation row of each patient.
zeroIndices = np.roll(T.cumsum(),1)
zeroIndices[0] = 0
# pibar/SEnd: state distribution at each patient's first/last observation;
# the true-start (S_start) row is prepended for comparison.
pibar = np.vstack([np.bincount(S[i][zeroIndices],minlength=M)/float(zeroIndices.shape[0]) for i in range(len(S))])
pibar = np.vstack([np.bincount(S_start[zeroIndices],minlength=M)/float(zeroIndices.shape[0]),pibar])
SEnd = np.vstack([np.bincount(S[i][zeroIndices-1],minlength=M)/float(zeroIndices.shape[0]) for i in range(len(S))])
SEnd = np.vstack([np.bincount(S_start[zeroIndices-1],minlength=M)/float(zeroIndices.shape[0]),SEnd])
# Log-probabilities recorded by the custom ForwardS/ForwardX steps
# (positional indices match the steps list built above).
logp = steps[2].logp
Xlogp = steps[4].logp
# Fraction of patients whose comorbidity vector changes between
# consecutive observations, per draw (first row of each patient excluded).
XChanges = np.insert(1-(1-(X[:,1:]-X[:,:-1])).prod(axis=2),0,0,axis=1)
XChanges.T[zeroIndices] = 0
XChanges[XChanges.nonzero()] = XChanges[XChanges.nonzero()]/XChanges[XChanges.nonzero()]
XChanges = XChanges.sum(axis=1)/float(N)
logpTotal = [model.logp(trace[i]) for i in range(len(trace))]
#np.set_printoptions(2);np.set_printoptions(linewidth=160)
'''
for i in range(1001):
    print "~~~",i ,"~~~"
    print pi[i,:]
    print "Bincount S0:", np.bincount(S0[i,:],minlength=6)
    print "\n"
'''
#from pickle import dump
#with open('file.pkl','wb') as file:
#    dump(trace,file)
tdjordan/tortoisegit | gitgtk/addremove.py | 1 | 1219 | #
# Add/Remove dialog for TortoiseHg
#
# Copyright (C) 2007 TK Soh <teekaysoh@gmail.com>
#
try:
import pygtk
pygtk.require("2.0")
except:
pass
import gtk
import gobject
from mercurial import ui, util, hg
from mercurial.i18n import _
from status import GStatus
def run(hgcmd='add', root='', cwd='', files=None, **opts):
    """Open the TortoiseHg status dialog preconfigured for add or remove.

    hgcmd: either 'add' or 'remove'; anything else raises ValueError.
    root:  repository root path.
    cwd:   working directory passed through to the dialog.
    files: optional list of files to preselect (default: all).
    """
    # Avoid the shared mutable default-argument pitfall of `files=[]`.
    if files is None:
        files = []
    u = ui.ui()
    u.updateopts(debug=False, traceback=False)
    repo = hg.repository(u, path=root)
    # Base status options; add/remove below toggles which file states show.
    cmdoptions = {
        'all':False, 'clean':False, 'ignored':False, 'modified':False,
        'added':True, 'removed':True, 'deleted':True, 'unknown':False, 'rev':[],
        'exclude':[], 'include':[], 'debug':True,'verbose':True
    }
    if hgcmd == 'add':
        cmdoptions['unknown'] = True
    elif hgcmd == 'remove':
        cmdoptions['clean'] = True
    else:
        # The original raised a *string*, which is itself a TypeError at
        # runtime; raise a real exception type instead.
        raise ValueError("Invalid command '%s'" % hgcmd)
    dialog = GStatus(u, repo, cwd, files, cmdoptions, True)
    gtk.gdk.threads_init()
    gtk.gdk.threads_enter()
    dialog.display()
    gtk.main()
    gtk.gdk.threads_leave()
if __name__ == "__main__":
    import sys
    opts = {}
    # Fixed: the original passed 'adda', but run() only accepts 'add' or
    # 'remove' and would raise on anything else.
    opts['hgcmd'] = 'add'
    # First CLI argument, if present, is the repository root.
    opts['root'] = sys.argv[1] if len(sys.argv) > 1 else ''
    run(**opts)
| gpl-2.0 | 2,293,648,415,399,881,700 | 23.38 | 80 | 0.579984 | false |
SuLab/scheduled-bots | scheduled_bots/query_tester/validators.py | 1 | 1080 |
class Validator:
description = '' # Plain text description of what is being checked
expected_result = [] # optional
def __init__(self):
self.success = None # True or False
self.result_message = '' # optional extra information about test result
def validate(self, result):
raise NotImplementedError("Implement a Validator Subclass")
class OneOrMoreResultsValidator(Validator):
description = "Checks for at least 1 result"
def validate(self, result):
self.success = True if len(result) >= 1 else False
class NoResultsValidator(Validator):
description = "Checks for no results"
def validate(self, result):
self.success = True if len(result) == 0 else False
class NoValidator(Validator):
description = "No validation"
def validate(self, result):
self.success = None
class FailValidator(Validator):
description = "Always returns FAIL"
expected_result = [{'a': 4}]
def validate(self, result):
self.success = False
self.result_message = "this is more info" | mit | 5,172,143,440,517,708,000 | 28.216216 | 80 | 0.669444 | false |
bdcht/amoco | amoco/arch/x86/utils.py | 1 | 4777 | # -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2014 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
# spec_xxx files are providers for instruction objects.
from amoco.arch.x86 import env
from amoco.arch.core import *
# for ia32 arch we want some specialized 'modrm' format
# so we redefine ispec decorator here to allow /0-/7 and /r
# tokens in the spec format, following the Intel doc to
# indicate how ModR/M byte should be used :
class ispec_ia32(ispec):
    """ispec variant that expands Intel ModR/M shorthand in spec strings.

    "/r" expands to the generic ModR/M field layout, while "/0".."/7"
    pin the REG field to a fixed 3-bit opcode-extension value, following
    the Intel instruction-encoding notation.
    """
    def __init__(self, format, **kargs):
        n = format.find("/")
        # Only treat "/" as shorthand when a character follows it and it
        # is not the very first character of the spec.
        if 0 < n < len(format) - 1:
            c = format[n + 1]
            if c == "r":
                f = format.replace("/r", "RM(3) REG(3) Mod(2) ~data(*)")
            else:
                # /0../7: REG becomes the constant digit (parsed base-8).
                f = format.replace(
                    "/%c" % c, "RM(3) %s Mod(2) ~data(*)" % Bits(int(c, 8), 3)
                )
        else:
            f = format
        ispec.__init__(self, f, **kargs)
# read ModR/M + SIB values and update obj:
def getModRM(obj, Mod, RM, data):
    """Decode a ModR/M (and optional SIB + displacement) operand.

    obj:  instruction object being built (bytes/misc are updated in place).
    Mod:  2-bit Mod field; RM: 3-bit R/M field; data: remaining raw bits.
    Returns (operand, remaining_data); raises InstructionError when data
    runs out before the encoding is complete.
    """
    # Effective operand/address sizes, falling back to the current mode.
    opdsz = obj.misc["opdsz"] or env.internals["mode"]
    adrsz = obj.misc["adrsz"] or env.internals["mode"]
    seg = obj.misc["segreg"]
    # r/16/32 case:
    if Mod == 0b11:
        op1 = env.getreg(RM, opdsz)
        return op1, data
    # 32-bit SIB cases:
    if adrsz == 32 and RM == 0b100:
        # read SIB byte in data:
        if data.size < 8:
            raise InstructionError(obj)
        sib, data = data[0:8], data[8 : data.size]
        # add sib byte:
        obj.bytes += pack(sib)
        # decode base & scaled index
        b = env.getreg(sib[0:3].int(), adrsz)
        i = env.getreg(sib[3:6].int(), adrsz)
        ss = 1 << (sib[6:8].int())
        # esp/sp as index means "no index" in the SIB encoding.
        s = i * ss if not i.ref in ("esp", "sp") else 0
    else:
        s = 0
        if adrsz == 32:
            b = env.getreg(RM, adrsz)
        else:
            # 16-bit addressing: RM selects one of the fixed base+index
            # register combinations from the Intel manuals.
            b = (
                env.bx + env.si,
                env.bx + env.di,
                env.bp + env.si,
                env.bp + env.di,
                env.si,
                env.di,
                env.bp,
                env.bx,
            )[RM]
    # check [disp16/32] case:
    if (b is env.ebp or b is env.bp) and Mod == 0:
        # Mod=00 with (e)bp base actually encodes an absolute
        # displacement; force the disp16/32 read below.
        Mod = 0b10
        bs = s + env.cst(0, adrsz)
    elif s == 0:
        bs = b
    elif env.internals.get("keep_order"):
        # Instead of doing bs = b+s, which will reorder arguments, we do
        # the addition manually, and change 'prop' so the many future calls
        # to 'simplify' does not reorder the arguments
        from amoco.cas import expressions
        bs = expressions.op("+", b, s)
        bs.prop |= 16
    else:
        bs = b + s
    # now read displacement bytes:
    if Mod == 0b00:
        d = 0
    elif Mod == 0b01:
        # disp8, sign-extended to the address size.
        if data.size < 8:
            raise InstructionError(obj)
        d = data[0:8]
        data = data[8 : data.size]
        obj.bytes += pack(d)
        d = d.signextend(adrsz).int(-1)
    elif Mod == 0b10:
        # disp16/32 matching the address size.
        if data.size < adrsz:
            raise InstructionError(obj)
        d = data[0:adrsz]
        obj.bytes += pack(d)
        data = data[adrsz : data.size]
        d = d.int(-1)
    # Fold the displacement into a constant-zero base so absolute
    # addresses are represented as a single constant.
    if bs._is_cst and bs.v == 0x0:
        bs.size = adrsz
        bs.v = d & bs.mask
        d = 0
    return env.mem(bs, opdsz, seg, d), data
# Condition codes: maps the 4-bit cc field (as used by Jcc/SETcc/CMOVcc)
# to its mnemonic suffix alternatives and the equivalent EFLAGS predicate.
CONDITION_CODES = {
    0x0: ("O", (env.of == 1)),
    0x1: ("NO", (env.of == 0)),
    0x2: ("B/NAE/C", (env.cf == 1)),
    0x3: ("NB/AE/NC", (env.cf == 0)),
    0x4: ("Z/E", (env.zf == 1)),
    0x5: ("NZ/NE", (env.zf == 0)),
    0x6: ("BE/NA", (env.cf == 1) | (env.zf == 1)),
    0x7: ("NBE/A", (env.cf == 0) & (env.zf == 0)),
    0x8: ("S", (env.sf == 1)),
    0x9: ("NS", (env.sf == 0)),
    0xA: ("P/PE", (env.pf == 1)),
    0xB: ("NP/PO", (env.pf == 0)),
    0xC: ("L/NGE", (env.sf != env.of)),
    0xD: ("NL/GE", (env.sf == env.of)),
    0xE: ("LE/NG", (env.zf == 1) | (env.sf != env.of)),
    0xF: ("NLE/G", (env.zf == 0) & (env.sf == env.of)),
}
def do_nothing(obj):
    """No-op default action for the check_* helpers below."""
    pass


def set_opdsz_128(obj):
    """Force a 128-bit operand size on the instruction object."""
    obj.misc["opdsz"] = 128


def set_opdsz_64(obj):
    """Force a 64-bit operand size on the instruction object."""
    obj.misc["opdsz"] = 64


def set_opdsz_32(obj):
    """Force a 32-bit operand size on the instruction object."""
    obj.misc["opdsz"] = 32


def check_f2(obj, f=do_nothing):
    """Consume a pending 0xF2 (repne) prefix; apply f and return True if found."""
    pfx = obj.misc["pfx"]
    if pfx and pfx[0] == "repne":
        pfx[0] = None
        f(obj)
        return True
    return False


def check_f3(obj, f=do_nothing):
    """Consume a pending 0xF3 (rep) prefix; apply f and return True if found."""
    pfx = obj.misc["pfx"]
    if pfx and pfx[0] == "rep":
        pfx[0] = None
        f(obj)
        return True
    return False


def check_66(obj, f=do_nothing):
    """Return True (after applying f) when a 0x66 prefix set opdsz to 16."""
    if obj.misc["opdsz"] != 16:
        return False
    f(obj)
    return True


def check_nopfx(obj, f=do_nothing):
    """Return True (after applying f) when no prefix at all is pending."""
    if obj.misc["pfx"] is not None:
        return False
    f(obj)
    return True
| gpl-2.0 | 6,701,555,174,954,964,000 | 26.454023 | 78 | 0.494871 | false |
lingfliu/smart_tuwa | twrt/testbed/massive_scene_test.py | 1 | 3298 | import socket
import time
import sys
import random
import math
import threading
# Protocol constants for the gateway test message framing.
msg_header = 'AADD'
msg_stamp = '\x00\x00\x00\x00'
msg_id_gw = '2016A008'
msg_id_dev = '00000000'
msg_devtype = '\x01\x00'
msg_auth_key = '88888888'
msg_auth_datatype = '\x1c\x00'
# Authentication frame: header + stamp + ids + devtype + datatype +
# 2-byte body length (0x0008) + the 8-byte key.
msg_auth = msg_header+msg_stamp+msg_id_gw+msg_id_dev+msg_devtype+msg_auth_datatype+'\x00\x08'+msg_auth_key
#serverAddress = ('192.168.20.104', 9091)
serverAddress = ('localhost', 9091)
# Connect and authenticate before running the scene-creation loop below.
skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
skt.connect(serverAddress)
length = skt.send(msg_auth)
msg_bak = skt.recv(1024)
print msg_bak
#scene set
# Create 20 scenes with randomized payloads; each scene frame is sent in
# 256-byte chunks and the gateway's reply is read back after each scene.
for i in range(0,20):
    print('create scene' + str(i))
    sce_type_val = int(math.ceil(random.random()*3))
    sce_type = '%c'%sce_type_val
    # Scene ids reuse the loop index (randomized variants kept commented).
    sce_id_major_val = i #round(random.random()*1000)
    sce_id_major = '%08d'%sce_id_major_val
    sce_id_minor_val = i #round(random.random()*1000)
    sce_id_minor = '%08d'%sce_id_minor_val
    sce_mac_val= round(random.random()*1000)
    sce_mac = '%08d'%sce_mac_val
    sce_name_val = round(random.random()*100)
    # Name field is zero-padded to a fixed width with NUL bytes.
    sce_name = 'scene'+'%04d'%sce_name_val + '\x00'*51
    # NOTE(review): sce_type is recomputed here, overwriting the value
    # built above -- presumably intentional, but worth confirming.
    sce_type_val = int(math.ceil(random.random()*4))
    sce_type = '%c'%sce_type_val
    sce_type +='\x00'*3
    # Each trigger/item entry is a 48-byte record (an 8-digit id repeated 6x).
    sce_trigger_num = 100 #int(random.random()*100)
    sce_trigger = ''
    for m in range(0, sce_trigger_num):
        sce_trigger_val = round(random.random()*100)
        sce_trigger += ('%08d'%sce_trigger_val)*6
    sce_item_num = int(random.random()*100)
    sce_item = ''
    for m in range(0, sce_item_num):
        sce_item_val = round(random.random()*100)
        sce_item += ('%08d'%sce_item_val)*6
    # Body length = trigger records + item records + 96 bytes of fixed
    # fields, encoded big-endian in two bytes.
    body_len_val = 48*sce_item_num + 48*sce_trigger_num + 96
    body_len = ''
    body_len +='%c'%(int(body_len_val/256))
    body_len +='%c'%(body_len_val%256)
    msg_set_scene = msg_header+msg_stamp+msg_id_gw+msg_id_dev+msg_devtype+'\x0f\x00'+ body_len + sce_id_major +sce_id_minor+sce_mac+sce_type+sce_name+'%c'%sce_trigger_num + '\x00'*3+'%c'%sce_item_num+'\x00'*3+sce_trigger+sce_item
    #print('message length=' + str(len(msg_set_scene)))
    #print('body length=' + str(body_len_val))
    print (sce_id_major + ' ' + sce_id_minor + ' ' + sce_mac + ' ' + sce_name + ' ' + str(sce_trigger_num) + ' ' + str(sce_item_num) )
    #print(str('%c'%sce_trigger_num))
    #print(body_len)
    #print('msg = ' + msg_set_scene)
    # Send the frame in 256-byte chunks with a short pause between them.
    m = 0
    while(True):
        if m+256 < len(msg_set_scene):
            pkt = msg_set_scene[m:m+256]
            length = skt.send(pkt)
            print length
            m += 256
            time.sleep(0.01)
            continue
        else:
            pkt = msg_set_scene[m:]
            length = skt.send(pkt)
            time.sleep(0.01)
            print length
            break
    #length = skt.send(msg_set_scene())
    msg_bak = skt.recv(1024)
    print msg_bak
    time.sleep(0.01)
# Final frame (datatype 0x11) tells the gateway the scene upload is done.
msg_finish_scene = msg_header+msg_stamp+msg_id_gw+msg_id_dev+msg_devtype+'\x11\x00'+'\x00\x01' + '\x00'
print('msg finish = ' + msg_finish_scene)
length = skt.send(msg_finish_scene)
print length
msg_bak = skt.recv(1024)
print msg_bak
#while(True):
#msg_bak = skt.recv(1024)
#print msg_bak
#pass
| apache-2.0 | -1,788,681,679,811,182,000 | 28.711712 | 229 | 0.583081 | false |
edbrannin/Robotframework-SQLAlchemy-Library | src/SQLAlchemyLibrary/__init__.py | 1 | 2769 | # Copyright (c) 2010 Franz Allan Valencia See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from SQLAlchemyLibrary.connection_manager import ConnectionManager
from SQLAlchemyLibrary.query import Query
from SQLAlchemyLibrary.assertion import Assertion
__version_file_path__ = os.path.join(os.path.dirname(__file__), 'VERSION')
# Read the library version once at import time; the context manager
# closes the file handle, which the original `open(...).read()` leaked.
with open(__version_file_path__, 'r') as _version_file:
    __version__ = _version_file.read().strip()
class SQLAlchemyLibrary(ConnectionManager, Query, Assertion):
    """
    SQLAlchemy Library allows you to interact with your database in Robot Framework tests.

    This can allow you to query your database after an action has been made to verify the results.

    This can use any database supported by SQLAlchemy, including Oracle, MySQL, Postgres, SQLite.
    (Not yet tested on Oracle).

    This should be a drop-in replacement for DatabaseLibrary in most situations.

    Advantages over DatabaseLibrary

    - Ability to provide named-parameter BIND values

    == References: ==

    - SQLAlchemy documentation - http://docs.sqlalchemy.org/en/latest/index.html
    - List of SQLAlchemy Dialects - http://docs.sqlalchemy.org/en/latest/dialects/
    - Python Database Programming - http://wiki.python.org/moin/DatabaseProgramming/

    == Notes: ==

    === Example Usage: ===
    | # Setup |
    | Connect to Database |
    | # Guard assertion (verify that test started in expected state). |
    | Check if not exists in database | select id from person where first_name = :first_name and last_name = :last_name | first_name=Franz Allan | last_name=See |
    | # Drive UI to do some action |
    | Go To | http://localhost/person/form.html | | # From selenium library |
    | Input Text | name=first_name | Franz Allan | # From selenium library |
    | Input Text | name=last_name | See | # From selenium library |
    | Click Button | Save | | # From selenium library |
    | # Log results |
    | @{queryResults} | Query | select * from person |
    | Log Many | @{queryResults} |
    | # Verify if persisted in the database |
    | Check if exists in database | select id from person where first_name = 'Franz Allan' and last_name = 'See' |
    | # Teardown |
    | Disconnect from Database |
    """

    # One shared library instance (and DB connection) for the whole test run.
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
kou/zulip | tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py | 1 | 9193 | import json
import os
import re
from typing import Callable, Iterator, List, Optional, Union
import scrapy
from scrapy.http import Request, Response
from scrapy.linkextractors import IGNORED_EXTENSIONS
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spidermiddlewares.httperror import HttpError
from scrapy.utils.url import url_has_any_extension
from twisted.python.failure import Failure
EXCLUDED_URLS = [
# Google calendar returns 404s on HEAD requests unconditionally
'https://calendar.google.com/calendar/embed?src=ktiduof4eoh47lmgcl2qunnc0o@group.calendar.google.com',
# Returns 409 errors to HEAD requests frequently
'https://medium.freecodecamp.org/',
# Returns 404 to HEAD requests unconditionally
'https://www.git-tower.com/blog/command-line-cheat-sheet/',
'https://marketplace.visualstudio.com/items?itemName=rafaelmaiolla.remote-vscode',
# Requires authentication
'https://circleci.com/gh/zulip/zulip/tree/master',
'https://circleci.com/gh/zulip/zulip/16617',
'https://www.linkedin.com/company/zulip-project',
# Returns 403 errors to HEAD requests
'https://giphy.com',
'https://giphy.com/apps/giphycapture',
'https://www.udemy.com/course/the-complete-react-native-and-redux-course/',
]
VNU_IGNORE = [
# Real errors that should be fixed.
r'Duplicate ID “[^”]*”\.',
r'The first occurrence of ID “[^”]*” was here\.',
r'Attribute “markdown” not allowed on element “div” at this point\.',
r'No “p” element in scope but a “p” end tag seen\.',
r'Element “div” not allowed as child of element “ul” in this context\. '
+ r'\(Suppressing further errors from this subtree\.\)',
# Warnings that are probably less important.
r'The “type” attribute is unnecessary for JavaScript resources\.',
]
VNU_IGNORE_REGEX = re.compile(r'|'.join(VNU_IGNORE))
DEPLOY_ROOT = os.path.abspath(os.path.join(__file__, "../../../../../.."))
ZULIP_SERVER_GITHUB_FILE_URL_PREFIX = "https://github.com/zulip/zulip/blob/master"
ZULIP_SERVER_GITHUB_DIRECTORY_URL_PREFIX = "https://github.com/zulip/zulip/tree/master"
class BaseDocumentationSpider(scrapy.Spider):
name: Optional[str] = None
# Exclude domain address.
deny_domains: List[str] = []
start_urls: List[str] = []
deny: List[str] = []
file_extensions: List[str] = ['.' + ext for ext in IGNORED_EXTENSIONS]
tags = ('a', 'area', 'img')
attrs = ('href', 'src')
def _has_extension(self, url: str) -> bool:
return url_has_any_extension(url, self.file_extensions)
def _is_external_url(self, url: str) -> bool:
return url.startswith('http') or self._has_extension(url)
def check_existing(self, response: Response) -> None:
self.log(response)
def _is_external_link(self, url: str) -> bool:
if url.startswith("https://chat.zulip.org"):
# Since most chat.zulip.org URLs will be links to specific
# logged-in content that the spider cannot verify, or the
# homepage, there's no need to check those (which can
# cause errors when chat.zulip.org is being updated).
return True
if "zulip.readthedocs" in url or "zulip.com" in url or "zulip.org" in url:
# We want CI to check any links to Zulip sites.
return False
if (len(url) > 4 and url[:4] == "file") or ("localhost" in url):
# We also want CI to check any links to built documentation.
return False
if url.startswith(ZULIP_SERVER_GITHUB_FILE_URL_PREFIX) or url.startswith(ZULIP_SERVER_GITHUB_DIRECTORY_URL_PREFIX):
# We can verify these links directly in the local git repo without making any requests to GitHub servers.
return False
if 'github.com/zulip' in url:
# We want to check these links but due to rate limiting from GitHub, these checks often
# fail in the CI. Thus, we should treat these as external links for now.
# TODO: Figure out how to test github.com/zulip links in CI.
return True
return True
def check_fragment(self, response: Response) -> None:
self.log(response)
xpath_template = "//*[@id='{fragment}' or @name='{fragment}']"
m = re.match(r".+\#(?P<fragment>.*)$", response.request.url) # Get fragment value.
if not m:
return
fragment = m.group('fragment')
# Check fragment existing on response page.
if not response.selector.xpath(xpath_template.format(fragment=fragment)):
self.logger.error(
"Fragment #%s is not found on page %s", fragment, response.request.url)
def _vnu_callback(self, url: str) -> Callable[[Response], None]:
def callback(response: Response) -> None:
vnu_out = json.loads(response.text)
for message in vnu_out['messages']:
if not VNU_IGNORE_REGEX.fullmatch(message['message']):
self.logger.error(
'"%s":%d.%d-%d.%d: %s: %s',
url,
message.get('firstLine', message['lastLine']),
message.get('firstColumn', message['lastColumn']),
message['lastLine'],
message['lastColumn'],
message['type'],
message['message'],
)
return callback
def _make_requests(self, url: str) -> Iterator[Request]:
# These URLs are for Zulip's webapp, which with recent changes
# can be accessible without login an account. While we do
# crawl documentation served by the webapp (E.g. /help/), we
# don't want to crawl the webapp itself, so we exclude these.
if url in ['http://localhost:9981/', 'http://localhost:9981'] or url.startswith('http://localhost:9981/#') or url.startswith('http://localhost:9981#'):
return
callback: Callable[[Response], Optional[Iterator[Request]]] = self.parse
dont_filter = False
method = 'GET'
if self._is_external_url(url):
callback = self.check_existing
method = 'HEAD'
if url.startswith(ZULIP_SERVER_GITHUB_FILE_URL_PREFIX):
file_path = url.replace(ZULIP_SERVER_GITHUB_FILE_URL_PREFIX, DEPLOY_ROOT)
hash_index = file_path.find("#")
if hash_index != -1:
file_path = file_path[:hash_index]
if not os.path.isfile(file_path):
self.logger.error("There is no local file associated with the GitHub URL: %s", url)
return
elif url.startswith(ZULIP_SERVER_GITHUB_DIRECTORY_URL_PREFIX):
dir_path = url.replace(ZULIP_SERVER_GITHUB_DIRECTORY_URL_PREFIX, DEPLOY_ROOT)
if not os.path.isdir(dir_path):
self.logger.error("There is no local directory associated with the GitHub URL: %s", url)
return
elif '#' in url:
dont_filter = True
callback = self.check_fragment
if getattr(self, 'skip_external', False) and self._is_external_link(url):
return
yield Request(url, method=method, callback=callback, dont_filter=dont_filter,
errback=self.error_callback)
def start_requests(self) -> Iterator[Request]:
for url in self.start_urls:
yield from self._make_requests(url)
def parse(self, response: Response) -> Iterator[Request]:
self.log(response)
if getattr(self, 'validate_html', False):
yield Request(
'http://127.0.0.1:9988/?out=json',
method='POST',
headers={'Content-Type': response.headers['Content-Type']},
body=response.body,
callback=self._vnu_callback(response.url),
errback=self.error_callback,
)
for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],
tags=self.tags, attrs=self.attrs, deny=self.deny,
canonicalize=False).extract_links(response):
yield from self._make_requests(link.url)
def retry_request_with_get(self, request: Request) -> Iterator[Request]:
request.method = 'GET'
request.dont_filter = True
yield request
def exclude_error(self, url: str) -> bool:
return url in EXCLUDED_URLS
def error_callback(self, failure: Failure) -> Optional[Union[Failure, Iterator[Request]]]:
if failure.check(HttpError):
response = failure.value.response
if self.exclude_error(response.url):
return None
if response.status == 405 and response.request.method == 'HEAD':
# Method 'HEAD' not allowed, repeat request with 'GET'
return self.retry_request_with_get(response.request)
self.logger.error("Please check link: %s", response.request.url)
return failure
| apache-2.0 | 154,628,227,898,041,820 | 44.994975 | 159 | 0.611821 | false |
xiaohutushen30/seentao-xblock-sdk | workbench/test/test_problems.py | 1 | 3519 | """Test that problems and problem submission works well."""
import time
from selenium.common.exceptions import StaleElementReferenceException
from workbench import scenarios
from workbench.test.selenium_test import SeleniumTest
from bok_choy.query import BrowserQuery
class ProblemInteractionTest(SeleniumTest):
"""
A browser-based test of answering problems right and wrong.
"""
def setUp(self):
super(ProblemInteractionTest, self).setUp()
one_problem = """
<problem_demo>
<html_demo><p class="the_numbers">$a $b</p></html_demo>
<textinput_demo name="sum_input" input_type="int" />
<equality_demo name="sum_checker" left="./sum_input/@student_input" right="$c" />
<script>
import random
a = random.randint(1, 1000000)
b = random.randint(1, 1000000)
c = a + b
</script>
</problem_demo>
"""
self.num_problems = 3
scenarios.add_xml_scenario(
"test_many_problems", "Many problems",
"<vertical_demo>" + one_problem * self.num_problems + "</vertical_demo>"
)
self.addCleanup(scenarios.remove_scenario, "test_many_problems")
def test_many_problems(self):
# Test that problems work properly.
self.browser.get(self.live_server_url + "/scenario/test_many_problems")
header1 = BrowserQuery(self.browser, css="h1")
self.assertEqual(header1.text[0], "XBlock: Many problems")
# Find the numbers on the page.
nums = self.browser.find_elements_by_css_selector("p.the_numbers")
num_pairs = [tuple(int(n) for n in num.text.split()) for num in nums]
# They should be all different.
self.assertEqual(len(set(num_pairs)), self.num_problems)
text_ctrls_xpath = '//div[@data-block-type="textinput_demo"][@data-name="sum_input"]/input'
text_ctrls = self.browser.find_elements_by_xpath(text_ctrls_xpath)
check_btns = BrowserQuery(self.browser, css='input.check')
check_indicators = 'span.indicator'
def assert_image(right_wrong_idx, expected_icon):
"""Assert that the img src text includes `expected_icon`"""
for _ in range(3):
try:
sources = BrowserQuery(self.browser, css='{} img'.format(check_indicators)).nth(right_wrong_idx).attrs('src')
if sources and expected_icon in sources[0]:
break
else:
time.sleep(.25)
except StaleElementReferenceException as exc:
print exc
self.assertIn(expected_icon, sources[0])
for i in range(self.num_problems):
# Before answering, the indicator says Not Attempted.
self.assertIn("Not attempted", BrowserQuery(self.browser, css=check_indicators).nth(i).text[0])
answer = sum(num_pairs[i])
for _ in range(2):
# Answer right.
text_ctrls[i].clear()
text_ctrls[i].send_keys(str(answer))
check_btns[i].click()
assert_image(i, "/correct-icon.png")
# Answer wrong.
text_ctrls[i].clear()
text_ctrls[i].send_keys(str(answer + 1))
check_btns[i].click()
assert_image(i, "/incorrect-icon.png")
| agpl-3.0 | 2,430,540,823,265,295,400 | 39.918605 | 129 | 0.56948 | false |
peterhenderson/python-enunciate-samples | familytree.py | 1 | 5994 | try:
import json
except ImportError:
import simplejson as json
def parse(input):
"""Parse specified file or string and return a FamilyTree object created from it."""
if hasattr(input, "read"):
data = json.load(input)
else:
data = json.loads(input)
return FamilyTree(data)
class JSONBase:
"""Base class for all JSON-related objects"""
def to_json(self):
return json.dumps(self.to_json_dict())
def __repr__(self):
return self.to_json()
class FamilyTree(JSONBase):
def __init__(self, o):
if "statusCode" in o:
self.statusCode = o["statusCode"]
if "statusMessage" in o:
self.statusMessage = o["statusMessage"]
if "version" in o:
self.version = o["version"]
if "pedigrees" in o:
self.pedigrees = []
for item in o["pedigrees"]:
self.pedigrees.append(Pedigree(item))
def to_json_dict(self):
d = {}
if hasattr(self, "statusCode"):
d["statusCode"] = self.statusCode
if hasattr(self, "statusMessage"):
d["statusMessage"] = self.statusMessage
if hasattr(self, "version"):
d["version"] = self.version
if hasattr(self, "pedigrees"):
a = []
for item in self.pedigrees:
a.append(item.to_json_dict())
d["pedigrees"] = a
return d
class Pedigree(JSONBase):
def __init__(self, o):
if "id" in o:
self.id = o["id"]
if "requestedId" in o:
self.requestedId = o["requestedId"]
if "persons" in o:
self.persons = []
for item in o["persons"]:
self.persons.append(Person(item))
def to_json_dict(self):
d = {}
if hasattr(self, "id"):
d["id"] = self.id
if hasattr(self, "requestedId"):
d["requestedId"] = self.requestedId
if hasattr(self, "persons"):
a = []
for item in self.persons:
a.append(item.to_json_dict())
d["persons"] = a
return d
class Person(JSONBase):
def __init__(self, o):
if "id" in o:
self.id = o["id"]
if "assertions" in o:
self.assertions = PersonAssertions(o["assertions"])
if "parents" in o:
self.parents = []
for item in o["parents"]:
self.parents.append(ParentsReference(item))
def to_json_dict(self):
d = {}
if hasattr(self, "id"):
d["id"] = self.id
if hasattr(self, "assertions"):
d["assertions"] = self.assertions.to_json_dict()
if hasattr(self, "parents"):
a = []
for item in self.parents:
a.append(item.to_json_dict())
d["parents"] = a
return d
class PersonAssertions(JSONBase):
def __init__(self, o):
if "names" in o:
self.names = []
for item in o["names"]:
self.names.append(NameAssertion(item))
if "genders" in o:
self.genders = []
for item in o["genders"]:
self.genders.append(GenderAssertion(item))
def to_json_dict(self):
d = {}
if hasattr(self, "names"):
a = []
for item in self.names:
a.append(item.to_json_dict())
d["names"] = a
if hasattr(self, "genders"):
a = []
for item in self.genders:
a.append(item.to_json_dict())
d["genders"] = a
return d
class NameAssertion(JSONBase):
def __init__(self, o):
if "value" in o:
self.value = NameValue(o["value"])
def to_json_dict(self):
d = {}
if hasattr(self, "value"):
d["value"] = self.value.to_json_dict()
return d
class NameValue(JSONBase):
def __init__(self, o):
if "forms" in o:
self.forms = []
for item in o["forms"]:
self.forms.append(NameForm(item))
def to_json_dict(self):
d = {}
if hasattr(self, "forms"):
a = []
for item in self.forms:
a.append(item.to_json_dict())
d["forms"] = a
return d
class NameForm(JSONBase):
def __init__(self, o):
if "fullText" in o:
self.fullText = o["fullText"]
def to_json_dict(self):
d = {}
if hasattr(self, "fullText"):
d["fullText"] = self.fullText
return d
class GenderAssertion(JSONBase):
def __init__(self, o):
if "value" in o:
self.value = GenderValue(o["value"])
def to_json_dict(self):
d = {}
if hasattr(self, "value"):
d["value"] = self.value.to_json_dict()
return d
class GenderValue(JSONBase):
def __init__(self, o):
if "type" in o:
self.type = o["type"]
def to_json_dict(self):
d = {}
if hasattr(self, "type"):
d["type"] = self.type
return d
class ParentsReference(JSONBase):
def __init__(self, o):
if "parent" in o:
self.parents = []
for item in o["parent"]:
self.parents.append(PersonReference(item))
def to_json_dict(self):
d = {}
if hasattr(self, "parents"):
a = []
for item in self.parents:
a.append(item.to_json_dict())
d["parent"] = a
d["parents"] = a
return d
class PersonReference(JSONBase):
def __init__(self, o):
if "id" in o:
self.id = o["id"]
if "gender" in o:
self.gender = o["gender"]
def to_json_dict(self):
d = {}
if hasattr(self, "id"):
d["id"] = self.id
if hasattr(self, "gender"):
d["gender"] = self.gender
return d
| apache-2.0 | -720,568,518,906,619,600 | 25.522124 | 88 | 0.489823 | false |
codebikeclimb/NASARobotComp | Robot2017_Master/Robot2016/motorTest.py | 1 | 2963 | #!/usr/bin/python
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
import serial
import time
import atexit
#initialize i2c communication with motor shield
roboMotor = Adafruit_MotorHAT(addr=0x60)
#initialize serial communications with XBee RF reciever
xBee = serial.Serial('/dev/ttyACM1',57600)
compass = serial.Serial('/dev/ttyACM0', 9600)
def turnOffMotors():
roboMotor.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
roboMotor.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
atexit.register(turnOffMotors)
#create motor objects
leftFrontRear = roboMotor.getMotor(3)
rightFrontRear = roboMotor.getMotor(4)
#set speed to start ---- 0(off) - 255(Max)
#beacon navigation
def beaconNavigation():
bHeadings = []
botHeadings = []
for x in range(0,2):
botHeading = compass.readline()
botHeading = float(botHeading)
botHeadings.append(botHeading)
print(botHeading)
beaconHeading = xBee.readline()
beaconHeading = float(beaconHeading)
bHeadings.append(beaconHeading)
print(beaconHeading)
botTotal = sum(botHeadings)
botLength = len(botHeadings)
avgBotHeading = botTotal / botLength
print "avg bot heading: ", avgBotHeading
total = sum(bHeadings)
l = len(bHeadings)
avgHeading = total / l
print "avg b heading: ", avgHeading
#calculate opposite heading
x = avgHeading + 180
oppositeHeading = x % 360
oppositeHeading = float(oppositeHeading)
print "opposite beacon heading: ", oppositeHeading
# while(botHeading <= oppositeHeading or botHeading >= oppositeHeading):
while(botHeading < oppositeHeading or botHeading > oppositeHeading + 1.0):
botHeading = compass.readline()
botHeading = float(botHeading)
print botHeading
# rightRotate()
forward()
# toTheBeacon()
#for x in range(0,20):
# heading = xBee.readline()
# botBearing = compass.readline()
# print(heading)
# print(botBearing)
#drive forwards
def forward():
# beaconNavigation()
while(True):
leftFrontRear.setSpeed(80)
rightFrontRear.setSpeed(80)
leftFrontRear.run(Adafruit_MotorHAT.FORWARD)
rightFrontRear.run(Adafruit_MotorHAT.FORWARD)
#drive backwards
def reverse():
rightFrontRear.setSpeed(150)
leftFrontRear.setSpeed(150)
rightFrontRear.run(Adafruit_MotorHAT.BACKWARD)
leftFrontRear.run(Adafruit_MotorHAT.BACKWARD)
#rotate left, rotate right
def leftRotate():
rightFrontRear.setSpeed(70)
rightFrontRear.run(Adafruit_MotorHAT.FORWARD)
def rightRotate():
leftFrontRear.setSpeed(90)
rightFrontRear.setSpeed(90)
leftFrontRear.run(Adafruit_MotorHAT.FORWARD)
rightFrontRear.run(Adafruit_MotorHAT.BACKWARD)
#turn left, turn right
def leftTurn():
rightFrontRear.setSpeed(200)
leftFrontRear.setSpeed(125)
rightFrontRear.run(Adafruit_MotorHAT.FORWARD)
leftFrontRear.run(Adafruit_MotorHAT.FORWARD)
def rightTurn():
rightFrontRear.setSpeed(150)
leftFrontRear.setSpeed(200)
leftFrontRear.run(Adafruit_MotorHAT.FORWARD)
rightFrontRear.run(Adafruit_MotorHAT.FORWARD)
beaconNavigation()
forward()
| gpl-3.0 | -6,703,005,234,935,376,000 | 21.792308 | 75 | 0.76949 | false |
fieldOfView/pyQNodesEditor | qneport.py | 1 | 4898 | # Copyright (c) 2014, ALDO HOEBEN
# Copyright (c) 2012, STANISLAW ADASZEWSKI
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of STANISLAW ADASZEWSKI nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL STANISLAW ADASZEWSKI BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from PySide.QtCore import (Qt)
from PySide.QtGui import (QBrush, QColor, QPainter, QPainterPath, QPen)
from PySide.QtGui import (QGraphicsItem, QGraphicsPathItem, QGraphicsTextItem)
class QNEPort(QGraphicsPathItem):
(NamePort, TypePort) = (1, 2)
(Type) = (QGraphicsItem.UserType +1)
def __init__(self, parent):
super(QNEPort, self).__init__(parent)
self.label = QGraphicsTextItem(self)
self.radius_ = 4
self.margin = 3
path = QPainterPath()
path.addEllipse(-self.radius_, -self.radius_, 2*self.radius_, 2*self.radius_);
self.setPath(path)
self.setPen(QPen(Qt.darkRed))
self.setBrush(Qt.red)
self.setFlag(QGraphicsItem.ItemSendsScenePositionChanges)
self.m_portFlags = 0
self.isOutput_ = False
self.m_block = None
self.m_connections = []
def __del__(self):
#print("Del QNEPort %s" % self.name)
pass
def delete(self):
for connection in self.m_connections:
connection.delete()
self.scene().removeItem(self)
self.m_block = None
self.m_connections = []
def setName(self, name):
self.name = name
self.label.setPlainText(name)
def setIsOutput(self, isOutput):
self.isOutput_ = isOutput
if self.isOutput_:
self.label.setPos(-self.radius_ - self.margin - self.label.boundingRect().width(),
-self.label.boundingRect().height()/2);
else:
self.label.setPos(self.radius_ + self.margin,
-self.label.boundingRect().height()/2);
def setNEBlock(self, block):
self.m_block = block
def setPortFlags(self, flags):
self.m_portFlags = flags
if self.m_portFlags & self.TypePort:
font = self.scene().font()
font.setItalic(True)
self.label.setFont(font)
self.setPath(QPainterPath())
elif self.m_portFlags & self.NamePort:
font = self.scene().font()
font.setBold(True)
self.label.setFont(font)
self.setPath(QPainterPath())
def setPtr(self, ptr):
self.m_ptr = ptr
def type(self):
return self.Type
def radius(self):
return self.radius_
def portName(self):
return self.name
def isOutput(self):
return self.isOutput_
def block(self):
return self.m_block
def portFlags(self):
return self.m_portFlags
def ptr(self):
return self.m_ptr;
def addConnection(self, connection):
self.m_connections.append(connection)
def removeConnection(self, connection):
try:
self.m_connections.remove(connection)
except: pass
def connections(self):
return self.m_connections
def isConnected(self, other):
for connection in self.m_connections:
if connection.port1() == other or connection.port2() == other:
return True
return False
def itemChange(self, change, value):
if change == QGraphicsItem.ItemScenePositionHasChanged:
for connection in self.m_connections:
connection.updatePosFromPorts()
connection.updatePath()
return value
| bsd-3-clause | 2,503,830,618,461,107,700 | 28.154762 | 94 | 0.652511 | false |
mjwestcott/PyPokertools | tests/test_translation.py | 1 | 1307 | from examples.translation import to_cards, translate
from pokertools import holecards
def test_translation():
assert set(translate("66")) == {
"6c 6d", "6c 6h", "6c 6s", "6d 6h", "6d 6s", "6h 6s"
}
assert set(translate("AKs")) == {
"Ac Kc", "Ad Kd", "Ah Kh", "As Ks"
}
assert set(translate("QJo")) == {
"Qc Jd", "Qd Jc", "Qh Jc", "Qs Jc",
"Qc Jh", "Qd Jh", "Qh Jd", "Qs Jd",
"Qc Js", "Qd Js", "Qh Js", "Qs Jh",
}
assert set(translate("QQ+")) == {
"Qc Qd", "Qc Qh", "Qc Qs", "Qd Qh", "Qd Qs", "Qh Qs",
"Kc Kd", "Kc Kh", "Kc Ks", "Kd Kh", "Kd Ks", "Kh Ks",
"Ac Ad", "Ac Ah", "Ac As", "Ad Ah", "Ad As", "Ah As",
}
assert set(translate("A5s-A3s")) == {
"Ac 5c", "Ad 5d", "Ah 5h", "As 5s",
"Ac 4c", "Ad 4d", "Ah 4h", "As 4s",
"Ac 3c", "Ad 3d", "Ah 3h", "As 3s",
}
button_opening_range = (
"22+, A2s+, K2s+, Q2s+, J6s+, T6s+, 96s+, 86s+, 75s+, 64s+, "
"54s, A2o+, K9o+, Q9o+, J9o+, T8o+, 98o, 87o"
)
result = list(translate(button_opening_range))
assert len(result) == 586
def test_to_cards():
assert set(to_cards("T9s")) == {
holecards("Tc 9c"),
holecards("Td 9d"),
holecards("Th 9h"),
holecards("Ts 9s"),
}
| mit | -6,830,716,536,412,365,000 | 30.878049 | 69 | 0.472073 | false |
jarvisqi/nlp_learn | gensim/text.py | 1 | 2054 | import jieba
import pandas as pd
from gensim import corpora, models, similarities
# 训练样本
raw_documents = [
'0无偿居间介绍买卖毒品的行为应如何定性',
'1吸毒男动态持有大量毒品的行为该如何认定',
'2如何区分是非法种植毒品原植物罪还是非法制造毒品罪',
'3为毒贩贩卖毒品提供帮助构成贩卖毒品罪',
'4将自己吸食的毒品原价转让给朋友吸食的行为该如何认定',
'5为获报酬帮人购买毒品的行为该如何认定',
'6毒贩出狱后再次够买毒品途中被抓的行为认定',
'7虚夸毒品功效劝人吸食毒品的行为该如何认定',
'8妻子下落不明丈夫又与他人登记结婚是否为无效婚姻',
'9一方未签字办理的结婚登记是否有效',
'10夫妻双方1990年按农村习俗举办婚礼没有结婚证 一方可否起诉离婚',
'11结婚前对方父母出资购买的住房写我们二人的名字有效吗',
'12身份证被别人冒用无法登记结婚怎么办?',
'13同居后又与他人登记结婚是否构成重婚罪',
'14未办登记只举办结婚仪式可起诉离婚吗',
'15同居多年未办理结婚登记,是否可以向法院起诉要求离婚'
]
def main():
corpora_documents = []
for item_text in raw_documents:
item_str = list(jieba.cut(item_text))
corpora_documents.append(item_str)
dictionary = corpora.Dictionary(corpora_documents)
corpus = [dictionary.doc2bow(text) for text in corpora_documents]
similarity =similarities.Similarity('-Similarity-index', corpus, num_features=400)
test_data_1 = '你好,我想问一下我想离婚他不想离,孩子他说不要,是六个月就自动生效离婚'
test_cut_raw_1 = jieba.cut(test_data_1)
test_corpus_1 = dictionary.doc2bow(test_cut_raw_1)
similarity.num_best = 5
# 返回最相似的样本材料,(index_of_document, similarity) tuples
print(similarity[test_corpus_1])
if __name__ == '__main__':
main()
| mit | -1,880,925,002,496,043,800 | 27.755556 | 86 | 0.704791 | false |
WisniewskiP/meson | install_meson.py | 1 | 3639 | #!/usr/bin/env python3
# Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script installs Meson. We can't use Meson to install itself
# because of the bootstrap problem. We can't use any other build system
# either becaust that would be just silly.
import os, sys, glob, shutil, gzip
from optparse import OptionParser
usage_info = '%prog [--prefix PREFIX] [--destdir DESTDIR]'
parser = OptionParser(usage=usage_info)
parser.add_option('--prefix', default='/usr/local', dest='prefix',
help='the installation prefix (default: %default)')
parser.add_option('--destdir', default='', dest='destdir',
help='the destdir (default: %default)')
(options, args) = parser.parse_args(sys.argv)
if options.prefix[0] != '/':
print('Error, prefix must be an absolute path.')
sys.exit(1)
if options.destdir == '':
install_root = options.prefix
else:
install_root = os.path.join(options.destdir, options.prefix[1:])
script_dir = os.path.join(install_root, 'share/meson')
bin_dir = os.path.join(install_root, 'bin')
bin_script = os.path.join(script_dir, 'meson.py')
gui_script = os.path.join(script_dir, 'mesongui.py')
conf_script = os.path.join(script_dir, 'mesonconf.py')
bin_name = os.path.join(bin_dir, 'meson')
gui_name = os.path.join(bin_dir, 'mesongui')
conf_name = os.path.join(bin_dir, 'mesonconf')
man_dir = os.path.join(install_root, 'share/man/man1')
in_manfile = 'man/meson.1'
out_manfile = os.path.join(man_dir, 'meson.1.gz')
in_guimanfile = 'man/mesongui.1'
out_guimanfile = os.path.join(man_dir, 'mesongui.1.gz')
in_confmanfile = 'man/mesonconf.1'
out_confmanfile = os.path.join(man_dir, 'mesonconf.1.gz')
symlink_value = os.path.relpath(bin_script, os.path.dirname(bin_name))
guisymlink_value = os.path.relpath(gui_script, os.path.dirname(gui_name))
confsymlink_value = os.path.relpath(conf_script, os.path.dirname(conf_name))
files = glob.glob('*.py')
files += glob.glob('*.ui')
noinstall = ['compile_meson.py', 'install_meson.py', 'run_tests.py', 'run_cross_test.py']
files = [x for x in files if x not in noinstall]
os.makedirs(script_dir, exist_ok=True)
os.makedirs(bin_dir, exist_ok=True)
os.makedirs(man_dir, exist_ok=True)
for f in files:
print('Installing %s to %s.' %(f, script_dir))
outfilename = os.path.join(script_dir, f)
shutil.copyfile(f, outfilename)
shutil.copystat(f, outfilename)
try:
os.remove(bin_name)
except OSError:
pass
print('Creating symlinks %s and %s.' % (bin_name, gui_name))
try:
os.unlink(bin_name)
except FileNotFoundError:
pass
try:
os.unlink(gui_name)
except FileNotFoundError:
pass
try:
os.unlink(conf_name)
except FileNotFoundError:
pass
os.symlink(symlink_value, bin_name)
os.symlink(guisymlink_value, gui_name)
os.symlink(confsymlink_value, conf_name)
print('Installing manfiles to %s.' % man_dir)
open(out_manfile, 'wb').write(gzip.compress(open(in_manfile, 'rb').read()))
open(out_confmanfile, 'wb').write(gzip.compress(open(in_confmanfile, 'rb').read()))
open(out_guimanfile, 'wb').write(gzip.compress(open(in_guimanfile, 'rb').read()))
| apache-2.0 | -5,287,735,341,432,281,000 | 35.029703 | 89 | 0.710085 | false |
wcmckee/wcmckee-notebook | hackbrobeur.py | 1 | 2509 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# Hack BroBeur
# <markdowncell>
# python script to make account at centre for logins.
# <codecell>
from github import Github
import os
import getpass
import git
import time
from clint.textui import colored
import dominate
from dominate.tags import *
import envoy
# <codecell>
zeuser = getpass.getuser()
# <codecell>
g = Github(zeuser, 'blzh123!')
# <codecell>
# <codecell>
gitlist = []
# <codecell>
searchpy = g.search_repositories(zeuser)
# <codecell>
for pya in searchpy:
print pya.full_name
wcm = pya.full_name
# <codecell>
for repo in g.get_user('wcmckee').get_repos():
gitlist.append(repo.name)
# <codecell>
os.mkdir('/home/wcmckee/github')
# <codecell>
lisdir = os.listdir('/home/wcmckee/github/')
# <codecell>
lisdir
# <codecell>
curlis = []
# <codecell>
for lis in lisdir:
curlis.append(ls)
# <codecell>
dlrepo = list(set(gitlist) - set(curlis))
# <codecell>
print dlrepo
# <codecell>
wafi = time.sleep(5)
# <codecell>
import sh
# <codecell>
import git
repo = git.Repo( '/home/wcmckee/learnpython' )
print repo.git.status()
# <codecell>
assert repo.bare == False
# <codecell>
ycmds = ['git', 'clone', ']
# <codecell>
import os
# <codecell>
import git
os.chdir()
# <codecell>
for gitbl in dlrepo:
print (colored.red('Downloading - ' + (colored.blue('wcmckee') + ' - ' + gitbl)))
#git.Git().clone("https://github.com/wcmckee/" + gitbl)
envoy.run('git clone https://github.com/wcmckee/' + gitbl )
t = envoy.run('df')
t.std_out
print ('Download complete. Waiting 5 secs till the next')
wafi
# <codecell>
from paramiko import SSHClient
from scp import SCPClient
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.connect('example.com')
# <codecell>
import subprocess
# <codecell>
# <codecell>
# <codecell>
cmdrun = ['sudo', 'pip', 'install', 'paramiko']
# <codecell>
supi = envoy.run(cmdrun)
# <codecell>
insvn = subprocess.check_output(cmdrun)
# <codecell>
newlis = []
# <codecell>
for repoz in g.get_user('wcmckee').get_repos():
newlis.append(repo.name)
# <codecell>
gitlist
# <codecell>
indop = open('index.html', 'r')
# <codecell>
# <codecell>
indop.read()
# <codecell>
from paramiko import SSHClient
from scp import SCPClient
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.connect('example.com')
# <codecell>
import envoy
# <codecell>
import clon
# <codecell>
# <codecell>
| gpl-2.0 | 2,657,833,803,489,301,000 | 11.420792 | 85 | 0.656038 | false |
njncalub/hiddencloudserver | supersyncer/migrations/0002_auto__del_booktextquestionchoice__add_field_booktextquestion_choice_1_.py | 1 | 10046 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'BookTextQuestionChoice'
db.delete_table('supersyncer_booktextquestionchoice')
# Adding field 'BookTextQuestion.choice_1'
db.add_column('supersyncer_booktextquestion', 'choice_1',
self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True),
keep_default=False)
# Adding field 'BookTextQuestion.choice_2'
db.add_column('supersyncer_booktextquestion', 'choice_2',
self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True),
keep_default=False)
# Adding field 'BookTextQuestion.choice_3'
db.add_column('supersyncer_booktextquestion', 'choice_3',
self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True),
keep_default=False)
# Adding field 'BookTextQuestion.choice_4'
db.add_column('supersyncer_booktextquestion', 'choice_4',
self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True),
keep_default=False)
# Adding field 'BookTextQuestion.correct'
db.add_column('supersyncer_booktextquestion', 'correct',
self.gf('django.db.models.fields.CharField')(default='1', max_length=1),
keep_default=False)
def backwards(self, orm):
# Adding model 'BookTextQuestionChoice'
db.create_table('supersyncer_booktextquestionchoice', (
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('choice', self.gf('django.db.models.fields.TextField')()),
('is_correct', self.gf('django.db.models.fields.BooleanField')(default=False)),
('from_book_text_question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['supersyncer.BookTextQuestion'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('supersyncer', ['BookTextQuestionChoice'])
# Deleting field 'BookTextQuestion.choice_1'
db.delete_column('supersyncer_booktextquestion', 'choice_1')
# Deleting field 'BookTextQuestion.choice_2'
db.delete_column('supersyncer_booktextquestion', 'choice_2')
# Deleting field 'BookTextQuestion.choice_3'
db.delete_column('supersyncer_booktextquestion', 'choice_3')
# Deleting field 'BookTextQuestion.choice_4'
db.delete_column('supersyncer_booktextquestion', 'choice_4')
# Deleting field 'BookTextQuestion.correct'
db.delete_column('supersyncer_booktextquestion', 'correct')
models = {
'supersyncer.book': {
'Meta': {'object_name': 'Book'},
'author': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['supersyncer.BookAuthor']", 'null': 'True', 'blank': 'True'}),
'book_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'genre': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['supersyncer.BookGenre']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'total_words': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'supersyncer.bookauthor': {
'Meta': {'object_name': 'BookAuthor'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'supersyncer.bookgenre': {
'Meta': {'object_name': 'BookGenre'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'genre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'supersyncer.booktext': {
'Meta': {'object_name': 'BookText'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'difficulty': ('django.db.models.fields.CharField', [], {'default': "'EA'", 'max_length': '2'}),
'from_book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['supersyncer.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'total_words': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'supersyncer.booktextquestion': {
'Meta': {'object_name': 'BookTextQuestion'},
'choice_1': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'choice_2': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'choice_3': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'choice_4': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'correct': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_book_text': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['supersyncer.BookText']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.TextField', [], {})
},
'supersyncer.leaderboard': {
'Meta': {'object_name': 'LeaderBoard'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'supersyncer.report': {
'Meta': {'object_name': 'Report'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'supersyncer.userlog': {
'Meta': {'object_name': 'UserLog'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['supersyncer.UserProfile']"})
},
'supersyncer.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'birth_date': ('django.db.models.fields.DateField', [], {}),
'cluster': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'comp_benchmark': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'speed_benchmark': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'supersyncer.userprogress': {
'Meta': {'object_name': 'UserProgress'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['supersyncer.UserProfile']"})
}
}
complete_apps = ['supersyncer'] | gpl-3.0 | -552,893,781,564,322,200 | 63.819355 | 176 | 0.5652 | false |
osantana/correios | documentation/process_correios_status.py | 1 | 1409 | #!/usr/bin/env python3.5
import csv
import re
import sys
result = []
with open(sys.argv[1]) as csvfile:
reader = csv.reader(csvfile)
for raw_row in reader:
tipo, status, descr, detalhe, cliente = raw_row
tipo = tipo.strip().replace("\n", " ")
status = status.strip().replace("\n", " ")
descr = descr.strip().replace("\n", " ")
detalhe = detalhe.strip().replace("\n", " ")
cliente = cliente.strip().replace("\n", " ")
if status:
row = {
'tipo': tipo.split(),
'status': status,
'descr': descr,
'detalhe': detalhe,
'cliente': cliente,
}
result.append(row)
else:
if tipo:
row['tipo'].append(tipo)
row['descr'] = "{} {}".format(row['descr'], descr).strip()
row['detalhe'] = "{} {}".format(row['detalhe'], detalhe).strip()
row['cliente'] = "{} {}".format(row['cliente'], cliente).strip()
writer = csv.writer(sys.stdout)
for res in result:
for tipo in res["tipo"]:
detalhe = res["detalhe"].replace('F avor', 'Favor')
detalhe = re.sub("<.*?>", "", detalhe).strip()
row = [
tipo,
res["status"],
res["descr"],
detalhe,
res["cliente"],
]
writer.writerow(row)
| apache-2.0 | 676,500,828,654,824,400 | 28.978723 | 76 | 0.471966 | false |
32bitmicro/EDA | python/eda/eda/dump.py | 1 | 2948 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014, Paweł Wodnicki
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the 32bitmicro nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL Paweł Wodnicki BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from edautils import *
from eda import *
CRLF = "\n"
class CDump:
" Dump class "
def __init__(self, sch=None,brd=None):
self.name=""
self.sch=sch
self.brd=brd
def dumpNet(self,net):
ns = ''
for node in net.nodes:
ns += " pin " + str(node.pin.num) + " - " + node.pin.name + " dev " + node.dev.refid + CRLF
return ns
def dumpNets(self, design):
ns = ''
ns += "NETS: " + CRLF
ns += "" + CRLF
for netname in design.nets:
net = design.nets[netname]
ns += " " + netname + CRLF
ns += self.dumpNet(net)
ns += "" + CRLF
return ns
def dumpDevice(self, dev):
ns = ''
for pinnum in dev.pins:
pin = dev.pins[pinnum]
ns += " pin " + str(pin.num) + " - " + pin.name + " net " + pin.netname + CRLF
return ns
def dumpDevices(self, design):
ns = ''
ns += "Devices: " + CRLF
ns += "" + CRLF
for devname in design.devices:
dev = design.devices[devname]
ns += " " + devname + CRLF
ns += self.dumpDevice(dev)
ns += "" + CRLF
return ns
| bsd-3-clause | 4,909,801,486,996,867,000 | 34.071429 | 111 | 0.598099 | false |
apdjustino/DRCOG_Urbansim | src/opus_gui/results_manager/run/indicator_framework/visualizer/visualizers/matplotlib_lorenzcurve.py | 1 | 10890 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os, re, sys, time, traceback
from copy import copy
from opus_gui.results_manager.run.indicator_framework.visualizer.visualizers.abstract_visualization import Visualization
from opus_core.logger import logger
from numpy import array, arange
from numpy import ones, zeros, hstack, vstack
from numpy import trapz, trim_zeros
from pylab import subplot, plot, show
from pylab import xlabel, ylabel, title, text
from pylab import MultipleLocator, FormatStrFormatter
from pylab import savefig, clf, close
class LorenzCurve(Visualization):
def __init__(self, source_data, dataset_name,
attribute = None,
years = None, operation = None, name = None, scale = None,
storage_location = None):
Visualizer.__init__(self, source_data, dataset_name, [attribute],
years, operation, name,
storage_location)
self._values = None
self._ginicoeff = None
def is_single_year_indicator_image_type(self):
return True
def get_file_extension(self):
return 'png'
def get_visualization_shorthand(self):
return 'lorenzcurve'
def get_additional_metadata(self):
return {}
def _create_indicator(self, year):
"""Create a Lorenz Curve for the given indicator,
save it to the cache directory's 'indicators' sub-directory.
"""
attribute_short = self.get_attribute_alias(attribute = self.attributes[0],
year = year)
title = attribute_short + ' ' + str(year)
if self.run_description is not None:
title += '\n' + self.run_description
# Do calculation
# Make fresh copy with dtype float64 to avoid overflows
self._values = array(self._get_indicator(year, wrap = False).astype('float64'))
self._compute_lorenz()
file_path = self.get_file_path(year = year)
self._plot(attribute_short, file_path );
return file_path
def _compute_lorenz(self ):
''' Do the lorenz curve computation and save the result in the corresponding
class variables
'''
self._values.sort()
#remove 0 values from array
self._values = trim_zeros(self._values,'f')
num_values = self._values.size
F = arange(1, num_values + 1, 1, "float64")/num_values
L = self._values.cumsum(dtype="float64")/sum(self._values)
# Add (0, 0) as the first point for completeness (e.g. plotting)
origin = array([[0], [0]])
self._values = vstack((F, L))
self._values = hstack((origin, self._values))
# This is the simple form of (0.5 - integral) / 0.5
self._ginicoeff = 1 - 2 * trapz(self._values[1], self._values[0])
def _plot(self, attribute_name, file_path=None ):
clf() # Clear existing plot
a = self._values[0] * 100
b = self._values[1] * 100
ax = subplot(111)
plot(a, a, 'k--', a, b, 'r')
ax.set_ylim([0,100])
ax.grid(color='0.5', linestyle=':', linewidth=0.5)
xlabel('population')
ylabel(attribute_name)
title('Lorenz curve')
font = {'fontname' : 'Courier',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 11
}
box = { 'pad' : 6,
'facecolor' : 'w',
'linewidth' : 1,
'fill' : True
}
text(5, 90, 'Gini coefficient: %(gini)f' % {'gini' : self._ginicoeff}, font, color='k', bbox=box )
majorLocator = MultipleLocator(20)
majorFormatter = FormatStrFormatter('%d %%')
minorLocator = MultipleLocator(5)
ax.xaxis.set_major_locator( majorLocator )
ax.xaxis.set_major_formatter( majorFormatter)
ax.xaxis.set_minor_locator( minorLocator )
ax.yaxis.set_major_locator( majorLocator )
ax.yaxis.set_major_formatter( majorFormatter)
ax.yaxis.set_minor_locator( minorLocator )
if file_path:
savefig(file_path)
close()
else:
show()
import os
from opus_core.tests import opus_unittest
from numpy import allclose
from opus_gui.results_manager.run.indicator_framework.test_classes.abstract_indicator_test import AbstractIndicatorTest
class Tests(AbstractIndicatorTest):
def skip_test_create_indicator(self):
indicator_path = os.path.join(self.temp_cache_path, 'indicators')
self.assert_(not os.path.exists(indicator_path))
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
lorenzcurve.create(False)
self.assert_(os.path.exists(indicator_path))
self.assert_(os.path.exists(os.path.join(indicator_path, 'test__lorenzcurve__attribute__1980.png')))
def skip_test_perfect_equality(self):
"""Perfect equality is when everybody has the same amount of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = ones(100)
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = vstack((arange(0, 101) / 100., arange(0, 101) / 100.))
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_perfect_inequality(self):
"""Perfect inequality is when one person has all of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = zeros(100)
incomes[0] = 42
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
#We strip all the zero values, so the result consists of only two values
wanted_result = [[0.,1.],[0.,1.]]
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_lorenz(self):
"""Test case for less than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = array(
[[ 0, 1/6., 2/6., 3/6., 4/6., 5/6., 6/6. ],
[ 0, 1/16., 2/16., 4/16., 7/16., 11/16., 16/16. ]])
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_gini(self):
"""Test case for gini coefficient for the small case"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
self.assertAlmostEqual(lorenzcurve._ginicoeff, 0.3125)
def skip_test_large_lorenz(self):
"""Test case for more than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([731, 700, 619, 450, 419, 512, 232, 266, 131, 188,
498, 293, 935, 177, 160, 380, 538, 783, 256, 280,
731, 362, 870, 970, 674, 211, 524, 207, 513, 461,
280, 275, 410, 282, 144, 682, 573, 252, 382, 909,
719, 666, 236, 636, 628, 542, 630, 484, 629, 974,
747, 509, 281, 725, 377, 565, 495, 840, 391, 191,
929, 679, 217, 179, 336, 562, 293, 881, 271, 172,
426, 697, 293, 576, 203, 390, 522, 948, 312, 491,
531, 959, 646, 495, 306, 631, 722, 322, 876, 586,
316, 124, 796, 250, 456, 112, 661, 294, 749, 619,
134, 582, 996, 413, 421, 219, 796, 923, 832, 557])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result_F = arange(0, 111) / 110.
wanted_result_L = array([ 0, 0.00202803, 0.00427335, 0.00664542, 0.00907181, 0.01167928,
0.01457647, 0.01769094, 0.02089595, 0.02413718, 0.02754138,
0.03099989, 0.0346757 , 0.03842393, 0.04224459, 0.0461739 ,
0.05013943, 0.05434035, 0.0586137 , 0.06314055, 0.06770362,
0.07233912, 0.07715569, 0.0820628 , 0.08704234, 0.09211241,
0.09718249, 0.10227067, 0.10737696, 0.11268243, 0.1179879 ,
0.12329338, 0.12861696, 0.13415782, 0.13980734, 0.14552928,
0.15135987, 0.15744396, 0.16399884, 0.17082534, 0.17770615,
0.18462318, 0.19168508, 0.19876507, 0.20618911, 0.21366748,
0.22125448, 0.2288777 , 0.23659146, 0.2447398 , 0.25299678,
0.26134429, 0.27010828, 0.27899902, 0.28796219, 0.29692536,
0.30594285, 0.31515953, 0.32443052, 0.33371962, 0.34317169,
0.35265998, 0.36227502, 0.3720168 , 0.38183102, 0.39191685,
0.40209322, 0.41232391, 0.42269945, 0.43312932, 0.44366784,
0.45427878, 0.46548727, 0.47669576, 0.48806721, 0.49945678,
0.51086445, 0.52229023, 0.53380654, 0.54550393, 0.55747293,
0.56953247, 0.58173686, 0.5940318 , 0.60638105, 0.61900192,
0.63167711, 0.64469634, 0.65776989, 0.67089777, 0.68413428,
0.6973708 , 0.71089704, 0.72445949, 0.7386376 , 0.7530511 ,
0.7674646 , 0.78252997, 0.79774019, 0.81349364, 0.82935574,
0.84530837, 0.86176801, 0.87848115, 0.89530294, 0.91223337,
0.9293992 , 0.94676421, 0.9643284 , 0.98196502, 1. ])
self.assert_(allclose(lorenzcurve._values, vstack((wanted_result_F, wanted_result_L))))
if __name__ == '__main__':
try:
import matplotlib
except:
print 'could not import matplotlib'
else:
opus_unittest.main()
| agpl-3.0 | -796,952,061,882,075,500 | 40.724138 | 120 | 0.562075 | false |
mpdehaan/interfacer | lib/interfacer/base_module.py | 1 | 4701 | #
# Copyright (c) rPath, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Michael DeHaan
import os
import optparse
import sys
import exceptions
class SubCommand(object):
''' base class to a subcommand. You will be extending this. '''
def __init__(self, base_instance):
''' constructor, shouldn't be any need to override this '''
self.base = base_instance
def options(self):
''' what options does this command take? Default to no options. '''
return []
def run(self, options, args):
'''
implement this to traverse the options and decide how to run the
command
'''
raise exceptions.NotImplementedError
def name(self):
'''
what is the name of the subcommand as triggered on the commmand line?
'''
return 'generic_subcommand_you_should_override_this'
def description(self):
''' what description string to show when listing the subcommands '''
return 'generic description, you should override this'
def dispatch(self, cargs):
'''
core function around kicking off the subcommand.
Don't override this.
'''
usage = "%s %s %s [options]" % (os.path.basename(cargs[0]),
self.base.name(), self.name())
parser = optparse.OptionParser(usage=usage)
for option in self.options():
(short, long, kw) = option
parser.add_option(short, long, **kw)
(options, args) = parser.parse_args(cargs)
return self.run(options, args)
class BaseModule(object):
''' base class for a command category that contains subcommands '''
def __init__(self):
''' constructor, no need to override this. '''
pass
def name(self):
''' what is the name of the category? '''
raise exceptions.NotImplementedError
def description(self):
'''
explain what this command does in the help
'''
raise exceptions.NotImplementedError
def sub_commands(self):
'''
return a dictionary of valid subcommands by name
'''
raise exceptions.NotImplementedError
def run(self, args):
'''
defer to subcommands. If you don't want subcommands, override this
method!
'''
subs = self.sub_commands()
if len(args) == 2 or args[2] in ['-h', '--help']:
self.list_subcommands(args)
return 1
matched = [x for x in subs if x.name() == args[2]]
if len(matched) == 1:
print ""
rc = matched[0].dispatch(args)
print ""
return rc
elif len(matched) > 1:
sys.stderr.write("error: multiple commands respond to (%s)\n\n" %
(args[2]))
else:
sys.stderr.write("error: subcommand (%s) not found\n\n" %
(args[2]))
sys.stderr.write(
"error: multiple subcommand modules found with this name")
return 1
def list_subcommands(self, args):
'''
prints out the subcommands attached to this module.
Don't override this.
'''
print ""
print "usage: %s %s <subcommand> [--options]" % (args[0], self.name())
print ""
print " choose a subcommand:"
print ""
subs = self.sub_commands()
for mod in subs:
print "%20s - %s" % (mod.name(), mod.description())
print ""
def register():
'''
each module plugin must define a register function at top level that
returns a module instance
'''
return BaseModule()
| mit | -5,573,441,767,861,178,000 | 29.72549 | 79 | 0.610508 | false |
homhei/glance | bin/glance.py | 1 | 3416 | #!/usr/bin/env python
#encode=utf-8
#vim: tabstop=4 shiftwidth=4 softtabstop=4
#Created on 2013-6-7
#Copyright 2013 nuoqingyun xuqifeng
import sys
import time
import os
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, "glance", '__init__.py')):
sys.path.insert(0, possible_topdir)
try:
from glance.glance_agent.api import GlanceAgentAPI
except:
print 'Glance is not found.'
sys.exit()
from oslo.config import cfg
from glance.glance_agent import __version__
from glance.utils.daemon import Daemon
from glance import log as logging
glanceagent_opts = [
cfg.IntOpt('SYSTEM_CHECK_PERIOD',
default=60,
help = 'check system per 1 minute'),
cfg.StrOpt('server_key',
default = '0.0.0.0',
help = 'The passport for glance.'),
cfg.ListOpt('GlanceSystemList',
default = ['cpu', 'network', 'memory', 'disk', 'loadavg'],
help = 'The lsit for glance.'),
]
CONF = cfg.CONF
CONF.register_opts(glanceagent_opts)
#CONF(project = 'glance')
logging.setup("glance")
LOG = logging.getLogger("glance")
PIDFILE = '/var/run/glance.pid'
class GlanceAgentDaemon(Daemon):
def __init__(self):
self.server_key = CONF.server_key
super(GlanceAgentDaemon, self).__init__(PIDFILE)
self.glanceagentapi = GlanceAgentAPI()
def start(self):
try:
self.glanceagentapi.getServer(self.server_key)
except:
LOG.exception("Get server info failed")
super(GlanceAgentDaemon, self).start()
def run(self):
flag = 2
while True:
# Checks the system every 60 seconds
self.glanceagentapi.getSystem()
self.glanceagentapi.getProcess(self.server_key)
self.glanceagentapi.getPortScan()
if flag == 2 :
self.glanceagentapi.getTraffic()
flag = 0
time.sleep(CONF.SYSTEM_CHECK_PERIOD)
flag += 1
if __name__ == "__main__":
daemon = GlanceAgentDaemon()
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
try:
daemon.start()
print "Starting glanceagent {0}...".format(__version__)
except Exception, e:
LOG.exception("The agent couldn't be started")
print str(e)
elif 'stop' == sys.argv[1]:
print "Stopping Glance Agent ..."
daemon.stop()
elif 'restart' == sys.argv[1]:
print "Restaring Glance Agent ..."
daemon.restart()
elif 'status' == sys.argv[1]:
try:
pf = file(PIDFILE,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
except SystemExit:
pid = None
if pid:
print 'Glance Agent {0} is running as pid {1}'.format(__version__, pid)
else:
print 'Glance Agent is not running.'
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart|status" % sys.argv[0]
sys.exit(2)
| apache-2.0 | -254,126,492,494,727,170 | 28.448276 | 87 | 0.543911 | false |
jamesonwilliams/zephyr-kernel | doc/scripts/genrest/kconfiglib.py | 1 | 143880 | # This is Kconfiglib, a Python library for scripting, debugging, and extracting
# information from Kconfig-based configuration systems. To view the
# documentation, run
#
# $ pydoc kconfiglib
#
# or, if you prefer HTML,
#
# $ pydoc -w kconfiglib
#
# The examples/ subdirectory contains examples, to be run with e.g.
#
# $ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
#
# Look in testsuite.py for the test suite.
"""
Kconfiglib is a Python library for scripting and extracting information from
Kconfig-based configuration systems. Features include the following:
- Symbol values and properties can be looked up and values assigned
programmatically.
- .config files can be read and written.
- Expressions can be evaluated in the context of a Kconfig configuration.
- Relations between symbols can be quickly determined, such as finding all
symbols that reference a particular symbol.
- Highly compatible with the scripts/kconfig/*conf utilities. The test suite
automatically compares outputs between Kconfiglib and the C implementation
for a large number of cases.
For the Linux kernel, scripts are run using
$ make scriptconfig [ARCH=<arch>] SCRIPT=<path to script> [SCRIPT_ARG=<arg>]
Using the 'scriptconfig' target ensures that required environment variables
(SRCARCH, ARCH, srctree, KERNELVERSION, etc.) are set up correctly.
Scripts receive the name of the Kconfig file to load in sys.argv[1]. As of
Linux 4.1.0-rc5, this is always "Kconfig" from the kernel top-level directory.
If an argument is provided with SCRIPT_ARG, it appears as sys.argv[2].
To get an interactive Python prompt with Kconfiglib preloaded and a Config
object 'c' created, run
$ make iscriptconfig [ARCH=<arch>]
Kconfiglib supports both Python 2 and Python 3. For (i)scriptconfig, the Python
interpreter to use can be passed in PYTHONCMD, which defaults to 'python'. PyPy
works well too, and might give a nice speedup for long-running jobs.
The examples/ directory contains short example scripts, which can be run with
e.g.
$ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
or
$ make scriptconfig SCRIPT=Kconfiglib/examples/help_grep.py SCRIPT_ARG=kernel
testsuite.py contains the test suite. See the top of the script for how to run
it.
Credits: Written by Ulf "Ulfalizer" Magnusson
Send bug reports, suggestions and other feedback to ulfalizer a.t Google's
email service. Don't wrestle with internal APIs. Tell me what you need and I
might add it in a safe way as a client API instead."""
import glob
import os
import platform
import re
import sys
# File layout:
#
# Public classes
# Public functions
# Internal classes
# Internal functions
# Internal global constants
# Line length: 79 columns
#
# Public classes
#
class Config(object):
    """Represents a Kconfig configuration, e.g. for i386 or ARM. This is the
    set of symbols and other items (choices, menus, and comments) appearing in
    the configuration together with their values. Creating any number of
    Config objects -- including for different architectures -- is safe;
    Kconfiglib has no global state."""

    #
    # Public interface
    #
def __init__(self, filename="Kconfig", base_dir=None, print_warnings=True,
print_undef_assign=False):
"""Creates a new Config object, representing a Kconfig configuration.
Raises Kconfig_Syntax_Error on syntax errors.
filename (default: "Kconfig"): The base Kconfig file of the
configuration. For the Linux kernel, you'll probably want "Kconfig"
from the top-level directory, as environment variables will make
sure the right Kconfig is included from there
(arch/<architecture>/Kconfig). If you are using Kconfiglib via 'make
scriptconfig', the filename of the base base Kconfig file will be in
sys.argv[1].
base_dir (default: None): The base directory relative to which 'source'
statements within Kconfig files will work. For the Linux kernel this
should be the top-level directory of the kernel tree. $-references
to existing environment variables will be expanded.
If None (the default), the environment variable 'srctree' will be
used if set, and the current directory otherwise. 'srctree' is set
by the Linux makefiles to the top-level kernel directory. A default
of "." would not work with an alternative build directory.
print_warnings (default: True): Set to True if warnings related to this
configuration should be printed to stderr. This can be changed later
with Config.set_print_warnings(). It is provided as a constructor
argument since warnings might be generated during parsing.
print_undef_assign (default: False): Set to True if informational
messages related to assignments to undefined symbols should be
printed to stderr for this configuration. Can be changed later with
Config.set_print_undef_assign()."""
# The set of all symbols, indexed by name (a string)
self.syms = {}
# Python 2/3 compatibility hack. This is the only one needed.
if sys.version_info[0] >= 3:
self.syms_iter = self.syms.values
else:
self.syms_iter = self.syms.itervalues
# The set of all defined symbols in the configuration in the order they
# appear in the Kconfig files. This excludes the special symbols n, m,
# and y as well as symbols that are referenced but never defined.
self.kconfig_syms = []
# The set of all named choices (yes, choices can have names), indexed
# by name (a string)
self.named_choices = {}
# Lists containing all choices, menus and comments in the configuration
self.choices = []
self.menus = []
self.comments = []
def register_special_symbol(type_, name, val):
sym = Symbol()
sym.is_special_ = True
sym.is_defined_ = True
sym.config = self
sym.name = name
sym.type = type_
sym.cached_val = val
self.syms[name] = sym
return sym
# The special symbols n, m and y, used as shorthand for "n", "m" and
# "y"
self.n = register_special_symbol(TRISTATE, "n", "n")
self.m = register_special_symbol(TRISTATE, "m", "m")
self.y = register_special_symbol(TRISTATE, "y", "y")
# DEFCONFIG_LIST uses this
register_special_symbol(STRING, "UNAME_RELEASE", os.uname()[2])
# The symbol with "option defconfig_list" set, containing a list of
# default .config files
self.defconfig_sym = None
# See Symbol.get_(src)arch()
self.arch = os.environ.get("ARCH")
self.srcarch = os.environ.get("SRCARCH")
# See Config.__init__(). We need this for get_defconfig_filename().
self.srctree = os.environ.get("srctree")
if self.srctree is None:
self.srctree = "."
self.filename = filename
if base_dir is None:
self.base_dir = self.srctree
else:
self.base_dir = os.path.expandvars(base_dir)
# The 'mainmenu' text
self.mainmenu_text = None
# The filename of the most recently loaded .config file
self.config_filename = None
# The textual header of the most recently loaded .config, uncommented
self.config_header = None
self.print_warnings = print_warnings
self.print_undef_assign = print_undef_assign
# For parsing routines that stop when finding a line belonging to a
# different construct, these holds that line and the tokenized version
# of that line. The purpose is to avoid having to re-tokenize the line,
# which is inefficient and causes problems when recording references to
# symbols.
self.end_line = None
self.end_line_tokens = None
# See the comment in _parse_expr().
self._cur_item = None
self._line = None
self._filename = None
self._linenr = None
self._transform_m = None
# Parse the Kconfig files
self.top_block = self._parse_file(filename, None, None, None)
# Build Symbol.dep for all symbols
self._build_dep()
def get_arch(self):
    """Return the value the environment variable ARCH had when this Config
    was created, or None if ARCH was unset. For the Linux kernel this names
    the architecture being built for, e.g. "i386" or "mips"."""
    return self.arch
def get_srcarch(self):
    """Return the value the environment variable SRCARCH had when this
    Config was created, or None if SRCARCH was unset. For the Linux kernel
    this names the arch/ subdirectory holding architecture-specific code."""
    return self.srcarch
def get_srctree(self):
    """Return the value the environment variable srctree had when this
    Config was created, or None if srctree was unset. It points at the
    source directory and matters for out-of-tree builds."""
    return self.srctree
def get_base_dir(self):
    """Return the base directory (as passed to Config.__init__()) relative
    to which 'source' statements are resolved."""
    return self.base_dir
def get_kconfig_filename(self):
    """Return the filename of the (top-level) Kconfig file this
    configuration was loaded from."""
    return self.filename
def get_config_filename(self):
    """Return the filename of the most recently loaded .config file, or
    None if no configuration has been loaded yet."""
    return self.config_filename
def get_config_header(self):
    """Return the textual header (with the leading '#' of each line
    stripped) of the most recently loaded .config file, or None if no
    .config has been loaded or the last one had no header.

    The header consists of all leading lines that start with "#" but do
    not match "# CONFIG_FOO is not set"."""
    return self.config_header
def get_mainmenu_text(self):
    """Return the 'mainmenu' text with $-references to symbols expanded to
    the symbols' values, or None if the configuration has no 'mainmenu'
    statement."""
    if self.mainmenu_text is None:
        return None
    return self._expand_sym_refs(self.mainmenu_text)
def get_defconfig_filename(self):
    """Return the name of the defconfig file: the first existing file from
    the list given in the symbol carrying 'option defconfig_list'.
    $-references to symbols are expanded ("$FOO bar" -> "foo bar" when FOO
    has value "foo"). Returns None if no defconfig file exists.

    Setting 'option defconfig_list' on multiple symbols currently results
    in undefined behavior.

    When the environment variable 'srctree' was set at Config creation
    time, files are first looked up relative to that directory and only
    then relative to the current directory; see Config.__init__().

    WARNING: scripts/kconfig/Makefile sometimes passes
    --defconfig=<defconfig> to the C implementation (e.g. for
    'make defconfig'), which overrides 'option defconfig_list'; the result
    of this function might therefore differ from what 'make defconfig'
    actually uses."""
    if self.defconfig_sym is None:
        # No symbol has 'option defconfig_list' set
        return None
    for candidate, cond_expr in self.defconfig_sym.def_exprs:
        if self._eval_expr(cond_expr) != "y":
            continue
        expanded = self._expand_sym_refs(candidate)
        # Check $srctree first. os.path.join() is unsuitable here: an
        # absolute path in 'expanded' would silently discard $srctree.
        in_srctree = os.path.normpath(self.srctree + "/" + expanded)
        if os.path.exists(in_srctree):
            return in_srctree
        if os.path.exists(expanded):
            return expanded
    return None
def get_symbol(self, name):
    """Return the symbol named 'name', or None if no such symbol appears
    in the configuration. The shorthand conf[name] (Config.__getitem__())
    does the same lookup but raises KeyError for missing symbols."""
    try:
        return self.syms[name]
    except KeyError:
        return None
def __getitem__(self, name):
    """Look up the symbol named 'name', raising KeyError when it does not
    appear in the configuration. See also Config.get_symbol(), which
    returns None instead of raising."""
    return self.syms[name]
def get_symbols(self, all_symbols=True):
    """Return a list of symbols from the configuration.

    all_symbols (default: True): When True, the result contains every
      symbol -- including the special symbols n/m/y and symbols only
      referenced but never defined -- in an undefined order. When False,
      only symbols actually defined in the Kconfig files are returned, in
      definition order.

    Iterating directly over the Config ('for sym in config: ...') is
    equivalent to get_symbols(False)."""
    if all_symbols:
        return list(self.syms.values())
    return self.kconfig_syms
def __iter__(self):
    """Iterate over the defined symbols in Kconfig-definition order, as in
    'for sym in conf: ...'. Symbols that are only referenced, and the
    special symbols n/m/y, are excluded; use Config.get_symbols() to
    include them."""
    return iter(self.kconfig_syms)
def get_choices(self):
    """Return every choice statement in the configuration, in the order
    they appear in the Kconfig files."""
    return self.choices
def get_menus(self):
    """Return every menu in the configuration, in the order they appear in
    the Kconfig files."""
    return self.menus
def get_comments(self):
    """Return every comment in the configuration, in the order they appear
    in the Kconfig files."""
    return self.comments
def get_top_level_items(self):
    """Return the items (symbols, menus, choices, and comments) at the top
    level of the configuration -- i.e. the items that are not nested
    inside any menu or choice -- in configuration order."""
    return self.top_block
def load_config(self, filename, replace=True):
    """Loads symbol values from a file in the familiar .config format.
    Equivalent to calling Symbol.set_user_value() to set each of the
    values.

    "# CONFIG_FOO is not set" within a .config file is treated specially
    and sets the user value of FOO to 'n'. The C implementation works the
    same way.

    filename: The .config file to load. $-references to existing
      environment variables will be expanded. For scripts to work even when
      an alternative build directory is used with the Linux kernel, you
      need to refer to the top-level kernel directory with "$srctree".

    replace (default: True): True if the configuration should replace the
      old configuration; False if it should add to it."""
    # Put this first so that a missing file doesn't screw up our state
    filename = os.path.expandvars(filename)
    line_feeder = _FileFeed(filename)
    self.config_filename = filename

    #
    # Read header
    #

    def is_header_line(line):
        # A header line is a comment line that is not of the special
        # "# CONFIG_FOO is not set" form
        return line is not None and line.startswith("#") and \
               not _unset_re_match(line)

    self.config_header = None

    line = line_feeder.peek_next()
    if is_header_line(line):
        # Accumulate consecutive header lines, stripping the leading "#"
        # from each
        self.config_header = ""
        while is_header_line(line_feeder.peek_next()):
            self.config_header += line_feeder.get_next()[1:]
        # Remove trailing newline
        if self.config_header.endswith("\n"):
            self.config_header = self.config_header[:-1]

    #
    # Read assignments. Hotspot for some workloads.
    #

    def warn_override(filename, linenr, name, old_user_val, new_user_val):
        self._warn('overriding the value of {0}. '
                   'Old value: "{1}", new value: "{2}".'
                   .format(name, old_user_val, new_user_val),
                   filename, linenr)

    # Invalidate everything to keep things simple. It might be possible to
    # improve performance for the case where multiple configurations are
    # loaded by only invalidating a symbol (and its dependent symbols) if
    # the new user value differs from the old. One complication would be
    # that symbols not mentioned in the .config must lose their user value
    # when replace = True, which is the usual case.
    if replace:
        self.unset_user_values()
    else:
        self._invalidate_all()

    while 1:
        line = line_feeder.get_next()
        if line is None:
            return

        line = line.rstrip()

        # "CONFIG_FOO=val" assignment?
        set_match = _set_re_match(line)
        if set_match:
            name, val = set_match.groups()

            if val.startswith('"'):
                if len(val) < 2 or val[-1] != '"':
                    _parse_error(line, "malformed string literal",
                                 line_feeder.filename, line_feeder.linenr)
                # Strip quotes and remove escapings. The unescaping
                # procedure should be safe since " can only appear as \"
                # inside the string.
                val = val[1:-1].replace('\\"', '"').replace("\\\\", "\\")

            if name in self.syms:
                sym = self.syms[name]
                if sym.user_val is not None:
                    # The symbol was already assigned earlier in the file
                    warn_override(line_feeder.filename, line_feeder.linenr,
                                  name, sym.user_val, val)

                if sym.is_choice_sym:
                    # Assigning a choice symbol also (re)determines the
                    # mode of its containing choice; warn if this assignment
                    # conflicts with the mode set earlier
                    user_mode = sym.parent.user_mode
                    if user_mode is not None and user_mode != val:
                        self._warn("assignment to {0} changes mode of "
                                   'containing choice from "{1}" to "{2}".'
                                   .format(name, val, user_mode),
                                   line_feeder.filename,
                                   line_feeder.linenr)

                # Invalidation was already handled wholesale above
                sym._set_user_value_no_invalidate(val, True)
            else:
                if self.print_undef_assign:
                    _stderr_msg('note: attempt to assign the value "{0}" '
                                "to the undefined symbol {1}."
                                .format(val, name),
                                line_feeder.filename, line_feeder.linenr)
        else:
            # "# CONFIG_FOO is not set" line? Sets FOO's user value to "n".
            unset_match = _unset_re_match(line)
            if unset_match:
                name = unset_match.group(1)
                if name in self.syms:
                    sym = self.syms[name]
                    if sym.user_val is not None:
                        warn_override(line_feeder.filename,
                                      line_feeder.linenr,
                                      name, sym.user_val, "n")
                    sym._set_user_value_no_invalidate("n", True)
def write_config(self, filename, header=None):
    """Writes out symbol values in the familiar .config format.

    Kconfiglib makes sure the format matches what the C implementation
    would generate, down to whitespace. This eases testing.

    filename: The filename under which to save the configuration.

    header (default: None): A textual header that will appear at the
      beginning of the file, with each line commented out automatically.
      None means no header."""
    # Reset the per-symbol "already written" flag -- presumably consulted
    # by _make_block_conf() so that symbols defined in multiple locations
    # are only written once. TODO(review): confirm against
    # _make_block_conf().
    for sym in self.syms_iter():
        sym.already_written = False

    with open(filename, "w") as f:
        # Write header
        if header is not None:
            f.write(_comment(header))
            f.write("\n")

        # Build and write configuration
        conf_strings = []
        _make_block_conf(self.top_block, conf_strings.append)
        f.write("\n".join(conf_strings))
        f.write("\n")
def eval(self, s):
    """Return the tristate value of the expression 's' (given as a string)
    in the context of the configuration. Raises Kconfig_Syntax_Error on
    syntax errors in 's'.

    Example: if FOO and BAR are tristate symbols and at least one has the
    value "y", then config.eval("y && (FOO || BAR)") => "y".

    The result is always a tristate value; use Symbol.get_value() for
    non-bool, non-tristate symbols.

    Evaluation is consistent with conditional expressions in the
    configuration and with the C implementation: "m" and m are rewritten
    to '"m" && MODULES' and 'm && MODULES' respectively, and an "m" result
    is promoted to "y" when running without modules.

    Syntax checking is somewhat lax, partly for compatibility with the lax
    parsing of the C implementation."""
    tokenized = self._tokenize(s, True)
    # No current item -- we are not parsing a Kconfig construct -- and 's'
    # itself serves as the "line" for error reporting
    parsed = self._parse_expr(tokenized, None, s)
    return self._eval_expr(parsed)
def unset_user_values(self):
    """Reset every symbol's user value, as if Config.load_config() or
    Symbol.set_user_value() had never been called."""
    for symbol in self.syms_iter():
        symbol._unset_user_value_no_recursive_invalidate()
def set_print_warnings(self, print_warnings):
    """Enable or disable printing of warnings related to this configuration
    (e.g. attempts to assign illegal values with Symbol.set_user_value())
    to stderr.

    print_warnings: True if warnings should be printed."""
    self.print_warnings = print_warnings
def set_print_undef_assign(self, print_undef_assign):
    """Enable or disable printing of informational messages to stderr when
    a value is assigned to a symbol this configuration never defines.

    print_undef_assign: If True, such messages will be printed."""
    self.print_undef_assign = print_undef_assign
def __str__(self):
    """Returns a string containing various information about the Config."""
    # NOTE(review): in the upstream source these label strings are
    # right-padded with spaces so the ':' characters line up in a column;
    # the padding appears collapsed in this copy -- verify against
    # upstream before relying on the output format.
    return _lines("Configuration",
                  "File : " +
                  self.filename,
                  "Base directory : " +
                  self.base_dir,
                  "Value of $ARCH at creation time : " +
                  ("(not set)" if self.arch is None else self.arch),
                  "Value of $SRCARCH at creation time : " +
                  ("(not set)" if self.srcarch is None else
                   self.srcarch),
                  "Source tree (derived from $srctree;",
                  "defaults to '.' if $srctree isn't set) : " +
                  self.srctree,
                  "Most recently loaded .config : " +
                  ("(no .config loaded)"
                   if self.config_filename is None else
                   self.config_filename),
                  "Print warnings : " +
                  BOOL_STR[self.print_warnings],
                  "Print assignments to undefined symbols : " +
                  BOOL_STR[self.print_undef_assign])
#
# Private methods
#
#
# Kconfig parsing
#
def _parse_file(self, filename, parent, deps, visible_if_deps, res=None):
    """Parses the Kconfig file 'filename' and returns a list of the Items
    it contains. A thin wrapper that feeds the file's lines to
    _parse_block(); see that method for the meaning of the parameters."""
    feed = _FileFeed(filename)
    # end_marker is None: a file is only terminated by EOF
    return self._parse_block(feed, None, parent, deps, visible_if_deps, res)
def _parse_block(self, line_feeder, end_marker, parent, deps,
                 visible_if_deps, res=None):
    """Parses a block, which is the contents of either a file or an if,
    menu, or choice statement. Returns a list with the Items in the block.

    line_feeder: A _FileFeed instance feeding lines from a file. The
      Kconfig language is line-based in practice.

    end_marker: The token that ends the block, e.g. T_ENDIF ("endif") for
      ifs. None for files.

    parent: The enclosing menu, choice or if, or None if we're at the top
      level.

    deps: Dependencies from enclosing menus, choices and ifs.

    visible_if_deps (default: None): 'visible if' dependencies from
      enclosing menus.

    res (default: None): The list to add items to. If None, a new list is
      created to hold the items."""
    block = [] if res is None else res

    while 1:
        # Do we already have a tokenized line that we determined wasn't
        # part of whatever we were parsing earlier? See comment in
        # Config.__init__().
        if self.end_line is not None:
            line = self.end_line
            tokens = self.end_line_tokens
            tokens.unget_all()

            self.end_line = None
            self.end_line_tokens = None
        else:
            line = line_feeder.get_next()
            if line is None:
                if end_marker is not None:
                    raise Kconfig_Syntax_Error("Unexpected end of file {0}"
                                               .format(line_feeder.filename))
                return block

            tokens = self._tokenize(line, False, line_feeder.filename,
                                    line_feeder.linenr)

        t0 = tokens.get_next()
        if t0 is None:
            # Blank or comment-only line
            continue

        # Cases are ordered roughly by frequency, which speeds things up a
        # bit

        if t0 == T_CONFIG or t0 == T_MENUCONFIG:
            # The tokenizer will automatically allocate a new Symbol object
            # for any new names it encounters, so we don't need to worry
            # about that here.
            sym = tokens.get_next()

            # Symbols defined in multiple places get the parent of their
            # first definition. However, for symbols whose parents are
            # choice statements, the choice statement takes precedence.
            if not sym.is_defined_ or isinstance(parent, Choice):
                sym.parent = parent
            sym.is_defined_ = True

            self.kconfig_syms.append(sym)
            block.append(sym)

            self._parse_properties(line_feeder, sym, deps, visible_if_deps)

        elif t0 == T_SOURCE:
            kconfig_file = tokens.get_next()
            exp_kconfig_file = self._expand_sym_refs(kconfig_file)
            # Wildcard 'source' support: every glob match is parsed into
            # the same block.
            # NOTE(review): glob.glob() only returns existing paths, so
            # the IOError below cannot fire for a match; a pattern with no
            # matches is silently skipped -- confirm this is intended.
            g = glob.glob(self.base_dir + exp_kconfig_file)
            for s in g:
                f = os.path.join(s)
                if not os.path.exists(f):
                    raise IOError('{0}:{1}: sourced file "{2}" (expands to '
                                  '"{3}") not found. Perhaps base_dir '
                                  '(argument to Config.__init__(), currently '
                                  '"{4}") is set to the wrong value.'
                                  .format(line_feeder.filename,
                                          line_feeder.linenr,
                                          kconfig_file, exp_kconfig_file,
                                          self.base_dir))
                # Add items to the same block
                self._parse_file(s, parent, deps, visible_if_deps, block)

        elif t0 == end_marker:
            # We have reached the end of the block
            return block

        elif t0 == T_IF:
            # If statements are treated as syntactic sugar for adding
            # dependencies to enclosed items and do not have an explicit
            # object representation.
            dep_expr = self._parse_expr(tokens, None, line,
                                        line_feeder.filename,
                                        line_feeder.linenr)
            # Add items to the same block
            self._parse_block(line_feeder, T_ENDIF, parent,
                              _make_and(dep_expr, deps),
                              visible_if_deps, block)

        elif t0 == T_COMMENT:
            comment = Comment()

            comment.config = self
            comment.parent = parent
            comment.filename = line_feeder.filename
            comment.linenr = line_feeder.linenr
            comment.text = tokens.get_next()

            self.comments.append(comment)
            block.append(comment)

            self._parse_properties(line_feeder, comment, deps,
                                   visible_if_deps)

        elif t0 == T_MENU:
            menu = Menu()

            menu.config = self
            menu.parent = parent
            menu.filename = line_feeder.filename
            menu.linenr = line_feeder.linenr
            menu.title = tokens.get_next()

            self.menus.append(menu)
            block.append(menu)

            # Parse properties and contents
            self._parse_properties(line_feeder, menu, deps,
                                   visible_if_deps)
            menu.block = self._parse_block(line_feeder, T_ENDMENU, menu,
                                           menu.dep_expr,
                                           _make_and(visible_if_deps,
                                                     menu.visible_if_expr))

        elif t0 == T_CHOICE:
            name = tokens.get_next()
            if name is None:
                choice = Choice()
                self.choices.append(choice)
            else:
                # Named choice -- named choices may be defined in several
                # locations, all sharing one Choice object
                choice = self.named_choices.get(name)
                if choice is None:
                    choice = Choice()
                    choice.name = name
                    self.named_choices[name] = choice
                    self.choices.append(choice)

            choice.config = self
            choice.parent = parent

            choice.def_locations.append((line_feeder.filename,
                                         line_feeder.linenr))

            # Parse properties and contents
            self._parse_properties(line_feeder, choice, deps,
                                   visible_if_deps)
            choice.block = self._parse_block(line_feeder, T_ENDCHOICE,
                                             choice, deps, visible_if_deps)

            choice._determine_actual_symbols()

            # If no type is specified for the choice, its type is that of
            # the first choice item with a specified type
            if choice.type == UNKNOWN:
                for item in choice.actual_symbols:
                    if item.type != UNKNOWN:
                        choice.type = item.type
                        break

            # Each choice item of UNKNOWN type gets the type of the choice
            for item in choice.actual_symbols:
                if item.type == UNKNOWN:
                    item.type = choice.type

            block.append(choice)

        elif t0 == T_MAINMENU:
            text = tokens.get_next()
            if self.mainmenu_text is not None:
                self._warn("overriding 'mainmenu' text. "
                           'Old value: "{0}", new value: "{1}".'
                           .format(self.mainmenu_text, text),
                           line_feeder.filename, line_feeder.linenr)
            self.mainmenu_text = text

        else:
            _parse_error(line, "unrecognized construct",
                         line_feeder.filename, line_feeder.linenr)
def _parse_properties(self, line_feeder, stmt, deps, visible_if_deps):
    """Parsing of properties for symbols, menus, choices, and comments.
    Takes care of propagating dependencies from enclosing menus and ifs.

    line_feeder: _FileFeed supplying the lines following the construct's
      opening line.

    stmt: The Symbol, Choice, Menu, or Comment whose properties are being
      parsed.

    deps: Dependencies from enclosing menus, choices and ifs.

    visible_if_deps: 'visible if' dependencies from enclosing menus."""
    def parse_val_and_cond(tokens, line, filename, linenr):
        """Parses '<expr1> if <expr2>' constructs, where the 'if' part is
        optional. Returns a tuple containing the parsed expressions, with
        None as the second element if the 'if' part is missing."""
        val = self._parse_expr(tokens, stmt, line, filename, linenr, False)
        if tokens.check(T_IF):
            return (val, self._parse_expr(tokens, stmt, line, filename,
                                          linenr))
        return (val, None)

    # In case the symbol is defined in multiple locations, we need to
    # remember what prompts, defaults, and selects are new for this
    # definition, as "depends on" should only apply to the local
    # definition.
    new_prompt = None
    new_def_exprs = []
    new_selects = []

    # Dependencies from 'depends on' statements
    depends_on_expr = None

    while 1:
        line = line_feeder.get_next()
        if line is None:
            break

        filename = line_feeder.filename
        linenr = line_feeder.linenr

        tokens = self._tokenize(line, False, filename, linenr)

        t0 = tokens.get_next()
        if t0 is None:
            # Blank or comment-only line
            continue

        # Cases are ordered roughly by frequency, which speeds things up a
        # bit

        if t0 == T_DEPENDS:
            if not tokens.check(T_ON):
                _parse_error(line, 'expected "on" after "depends"',
                             filename, linenr)

            parsed_deps = self._parse_expr(tokens, stmt, line, filename,
                                           linenr)

            if isinstance(stmt, (Menu, Comment)):
                stmt.orig_deps = _make_and(stmt.orig_deps, parsed_deps)
            else:
                depends_on_expr = _make_and(depends_on_expr, parsed_deps)

        elif t0 == T_HELP:
            # Find first non-blank (not all-space) line and get its
            # indentation
            line = line_feeder.next_nonblank()
            if line is None:
                stmt.help = ""
                break

            indent = _indentation(line)
            if indent == 0:
                # If the first non-empty lines has zero indent, there is no
                # help text
                stmt.help = ""
                line_feeder.unget()
                break

            # The help text goes on till the first non-empty line with less
            # indent
            help_lines = [_deindent(line, indent)]
            while 1:
                line = line_feeder.get_next()
                if line is None or \
                   (not line.isspace() and _indentation(line) < indent):
                    stmt.help = "".join(help_lines)
                    break
                help_lines.append(_deindent(line, indent))

            if line is None:
                break

            # Push back the line that terminated the help text so it gets
            # parsed as whatever it actually is
            line_feeder.unget()

        elif t0 == T_SELECT:
            target = tokens.get_next()

            stmt.referenced_syms.add(target)
            stmt.selected_syms.add(target)

            if tokens.check(T_IF):
                new_selects.append((target,
                                    self._parse_expr(tokens, stmt, line,
                                                     filename, linenr)))
            else:
                new_selects.append((target, None))

        elif t0 in (T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING):
            stmt.type = TOKEN_TO_TYPE[t0]
            # A prompt on the same line as the type, e.g. 'bool "foo"'
            if tokens.peek_next() is not None:
                new_prompt = parse_val_and_cond(tokens, line, filename,
                                                linenr)

        elif t0 == T_DEFAULT:
            new_def_exprs.append(parse_val_and_cond(tokens, line, filename,
                                                    linenr))

        elif t0 == T_DEF_BOOL:
            stmt.type = BOOL
            if tokens.peek_next() is not None:
                new_def_exprs.append(parse_val_and_cond(tokens, line,
                                                        filename, linenr))

        elif t0 == T_PROMPT:
            # 'prompt' properties override each other within a single
            # definition of a symbol, but additional prompts can be added
            # by defining the symbol multiple times; hence 'new_prompt'
            # instead of 'prompt'.
            new_prompt = parse_val_and_cond(tokens, line, filename, linenr)

        elif t0 == T_RANGE:
            low = tokens.get_next()
            high = tokens.get_next()
            stmt.referenced_syms.add(low)
            stmt.referenced_syms.add(high)

            if tokens.check(T_IF):
                stmt.ranges.append((low, high,
                                    self._parse_expr(tokens, stmt, line,
                                                     filename, linenr)))
            else:
                stmt.ranges.append((low, high, None))

        elif t0 == T_DEF_TRISTATE:
            stmt.type = TRISTATE
            if tokens.peek_next() is not None:
                new_def_exprs.append(parse_val_and_cond(tokens, line,
                                                        filename, linenr))

        elif t0 == T_OPTION:
            if tokens.check(T_ENV) and tokens.check(T_EQUAL):
                env_var = tokens.get_next()

                stmt.is_special_ = True
                stmt.is_from_env = True

                if env_var not in os.environ:
                    self._warn("The symbol {0} references the "
                               "non-existent environment variable {1} and "
                               "will get the empty string as its value. "
                               "If you're using Kconfiglib via "
                               "'make (i)scriptconfig', it should have "
                               "set up the environment correctly for you. "
                               "If you still got this message, that "
                               "might be an error, and you should email "
                               "ulfalizer a.t Google's email service."""
                               .format(stmt.name, env_var),
                               filename, linenr)

                    stmt.cached_val = ""
                else:
                    stmt.cached_val = os.environ[env_var]

            elif tokens.check(T_DEFCONFIG_LIST):
                self.defconfig_sym = stmt

            elif tokens.check(T_MODULES):
                # To reduce warning spam, only warn if 'option modules' is
                # set on some symbol that isn't MODULES, which should be
                # safe. I haven't run into any projects that make use
                # modules besides the kernel yet, and there it's likely to
                # keep being called "MODULES".
                if stmt.name != "MODULES":
                    self._warn("the 'modules' option is not supported. "
                               "Let me know if this is a problem for you; "
                               "it shouldn't be that hard to implement. "
                               "(Note that modules are still supported -- "
                               "Kconfiglib just assumes the symbol name "
                               "MODULES, like older versions of the C "
                               "implementation did when 'option modules' "
                               "wasn't used.)",
                               filename, linenr)

            elif tokens.check(T_ALLNOCONFIG_Y):
                if not isinstance(stmt, Symbol):
                    _parse_error(line,
                                 "the 'allnoconfig_y' option is only "
                                 "valid for symbols",
                                 filename, linenr)
                stmt.allnoconfig_y = True

            else:
                _parse_error(line, "unrecognized option", filename, linenr)

        elif t0 == T_VISIBLE:
            if not tokens.check(T_IF):
                _parse_error(line, 'expected "if" after "visible"',
                             filename, linenr)
            if not isinstance(stmt, Menu):
                _parse_error(line,
                             "'visible if' is only valid for menus",
                             filename, linenr)

            parsed_deps = self._parse_expr(tokens, stmt, line, filename,
                                           linenr)
            stmt.visible_if_expr = _make_and(stmt.visible_if_expr,
                                             parsed_deps)

        elif t0 == T_OPTIONAL:
            if not isinstance(stmt, Choice):
                _parse_error(line,
                             '"optional" is only valid for choices',
                             filename,
                             linenr)
            stmt.optional = True

        else:
            # This line belongs to the next construct, not to the current
            # property list. See comment in Config.__init__()
            self.end_line = line
            self.end_line_tokens = tokens
            break

    # Done parsing properties. Now propagate 'depends on' and enclosing
    # menu/if dependencies to expressions.

    # The set of symbols referenced directly by the statement plus all
    # symbols referenced by enclosing menus and ifs
    stmt.all_referenced_syms = stmt.referenced_syms | _get_expr_syms(deps)

    # Save original dependencies from enclosing menus and ifs
    stmt.deps_from_containing = deps

    if isinstance(stmt, (Menu, Comment)):
        stmt.dep_expr = _make_and(stmt.orig_deps, deps)
    else:
        # Symbol or Choice

        # See comment for 'menu_dep'
        stmt.menu_dep = depends_on_expr

        # Propagate dependencies to prompts

        if new_prompt is not None:
            # Propagate 'visible if' dependencies from enclosing menus
            prompt, cond_expr = new_prompt
            cond_expr = _make_and(cond_expr, visible_if_deps)
            # Propagate 'depends on' dependencies
            new_prompt = (prompt, _make_and(cond_expr, depends_on_expr))
            # Save original
            stmt.orig_prompts.append(new_prompt)
            # Finalize with dependencies from enclosing menus and ifs
            stmt.prompts.append((new_prompt[0],
                                 _make_and(new_prompt[1], deps)))

        # Propagate dependencies to defaults

        # Propagate 'depends on' dependencies
        new_def_exprs = [(val_expr, _make_and(cond_expr, depends_on_expr))
                         for val_expr, cond_expr in new_def_exprs]
        # Save original
        stmt.orig_def_exprs.extend(new_def_exprs)
        # Finalize with dependencies from enclosing menus and ifs
        stmt.def_exprs.extend([(val_expr, _make_and(cond_expr, deps))
                               for val_expr, cond_expr in new_def_exprs])

        # Propagate dependencies to selects

        # Only symbols can select
        if isinstance(stmt, Symbol):
            # Propagate 'depends on' dependencies
            new_selects = [(target, _make_and(cond_expr, depends_on_expr))
                           for target, cond_expr in new_selects]
            # Save original
            stmt.orig_selects.extend(new_selects)
            # Finalize with dependencies from enclosing menus and ifs
            for target, cond in new_selects:
                target.rev_dep = _make_or(target.rev_dep,
                                          _make_and(stmt,
                                                    _make_and(cond, deps)))
def _parse_expr(self, feed, cur_item, line, filename=None, linenr=None,
                transform_m=True):
    """Parses an expression from the tokens in 'feed' with a simple
    top-down parser. The result has the form
    '(<operator>, [<parsed operands>])', where <operator> is e.g.
    kconfiglib.AND. A single operand (no && or ||) is returned directly;
    the same applies to subexpressions.

    feed: _Feed instance containing the tokens for the expression.

    cur_item: The item (Symbol, Choice, Menu, or Comment) currently being
      parsed, or None if no item is being parsed. Used for recording
      references to symbols.

    line: The line containing the expression being parsed.

    filename (default: None): The file containing the expression.

    linenr (default: None): The line number containing the expression.

    transform_m (default: True): Whether 'm' should be rewritten to
      'm && MODULES' -- see parse_val_and_cond().

    Expression grammar, in decreasing order of precedence:

      <expr> -> <symbol>
                <symbol> '=' <symbol>
                <symbol> '!=' <symbol>
                '(' <expr> ')'
                '!' <expr>
                <expr> '&&' <expr>
                <expr> '||' <expr>"""
    # Stash the context in instance variables instead of threading it
    # through every _parse_expr_rec() call; profiling showed no measurable
    # difference, and it keeps the recursive parser uncluttered.
    self._line = line
    self._filename = filename
    self._linenr = linenr
    self._cur_item = cur_item
    self._transform_m = transform_m

    return self._parse_expr_rec(feed)
def _parse_expr_rec(self, feed):
    """Parses a sequence of OR-terms joined by '||'. Returns the single
    term directly when no '||' follows (the common case), otherwise an
    (OR, [terms]) node."""
    term = self._parse_or_term(feed)
    if not feed.check(T_OR):
        # Just one operand -- no OR node needed
        return term
    terms = [term]
    while True:
        terms.append(self._parse_or_term(feed))
        if not feed.check(T_OR):
            return (OR, terms)
def _parse_or_term(self, feed):
    """Parses one operand of a '||' expression: a sequence of factors
    joined by '&&'. Returns the single factor directly when no '&&'
    follows (the common case), otherwise an (AND, [factors]) node."""
    factor = self._parse_factor(feed)
    if not feed.check(T_AND):
        # Just one operand -- no AND node needed
        return factor
    factors = [factor]
    while True:
        factors.append(self._parse_factor(feed))
        if not feed.check(T_AND):
            return (AND, factors)
def _parse_factor(self, feed):
    """Parses one factor of an expression: a symbol or string, an
    (in)equality between two of them, a negation, or a parenthesized
    subexpression. Relies on the context stashed by _parse_expr()
    (self._cur_item, self._transform_m, self._line, ...)."""
    token = feed.get_next()

    if isinstance(token, (Symbol, str)):
        # Plain symbol or quoted string
        if self._cur_item is not None and isinstance(token, Symbol):
            self._cur_item.referenced_syms.add(token)

        next_token = feed.peek_next()
        # For conditional expressions ('depends on <expr>',
        # '... if <expr>', # etc.), "m" and m are rewritten to
        # "m" && MODULES.
        if next_token != T_EQUAL and next_token != T_UNEQUAL:
            if self._transform_m and (token is self.m or token == "m"):
                return (AND, ["m", self._sym_lookup("MODULES")])
            return token

        # Equality or inequality: the peeked T_EQUAL/T_UNEQUAL is consumed
        # here
        relation = EQUAL if (feed.get_next() == T_EQUAL) else UNEQUAL
        token_2 = feed.get_next()
        if self._cur_item is not None and isinstance(token_2, Symbol):
            self._cur_item.referenced_syms.add(token_2)
        return (relation, token, token_2)

    if token == T_NOT:
        return (NOT, self._parse_factor(feed))

    if token == T_OPEN_PAREN:
        expr_parse = self._parse_expr_rec(feed)
        if not feed.check(T_CLOSE_PAREN):
            _parse_error(self._line, "missing end parenthesis",
                         self._filename, self._linenr)
        return expr_parse

    _parse_error(self._line, "malformed expression", self._filename,
                 self._linenr)
def _tokenize(self, s, for_eval, filename=None, linenr=None):
    """Returns a _Feed instance containing tokens derived from the string
    's'. Registers any new symbols encountered (via _sym_lookup()).
    (I experimented with a pure regular expression implementation, but it
    came out slower, less readable, and wouldn't have been as flexible.)
    for_eval: True when parsing an expression for a call to Config.eval(),
       in which case we should not treat the first token specially nor
       register new symbols.
    filename/linenr: used only for error/location reporting."""
    s = s.strip()
    # Blank lines and comment-only lines produce no tokens
    if s == "" or s[0] == "#":
        return _Feed([])
    if for_eval:
        previous = None  # The previous token seen
        tokens = []
        i = 0  # The current index in the string being tokenized
    else:
        # The initial word on a line is parsed specially. Let
        # command_chars = [A-Za-z0-9_]. Then
        #  - leading non-command_chars characters are ignored, and
        #  - the first token consists the following one or more
        #    command_chars characters.
        # This is why things like "----help--" are accepted.
        initial_token_match = _initial_token_re_match(s)
        if initial_token_match is None:
            return _Feed([])
        keyword = _get_keyword(initial_token_match.group(1))
        if keyword == T_HELP:
            # Avoid junk after "help", e.g. "---", being registered as a
            # symbol
            return _Feed([T_HELP])
        if keyword is None:
            # We expect a keyword as the first token
            _tokenization_error(s, filename, linenr)
        previous = keyword
        tokens = [keyword]
        # The current index in the string being tokenized
        i = initial_token_match.end()
    # _tokenize() is a hotspot during parsing, and this speeds things up a
    # bit (local-name lookups are faster than attribute lookups)
    strlen = len(s)
    append = tokens.append
    # Main tokenization loop. (Handles tokens past the first one.)
    while i < strlen:
        # Test for an identifier/keyword preceded by whitespace first; this
        # is the most common case.
        id_keyword_match = _id_keyword_re_match(s, i)
        if id_keyword_match:
            # We have an identifier or keyword. The above also stripped any
            # whitespace for us.
            name = id_keyword_match.group(1)
            # Jump past it
            i = id_keyword_match.end()
            keyword = _get_keyword(name)
            if keyword is not None:
                # It's a keyword
                append(keyword)
            elif previous in STRING_LEX:
                # What would ordinarily be considered an identifier is
                # treated as a string after certain tokens
                append(name)
            else:
                # It's a symbol name. _sym_lookup() will take care of
                # allocating a new Symbol instance if it's the first time
                # we see it.
                sym = self._sym_lookup(name, for_eval)
                if previous == T_CONFIG or previous == T_MENUCONFIG:
                    # If the previous token is T_(MENU)CONFIG
                    # ("(menu)config"), we're tokenizing the first line of
                    # a symbol definition, and should remember this as a
                    # location where the symbol is defined
                    sym.def_locations.append((filename, linenr))
                else:
                    # Otherwise, it's a reference to the symbol
                    sym.ref_locations.append((filename, linenr))
                append(sym)
        else:
            # Not an identifier/keyword -- skip whitespace by hand
            while i < strlen and s[i].isspace():
                i += 1
            if i == strlen:
                break
            c = s[i]
            i += 1
            # String literal (constant symbol)
            if c == '"' or c == "'":
                if "\\" in s:
                    # Slow path: This could probably be sped up, but it's a
                    # very unusual case anyway.
                    quote = c
                    val = ""
                    while 1:
                        if i >= len(s):
                            _tokenization_error(s, filename, linenr)
                        c = s[i]
                        if c == quote:
                            break
                        if c == "\\":
                            if i + 1 >= len(s):
                                _tokenization_error(s, filename, linenr)
                            # Keep the escaped character verbatim
                            val += s[i + 1]
                            i += 2
                        else:
                            val += c
                            i += 1
                    i += 1
                    append(val)
                else:
                    # Fast path: If the string contains no backslashes
                    # (almost always) we can simply look for the matching
                    # quote.
                    end = s.find(c, i)
                    if end == -1:
                        _tokenization_error(s, filename, linenr)
                    append(s[i:end])
                    i = end + 1
            elif c == "&":
                # Invalid characters are ignored
                if i >= len(s) or s[i] != "&": continue
                append(T_AND)
                i += 1
            elif c == "|":
                # Invalid characters are ignored
                if i >= len(s) or s[i] != "|": continue
                append(T_OR)
                i += 1
            elif c == "!":
                if i < len(s) and s[i] == "=":
                    append(T_UNEQUAL)
                    i += 1
                else:
                    append(T_NOT)
            elif c == "=": append(T_EQUAL)
            elif c == "(": append(T_OPEN_PAREN)
            elif c == ")": append(T_CLOSE_PAREN)
            elif c == "#": break  # Comment -- rest of line ignored
            else: continue  # Invalid characters are ignored
        # NOTE: the 'continue' branches above deliberately skip this
        # update; no token was appended there, so 'previous' is unchanged.
        previous = tokens[-1]
    return _Feed(tokens)
def _sym_lookup(self, name, for_eval=False):
"""Fetches the symbol 'name' from the symbol table, creating and
registering it if it does not exist. If 'for_eval' is True, the symbol
won't be added to the symbol table if it does not exist -- this is for
Config.eval()."""
if name in self.syms:
return self.syms[name]
new_sym = Symbol()
new_sym.config = self
new_sym.name = name
if for_eval:
self._warn("no symbol {0} in configuration".format(name))
else:
self.syms[name] = new_sym
return new_sym
#
# Expression evaluation
#
def _eval_expr(self, expr):
"""Evaluates an expression to "n", "m", or "y"."""
# Handles e.g. an "x if y" condition where the "if y" part is missing.
if expr is None:
return "y"
res = self._eval_expr_rec(expr)
if res == "m":
# Promote "m" to "y" if we're running without modules.
#
# Internally, "m" is often rewritten to "m" && MODULES by both the
# C implementation and Kconfiglib, which takes care of cases where
# "m" should be demoted to "n" instead.
modules_sym = self.syms.get("MODULES")
if modules_sym is None or modules_sym.get_value() != "y":
return "y"
return res
def _eval_expr_rec(self, expr):
    """Recursive worker for _eval_expr(). 'expr' is a Symbol, a string, or
    an (operator, operands) tuple; returns "n", "m", or "y". AND/OR
    short-circuit on "n"/"y" respectively, so later operands may go
    unevaluated."""
    if isinstance(expr, Symbol):
        # Non-bool/tristate symbols are always "n" in a tristate sense,
        # regardless of their value
        if expr.type != BOOL and expr.type != TRISTATE:
            return "n"
        return expr.get_value()
    if isinstance(expr, str):
        # Constant symbol: only "y"/"m" are truthy tristates
        return expr if (expr == "y" or expr == "m") else "n"
    # Ordered by frequency
    if expr[0] == AND:
        res = "y"
        for subexpr in expr[1]:
            ev = self._eval_expr_rec(subexpr)
            # Return immediately upon discovering an "n" term
            if ev == "n":
                return "n"
            if ev == "m":
                res = "m"
        # 'res' is either "m" or "y" here; we already handled the
        # short-circuiting "n" case in the loop.
        return res
    if expr[0] == NOT:
        ev = self._eval_expr_rec(expr[1])
        if ev == "y":
            return "n"
        # !n == y, !m == m
        return "y" if (ev == "n") else "m"
    if expr[0] == OR:
        res = "n"
        for subexpr in expr[1]:
            ev = self._eval_expr_rec(subexpr)
            # Return immediately upon discovering a "y" term
            if ev == "y":
                return "y"
            if ev == "m":
                res = "m"
        # 'res' is either "n" or "m" here; we already handled the
        # short-circuiting "y" case in the loop.
        return res
    if expr[0] == EQUAL:
        # Comparisons use the string forms of both operands
        return "y" if (_str_val(expr[1]) == _str_val(expr[2])) else "n"
    if expr[0] == UNEQUAL:
        return "y" if (_str_val(expr[1]) != _str_val(expr[2])) else "n"
    _internal_error("Internal error while evaluating expression: "
                    "unknown operation {0}.".format(expr[0]))
def _eval_min(self, e1, e2):
    """Evaluates both expressions and returns the smaller tristate result
    (n < m < y). A missing expression (None) evaluates to "y"."""
    a = self._eval_expr(e1)
    b = self._eval_expr(e2)
    return a if tri_less(a, b) else b
def _eval_max(self, e1, e2):
    """Evaluates both expressions and returns the larger tristate result
    (n < m < y). A missing expression (None) evaluates to "y"."""
    a = self._eval_expr(e1)
    b = self._eval_expr(e2)
    return a if tri_greater(a, b) else b
#
# Dependency tracking (for caching and invalidation)
#
def _build_dep(self):
    """Populates the Symbol.dep sets, linking the symbol to the symbols
    that immediately depend on it in the sense that changing the value of
    the symbol might affect the values of those other symbols. This is used
    for caching/invalidation purposes. The calculated sets might be larger
    than necessary as we don't do any complicated analysis of the
    expressions. Intended to be run once after parsing."""
    # Adds 'sym' as a directly dependent symbol to all symbols that appear
    # in the expression 'e'
    def add_expr_deps(e, sym):
        for s in _get_expr_syms(e):
            s.dep.add(sym)
    # The directly dependent symbols of a symbol are:
    #  - Any symbols whose prompts, default values, rev_dep (select
    #    condition), or ranges depend on the symbol
    #  - Any symbols that belong to the same choice statement as the symbol
    #    (these won't be included in 'dep' as that makes the dependency
    #    graph unwieldy, but Symbol._get_dependent() will include them)
    #  - Any symbols in a choice statement that depends on the symbol
    for sym in self.syms_iter():
        for _, e in sym.prompts:
            add_expr_deps(e, sym)
        # def_exprs entries are (value expression, condition) pairs; both
        # sides can reference symbols
        for v, e in sym.def_exprs:
            add_expr_deps(v, sym)
            add_expr_deps(e, sym)
        add_expr_deps(sym.rev_dep, sym)
        # ranges entries are (low, high, condition) triples
        for l, u, e in sym.ranges:
            add_expr_deps(l, sym)
            add_expr_deps(u, sym)
            add_expr_deps(e, sym)
        if sym.is_choice_sym:
            # Changing this symbol may affect the enclosing choice's
            # prompt/default conditions, and thereby its siblings
            choice = sym.parent
            for _, e in choice.prompts:
                add_expr_deps(e, sym)
            for _, e in choice.def_exprs:
                add_expr_deps(e, sym)
def _eq_to_sym(self, eq):
    """_expr_depends_on() helper. For (in)equalities of the form sym = y/m
    or sym != n, returns sym. For other (in)equalities, returns None."""
    relation, left, right = eq
    def norm(item):
        # Fold the special y/m/n symbols into their string counterparts
        if item is self.y:
            return "y"
        if item is self.m:
            return "m"
        if item is self.n:
            return "n"
        return item
    left = norm(left)
    right = norm(right)
    # Make sure the symbol (if any) appears to the left
    if not isinstance(left, Symbol):
        left, right = right, left
        if not isinstance(left, Symbol):
            return None
    if relation == EQUAL and (right == "y" or right == "m"):
        return left
    if relation == UNEQUAL and right == "n":
        return left
    return None
def _expr_depends_on(self, expr, sym):
    """Reimplementation of expr_depends_symbol() from mconf.c. Used to
    determine if a submenu should be implicitly created, which influences
    what items inside choice statements are considered choice items."""
    if expr is None:
        return False
    def rec(e):
        if isinstance(e, str):
            return False
        if isinstance(e, Symbol):
            return e is sym
        if e[0] in (EQUAL, UNEQUAL):
            return self._eq_to_sym(e) is sym
        if e[0] == AND:
            return any(rec(term) for term in e[1])
        # Other operators (OR/NOT) fall through, as in mconf.c
    return rec(expr)
def _invalidate_all(self):
for sym in self.syms_iter():
sym._invalidate()
#
# Printing and misc.
#
def _expand_sym_refs(self, s):
    """Expands $-references to symbols in 's' to symbol values, or to the
    empty string for undefined symbols. Repeats until no reference
    remains, so expansions are themselves re-scanned."""
    match = _sym_ref_re_search(s)
    while match is not None:
        name = match.group(0)[1:]  # Strip the leading '$'
        sym = self.syms.get(name)
        expansion = sym.get_value() if sym is not None else ""
        s = s[:match.start()] + expansion + s[match.end():]
        match = _sym_ref_re_search(s)
    return s
def _expr_val_str(self, expr, no_value_str="(none)",
                  get_val_instead_of_eval=False):
    """Printing helper. Returns a string with 'expr' and its value.
    no_value_str: String to return when 'expr' is missing (None).
    get_val_instead_of_eval: Assume 'expr' is a symbol or string (constant
       symbol) and get its value directly instead of evaluating it to a
       tristate value."""
    if expr is None:
        return no_value_str
    if not get_val_instead_of_eval:
        val = self._eval_expr(expr)
    elif isinstance(expr, str):
        # A constant symbol prints as just itself
        return _expr_to_str(expr)
    else:
        val = expr.get_value()
    return "{0} (value: {1})".format(_expr_to_str(expr), _expr_to_str(val))
def _get_sym_or_choice_str_rest(self, sc):
    """Symbols and choices have many properties in common, so we factor out
    common __str__() stuff here. "sc" is short for "symbol or choice".
    This variant emits reST-style field labels (":Symbol:", ":Prompts:",
    etc.) with "\n * " bullet lists -- presumably for documentation
    generation; compare _get_sym_or_choice_str()."""
    # As we deal a lot with string representations here, use some
    # convenient shorthand:
    s = _expr_to_str
    #
    # Common symbol/choice properties
    #
    user_val_str = "(no user value)" if sc.user_val is None else \
                   s(sc.user_val)
    # Build prompts string
    if not sc.prompts:
        prompts_str = " (no prompts)"
    else:
        prompts_str_rows = []
        for prompt, cond_expr in sc.orig_prompts:
            if cond_expr is None:
                prompts_str_rows.append(' "{0}"'.format(prompt))
            else:
                prompts_str_rows.append(
                    ' "{0}" if {1}'.format(prompt,
                                           self._expr_val_str(cond_expr)))
        prompts_str = "\n * "
        prompts_str += "\n * ".join(prompts_str_rows)
    # Build locations string
    if not sc.def_locations:
        locations_str = "(no locations)"
    else:
        locations_str = "\n * "
        locations_str += "\n * ".join(["{0}:{1}".format(filename, linenr) for
                                       (filename, linenr) in sc.def_locations])
    # Build additional-dependencies-from-menus-and-ifs string
    additional_deps_str = " " + \
      self._expr_val_str(sc.deps_from_containing,
                         "(no additional dependencies)")
    #
    # Symbol-specific stuff
    #
    if isinstance(sc, Symbol):
        # Build ranges string
        # NOTE(review): this nested isinstance(sc, Symbol) check is
        # redundant -- we are already inside the same check.
        if isinstance(sc, Symbol):
            if not sc.ranges:
                ranges_str = " (no ranges)"
            else:
                ranges_str_rows = []
                for l, u, cond_expr in sc.ranges:
                    if cond_expr is None:
                        ranges_str_rows.append(" [{0}, {1}]".format(s(l),
                                                                    s(u)))
                    else:
                        ranges_str_rows.append(" [{0}, {1}] if {2}"
                                               .format(s(l), s(u),
                                                       self._expr_val_str(cond_expr)))
                ranges_str = "\n * "
                ranges_str += "\n * ".join(ranges_str_rows)
        # Build default values string
        if not sc.def_exprs:
            defaults_str = " (no default values)"
        else:
            defaults_str_rows = []
            for val_expr, cond_expr in sc.orig_def_exprs:
                row_str = " " + self._expr_val_str(val_expr, "(none)",
                                                   sc.type == STRING)
                defaults_str_rows.append(row_str)
                defaults_str_rows.append("  Condition: " +
                                         self._expr_val_str(cond_expr))
            defaults_str = "\n * "
            defaults_str += "\n * ".join(defaults_str_rows)
        # Build selects string (targets rendered as :ref: links)
        if not sc.orig_selects:
            selects_str = " (no selects)"
        else:
            selects_str_rows = []
            for target, cond_expr in sc.orig_selects:
                if cond_expr is None:
                    selects_str_rows.append(" :ref:`CONFIG_{0}`".format(target.name))
                else:
                    selects_str_rows.append(
                        " :ref:`CONFIG_{0}` if {1}".format(target.name,
                                                self._expr_val_str(cond_expr)))
            selects_str = "\n * "
            selects_str += "\n * ".join(selects_str_rows)
        res = _lines(":Symbol: " +
                     ("(no name)" if sc.name is None else sc.name),
                     ":Type: " + TYPENAME[sc.type],
                     ":Value: " + s(sc.get_value()),
                     ":User value: " + user_val_str,
                     ":Visibility: " + s(_get_visibility(sc)),
                     ":Is choice item: " + BOOL_STR[sc.is_choice_sym],
                     ":Is defined: " + BOOL_STR[sc.is_defined_],
                     ":Is from env.: " + BOOL_STR[sc.is_from_env],
                     ":Is special: " + BOOL_STR[sc.is_special_] + "\n")
        if sc.ranges:
            res += _lines(":Ranges:", ranges_str + "\n")
        res += _lines(":Prompts:",
                      prompts_str,
                      ":Default values:",
                      defaults_str,
                      ":Selects:",
                      selects_str,
                      ":Reverse (select-related) dependencies:",
                      " (no reverse dependencies)" if sc.rev_dep == "n"
                        else " " + self._expr_val_str(sc.rev_dep),
                      ":Additional dependencies from enclosing menus "
                      "and ifs:",
                      additional_deps_str,
                      # srctree prefix stripped for relocatable doc output
                      ":Locations:" + locations_str.replace(self.srctree + "/", ""))
        return res
    #
    # Choice-specific stuff
    #
    # Build selected symbol string
    sel = sc.get_selection()
    sel_str = "(no selection)" if sel is None else sel.name
    # Build default values string
    if not sc.def_exprs:
        defaults_str = " (no default values)"
    else:
        defaults_str_rows = []
        for sym, cond_expr in sc.orig_def_exprs:
            if cond_expr is None:
                defaults_str_rows.append(" {0}".format(sym.name))
            else:
                defaults_str_rows.append(" {0} if {1}".format(sym.name,
                                          self._expr_val_str(cond_expr)))
        defaults_str = "\n".join(defaults_str_rows)
    # Build contained symbols string
    names = [sym.name for sym in sc.actual_symbols]
    syms_string = " ".join(names) if names else "(empty)"
    return _lines("Choice",
                  "Name (for named choices): " +
                    ("(no name)" if sc.name is None else sc.name),
                  "Type            : " + TYPENAME[sc.type],
                  "Selected symbol : " + sel_str,
                  "User value      : " + user_val_str,
                  "Mode            : " + s(sc.get_mode()),
                  "Visibility      : " + s(_get_visibility(sc)),
                  "Optional        : " + BOOL_STR[sc.optional],
                  "Prompts:",
                  prompts_str,
                  "Defaults:",
                  defaults_str,
                  "Choice symbols:",
                  " " + syms_string,
                  "Additional dependencies from enclosing menus and "
                  "ifs:",
                  additional_deps_str,
                  "Locations:" + locations_str)
def _get_sym_or_choice_str(self, sc):
    """Symbols and choices have many properties in common, so we factor out
    common __str__() stuff here. "sc" is short for "symbol or choice".
    Plain-text variant; see also _get_sym_or_choice_str_rest() for the
    reST-flavored one."""
    # As we deal a lot with string representations here, use some
    # convenient shorthand:
    s = _expr_to_str
    #
    # Common symbol/choice properties
    #
    user_val_str = "(no user value)" if sc.user_val is None else \
                   s(sc.user_val)
    # Build prompts string
    if not sc.prompts:
        prompts_str = " (no prompts)"
    else:
        prompts_str_rows = []
        for prompt, cond_expr in sc.orig_prompts:
            if cond_expr is None:
                prompts_str_rows.append(' "{0}"'.format(prompt))
            else:
                prompts_str_rows.append(
                    ' "{0}" if {1}'.format(prompt,
                                           self._expr_val_str(cond_expr)))
        prompts_str = "\n * "
        prompts_str += "\n * ".join(prompts_str_rows)
    # Build locations string (single line here, unlike the reST variant)
    if not sc.def_locations:
        locations_str = "(no locations)"
    else:
        locations_str = " ".join(["{0}:{1}".format(filename, linenr) for
                                  (filename, linenr) in sc.def_locations])
    # Build additional-dependencies-from-menus-and-ifs string
    additional_deps_str = " " + \
      self._expr_val_str(sc.deps_from_containing,
                         "(no additional dependencies)")
    #
    # Symbol-specific stuff
    #
    if isinstance(sc, Symbol):
        # Build ranges string
        # NOTE(review): this nested isinstance(sc, Symbol) check is
        # redundant -- we are already inside the same check.
        if isinstance(sc, Symbol):
            if not sc.ranges:
                ranges_str = " (no ranges)"
            else:
                ranges_str_rows = []
                for l, u, cond_expr in sc.ranges:
                    if cond_expr is None:
                        ranges_str_rows.append(" [{0}, {1}]".format(s(l),
                                                                    s(u)))
                    else:
                        ranges_str_rows.append(" [{0}, {1}] if {2}"
                                               .format(s(l), s(u),
                                                       self._expr_val_str(cond_expr)))
                ranges_str = "\n * "
                ranges_str += "\n * ".join(ranges_str_rows)
        # Build default values string
        if not sc.def_exprs:
            defaults_str = " (no default values)"
        else:
            defaults_str_rows = []
            for val_expr, cond_expr in sc.orig_def_exprs:
                row_str = " " + self._expr_val_str(val_expr, "(none)",
                                                   sc.type == STRING)
                defaults_str_rows.append(row_str)
                defaults_str_rows.append("  Condition: " +
                                         self._expr_val_str(cond_expr))
            defaults_str = "\n * "
            defaults_str += "\n * ".join(defaults_str_rows)
        # Build selects string
        # NOTE(review): still uses :ref: markup and a plain "\n" join here,
        # unlike the other fields -- looks like copy/paste drift from the
        # reST variant; confirm the intended plain-text format.
        if not sc.orig_selects:
            selects_str = " (no selects)"
        else:
            selects_str_rows = []
            for target, cond_expr in sc.orig_selects:
                if cond_expr is None:
                    selects_str_rows.append(" :ref:`CONFIG_{0}`".format(target.name))
                else:
                    selects_str_rows.append(
                        " :ref:`CONFIG_{0}` if {1}".format(target.name,
                                                self._expr_val_str(cond_expr)))
            selects_str = "\n".join(selects_str_rows)
        res = _lines("Symbol " +
                     ("(no name)" if sc.name is None else sc.name),
                     "Type           : " + TYPENAME[sc.type],
                     "Value          : " + s(sc.get_value()),
                     "User value     : " + user_val_str,
                     "Visibility     : " + s(_get_visibility(sc)),
                     "Is choice item : " + BOOL_STR[sc.is_choice_sym],
                     "Is defined     : " + BOOL_STR[sc.is_defined_],
                     "Is from env.   : " + BOOL_STR[sc.is_from_env],
                     "Is special     : " + BOOL_STR[sc.is_special_] + "\n")
        if sc.ranges:
            # NOTE(review): ":Ranges:" is reST-style while every other
            # label in this function is plain -- verify this is intended.
            res += _lines(":Ranges:", ranges_str + "\n")
        res += _lines("Prompts:",
                      prompts_str,
                      "Default values:",
                      defaults_str,
                      "Selects:",
                      selects_str,
                      "Reverse (select-related) dependencies:",
                      " (no reverse dependencies)" if sc.rev_dep == "n"
                        else " " + self._expr_val_str(sc.rev_dep),
                      "Additional dependencies from enclosing menus "
                      "and ifs:",
                      additional_deps_str,
                      "Locations:" + locations_str)
        return res
    #
    # Choice-specific stuff
    #
    # Build selected symbol string
    sel = sc.get_selection()
    sel_str = "(no selection)" if sel is None else sel.name
    # Build default values string
    if not sc.def_exprs:
        defaults_str = " (no default values)"
    else:
        defaults_str_rows = []
        for sym, cond_expr in sc.orig_def_exprs:
            if cond_expr is None:
                defaults_str_rows.append(" {0}".format(sym.name))
            else:
                defaults_str_rows.append(" {0} if {1}".format(sym.name,
                                          self._expr_val_str(cond_expr)))
        defaults_str = "\n".join(defaults_str_rows)
    # Build contained symbols string
    names = [sym.name for sym in sc.actual_symbols]
    syms_string = " ".join(names) if names else "(empty)"
    return _lines("Choice",
                  "Name (for named choices): " +
                    ("(no name)" if sc.name is None else sc.name),
                  "Type            : " + TYPENAME[sc.type],
                  "Selected symbol : " + sel_str,
                  "User value      : " + user_val_str,
                  "Mode            : " + s(sc.get_mode()),
                  "Visibility      : " + s(_get_visibility(sc)),
                  "Optional        : " + BOOL_STR[sc.optional],
                  "Prompts:",
                  prompts_str,
                  "Defaults:",
                  defaults_str,
                  "Choice symbols:",
                  " " + syms_string,
                  "Additional dependencies from enclosing menus and "
                  "ifs:",
                  additional_deps_str,
                  "Locations:" + locations_str)
def _warn(self, msg, filename=None, linenr=None):
"""For printing warnings to stderr."""
if self.print_warnings:
_stderr_msg("warning: " + msg, filename, linenr)
class Item(object):
    """Base class for all Kconfig constructs. The concrete subclasses are
    Symbol, Choice, Menu, and Comment; the is_*() predicates below are
    convenience shorthands for isinstance() checks."""

    def is_symbol(self):
        """True if this item is a Symbol
        (isinstance(item, kconfiglib.Symbol))."""
        return isinstance(self, Symbol)

    def is_choice(self):
        """True if this item is a Choice
        (isinstance(item, kconfiglib.Choice))."""
        return isinstance(self, Choice)

    def is_menu(self):
        """True if this item is a Menu
        (isinstance(item, kconfiglib.Menu))."""
        return isinstance(self, Menu)

    def is_comment(self):
        """True if this item is a Comment
        (isinstance(item, kconfiglib.Comment))."""
        return isinstance(self, Comment)
class Symbol(Item):
"""Represents a configuration symbol - e.g. FOO for
config FOO
..."""
#
# Public interface
#
def get_config(self):
    """Returns the Config instance that this symbol belongs to."""
    return self.config
def get_name(self):
    """Returns the symbol's name as a string."""
    return self.name
def get_type(self):
    """Returns the symbol's type: one of the module-level constants
    UNKNOWN, BOOL, TRISTATE, STRING, HEX, or INT, e.g.
      if sym.get_type() == kconfiglib.STRING:
          ..."""
    return self.type
def get_prompts(self):
    """Returns the symbol's prompt strings in definition-file order, or
    the empty list for promptless symbols. Most symbols have at most one
    prompt, but multiple 'config' entries for the same symbol can each
    contribute one."""
    return [text for text, _ in self.orig_prompts]
def get_help(self):
    """Returns the symbol's help text, or None if it has none."""
    return self.help
def get_parent(self):
    """Returns the enclosing menu or choice statement, or None for a
    top-level symbol. (if statements are purely syntactic and have no
    class representation, so they never appear as parents.)"""
    return self.parent
def get_def_locations(self):
    """Returns the list of (filename, linenr) pairs where the symbol is
    defined -- usually a single entry, but a symbol defined by several
    'config' blocks gets one entry per block. For the following Kconfig,
    FOO would get two entries: the lines marked with *.
      config FOO *
          bool "foo prompt 1"
      config FOO *
          bool "foo prompt 2"
    """
    return self.def_locations
def get_ref_locations(self):
    """Returns the list of (filename, linenr) pairs where the symbol is
    referenced (but not defined) in the configuration. For example, the
    lines marked by * would be included for FOO below:
      config A
          bool
          default BAR || FOO *
      config B
          tristate
          depends on FOO *
          default m if FOO *
      if FOO *
      config A
          bool "A"
      endif
      config FOO (definition not included)
          bool
    """
    return self.ref_locations
def get_value(self):
    """Calculate and return the value of the symbol, as a string. The
    result is cached in self.cached_val until invalidated; the
    self.write_to_conf flag (whether the symbol would appear in a written
    .config) is computed as a side effect. See also
    Symbol.set_user_value()."""
    if self.cached_val is not None:
        return self.cached_val
    # As a quirk of Kconfig, undefined symbols get their name as their
    # value. This is why things like "FOO = bar" work for seeing if FOO has
    # the value "bar".
    if self.type == UNKNOWN:
        self.cached_val = self.name
        return self.name
    new_val = DEFAULT_VALUE[self.type]
    vis = _get_visibility(self)
    # This is easiest to calculate together with the value
    self.write_to_conf = False
    if self.type == BOOL or self.type == TRISTATE:
        # The visibility and mode (modules-only or single-selection) of
        # choice items will be taken into account in _get_visibility()
        if self.is_choice_sym:
            if vis != "n":
                choice = self.parent
                mode = choice.get_mode()
                self.write_to_conf = (mode != "n")
                if mode == "y":
                    # Single-selection mode: "y" only for the selected item
                    if choice.get_selection() is self:
                        new_val = "y"
                    else:
                        new_val = "n"
                elif mode == "m":
                    if self.user_val == "m" or self.user_val == "y":
                        new_val = "m"
        else:
            # If the symbol is visible and has a user value, use that.
            # Otherwise, look at defaults.
            use_defaults = True
            if vis != "n":
                self.write_to_conf = True
                if self.user_val is not None:
                    # User value is capped by visibility
                    new_val = self.config._eval_min(self.user_val, vis)
                    use_defaults = False
            if use_defaults:
                for val_expr, cond_expr in self.def_exprs:
                    cond_eval = self.config._eval_expr(cond_expr)
                    if cond_eval != "n":
                        self.write_to_conf = True
                        new_val = self.config._eval_min(val_expr,
                                                        cond_eval)
                        break
            # Reverse (select-related) dependencies take precedence
            rev_dep_val = self.config._eval_expr(self.rev_dep)
            if rev_dep_val != "n":
                self.write_to_conf = True
                new_val = self.config._eval_max(new_val, rev_dep_val)
            # Promote "m" to "y" for booleans
            if new_val == "m" and self.type == BOOL:
                new_val = "y"
    elif self.type == INT or self.type == HEX:
        has_active_range = False
        low = None
        high = None
        use_defaults = True
        base = 16 if self.type == HEX else 10
        # Find the first range whose condition holds; only that one applies
        for (l, h, cond_expr) in self.ranges:
            if self.config._eval_expr(cond_expr) != "n":
                has_active_range = True
                low_str = _str_val(l)
                high_str = _str_val(h)
                # Malformed bounds silently become 0
                low = int(low_str, base) if \
                  _is_base_n(low_str, base) else 0
                high = int(high_str, base) if \
                  _is_base_n(high_str, base) else 0
                break
        if vis != "n":
            self.write_to_conf = True
            if self.user_val is not None and \
               _is_base_n(self.user_val, base) and \
               (not has_active_range or
                low <= int(self.user_val, base) <= high):
                # If the user value is OK, it is stored in exactly the same
                # form as specified in the assignment (with or without
                # "0x", etc).
                use_defaults = False
                new_val = self.user_val
        if use_defaults:
            for val_expr, cond_expr in self.def_exprs:
                if self.config._eval_expr(cond_expr) != "n":
                    self.write_to_conf = True
                    # If the default value is OK, it is stored in exactly
                    # the same form as specified. Otherwise, it is clamped
                    # to the range, and the output has "0x" as appropriate
                    # for the type.
                    new_val = _str_val(val_expr)
                    if _is_base_n(new_val, base):
                        new_val_num = int(new_val, base)
                        if has_active_range:
                            clamped_val = None
                            if new_val_num < low:
                                clamped_val = low
                            elif new_val_num > high:
                                clamped_val = high
                            if clamped_val is not None:
                                new_val = (hex(clamped_val) if \
                                  self.type == HEX else str(clamped_val))
                    break
            else:  # For the for loop
                # If no user value or default kicks in but the hex/int has
                # an active range, then the low end of the range is used,
                # provided it's > 0, with "0x" prepended as appropriate.
                if has_active_range and low > 0:
                    new_val = (hex(low) if self.type == HEX else str(low))
    elif self.type == STRING:
        use_defaults = True
        if vis != "n":
            self.write_to_conf = True
            if self.user_val is not None:
                new_val = self.user_val
                use_defaults = False
        if use_defaults:
            for val_expr, cond_expr in self.def_exprs:
                if self.config._eval_expr(cond_expr) != "n":
                    self.write_to_conf = True
                    new_val = _str_val(val_expr)
                    break
    self.cached_val = new_val
    return new_val
def get_user_value(self):
    """Returns the user-assigned value (from a .config file or
    Symbol.set_user_value()), provided it was valid for the symbol's
    type; None when no user value has been given."""
    return self.user_val
def get_upper_bound(self):
    """Returns the highest tristate value ("m" or "y") that
    Symbol.set_user_value() could assign without truncation -- i.e. the
    upper bound shown in 'make menuconfig'. Returns None for
    string/hex/int symbols and for bool/tristate symbols that cannot be
    modified (see is_modifiable()). See also the tri_less*()/
    tri_greater*() helpers."""
    if self.type != BOOL and self.type != TRISTATE:
        return None
    rev_dep = self.config._eval_expr(self.rev_dep)
    # A bool selected to "m" gets promoted to "y", pinning it
    if self.type == BOOL and rev_dep == "m":
        return None
    vis = _get_visibility(self)
    return vis if TRI_TO_INT[vis] > TRI_TO_INT[rev_dep] else None
def get_lower_bound(self):
    """Returns the lowest tristate value ("n" or "m") that
    Symbol.set_user_value() could assign without truncation -- i.e. the
    lower bound shown in 'make menuconfig'. Returns None for
    string/hex/int symbols and for bool/tristate symbols that cannot be
    modified (see is_modifiable()). See also the tri_less*()/
    tri_greater*() helpers."""
    if self.type != BOOL and self.type != TRISTATE:
        return None
    rev_dep = self.config._eval_expr(self.rev_dep)
    # A bool selected to "m" gets promoted to "y", pinning it
    if self.type == BOOL and rev_dep == "m":
        return None
    if TRI_TO_INT[_get_visibility(self)] > TRI_TO_INT[rev_dep]:
        return rev_dep
    return None
def get_assignable_values(self):
    """Returns the list of user values assignable to this symbol without
    truncation, e.g.
      if "m" in sym.get_assignable_values():
          sym.set_user_value("m")
    Empty for string/hex/int symbols and for bool/tristate symbols that
    cannot be modified (see is_modifiable()). A convenience wrapper
    around get_lower/upper_bound()."""
    if self.type != BOOL and self.type != TRISTATE:
        return []
    rev_dep = self.config._eval_expr(self.rev_dep)
    # A bool selected to "m" gets promoted to "y", pinning it
    if self.type == BOOL and rev_dep == "m":
        return []
    lo = TRI_TO_INT[rev_dep]
    hi = TRI_TO_INT[_get_visibility(self)]
    candidates = ["n", "m", "y"][lo : hi + 1]
    # A single candidate means the value is pinned -- nothing assignable
    return candidates if len(candidates) > 1 else []
def get_visibility(self):
    """Returns the visibility of the symbol: "n", "m", or "y". For
    bool/tristate symbols this is an upper bound on user-settable values;
    for other types, visibility "n" means the user value is ignored.
    Visibility "n" corresponds to not appearing in the 'make *config'
    interfaces.
    Example (with modules enabled, i.e. MODULES set to 'y'):
      # Assume this has been assigned 'n'
      config N_SYM
          tristate "N_SYM"
      # Assume this has been assigned 'm'
      config M_SYM
          tristate "M_SYM"
      # Has visibility 'n'
      config A
          tristate "A"
          depends on N_SYM
      # Has visibility 'm'
      config B
          tristate "B"
          depends on M_SYM
      # Has visibility 'y'
      config C
          tristate "C"
      # Has no prompt, and hence visibility 'n'
      config D
          tristate
    The tri-valued visibility ensures a symbol depending on an "m" symbol
    cannot be set to "y", which wouldn't be safe. Prefer
    get_lower/upper_bound(), get_assignable_values() and is_modifiable()
    over using this directly."""
    return _get_visibility(self)
def get_referenced_symbols(self, refs_from_enclosing=False):
    """Returns the set() of all symbols this symbol references. For
    example, the symbol defined by
      config FOO
          bool
          prompt "foo" if A && B
          default C if D
          depends on E
          select F if G
    references A through G.
    refs_from_enclosing (default: False): when True, also include symbols
       referenced by enclosing menus and ifs."""
    if refs_from_enclosing:
        return self.all_referenced_syms
    return self.referenced_syms
def get_selected_symbols(self):
    """Returns the set() of symbols X for which this symbol has a
    'select X' or 'select X if Y' clause, whether or not Y currently
    holds. A subset of get_referenced_symbols()."""
    return self.selected_syms
def set_user_value(self, v):
    """Assigns a user value 'v' to the symbol -- equivalent in effect to
    an assignment in a .config file. Bool/tristate values outside the
    range given by get_lower/upper_bound()/get_assignable_values() are
    kept as the user value but truncated by get_value(); values invalid
    for the type are ignored with a warning. is_modifiable() tells
    whether a user value currently has any effect. Use unset_user_value()
    to clear the value again. All dependent symbols are (recursively)
    invalidated, so dependencies just work.
    v: The user value to give to the symbol."""
    self._set_user_value_no_invalidate(v, False)
    if self.name == "MODULES":
        # MODULES potentially affects every symbol's value; there might be
        # something more efficient, but play it safe and invalidate all.
        self.config._invalidate_all()
    else:
        self._invalidate()
        self._invalidate_dependent()
    def unset_user_value(self):
        """Resets the user value of the symbol, as if the symbol had never
        gotten a user value via Config.load_config() or
        Symbol.set_user_value()."""
        self._unset_user_value_no_recursive_invalidate()
        # Symbols that (transitively) depend on this one may change value as
        # a result, so drop their cached values as well
        self._invalidate_dependent()
def is_modifiable(self):
"""Returns True if the value of the symbol could be modified by calling
Symbol.set_user_value().
For bools and tristates, this corresponds to the symbol being visible
in the 'make menuconfig' interface and not already being pinned to a
specific value (e.g. because it is selected by another symbol).
For strings and numbers, this corresponds to just being visible. (See
Symbol.get_visibility().)"""
if self.is_special_:
return False
if self.type == BOOL or self.type == TRISTATE:
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y", pinning it
if rev_dep == "m" and self.type == BOOL:
return False
return TRI_TO_INT[_get_visibility(self)] > TRI_TO_INT[rev_dep]
return _get_visibility(self) != "n"
def is_defined(self):
"""Returns False if the symbol is referred to in the Kconfig but never
actually defined."""
return self.is_defined_
def is_special(self):
"""Returns True if the symbol is one of the special symbols n, m, y, or
UNAME_RELEASE, or gets its value from the environment."""
return self.is_special_
def is_from_environment(self):
"""Returns True if the symbol gets its value from the environment."""
return self.is_from_env
def has_ranges(self):
"""Returns True if the symbol is of type INT or HEX and has ranges that
limit what values it can take on."""
return bool(self.ranges)
def is_choice_symbol(self):
"""Returns True if the symbol is in a choice statement and is an actual
choice symbol (see Choice.get_symbols())."""
return self.is_choice_sym
def is_choice_selection(self):
"""Returns True if the symbol is contained in a choice statement and is
the selected item. Equivalent to
sym.is_choice_symbol() and sym.get_parent().get_selection() is sym"""
return self.is_choice_sym and self.parent.get_selection() is self
def is_allnoconfig_y(self):
"""Returns True if the symbol has the 'allnoconfig_y' option set."""
return self.allnoconfig_y
def rest(self):
"""Returns a string containing various information about the symbol."""
return self.config._get_sym_or_choice_str_rest(self)
def __str__(self):
"""Returns a string containing various information about the symbol."""
return self.config._get_sym_or_choice_str(self)
#
# Private methods
#
    def __init__(self):
        """Symbol constructor -- not intended to be called directly by
        Kconfiglib clients. Only sets up empty/neutral defaults; the parser
        and Config fill the fields in afterwards."""
        self.name = None
        self.type = UNKNOWN
        self.prompts = []  # (prompt, condition) tuples
        self.def_exprs = []  # 'default' properties
        self.ranges = []  # 'range' properties (for int and hex)
        self.help = None  # Help text
        self.rev_dep = "n"  # Reverse (select-related) dependencies
        self.config = None  # Owning Config instance
        self.parent = None  # Enclosing menu/choice, or None at top level
        self.user_val = None  # Value set by user

        # The prompt, default value and select conditions without any
        # dependencies from menus and ifs propagated to them
        self.orig_prompts = []
        self.orig_def_exprs = []
        self.orig_selects = []

        # Dependencies inherited from containing menus and ifs
        self.deps_from_containing = None

        # The set of symbols referenced by this symbol (see
        # get_referenced_symbols())
        self.referenced_syms = set()

        # The set of symbols selected by this symbol (see
        # get_selected_symbols())
        self.selected_syms = set()

        # Like 'referenced_syms', but includes symbols from
        # dependencies inherited from enclosing menus and ifs
        self.all_referenced_syms = set()

        # This records only dependencies specified with 'depends on'. Needed
        # when determining actual choice items (hrrrr...). See also
        # Choice._determine_actual_symbols().
        self.menu_dep = None

        # See Symbol.get_ref/def_locations().
        self.def_locations = []
        self.ref_locations = []

        # Populated in Config._build_dep() after parsing. Links the symbol to
        # the symbols that immediately depend on it (in a caching/invalidation
        # sense). The total set of dependent symbols for the symbol (the
        # transitive closure) is calculated on an as-needed basis in
        # _get_dependent().
        self.dep = set()

        # Cached values

        # Caches the calculated value
        self.cached_val = None
        # Caches the visibility, which acts as an upper bound on the value
        self.cached_visibility = None
        # Caches the total list of dependent symbols. Calculated in
        # _get_dependent().
        self.cached_deps = None

        # Flags

        # Does the symbol have an entry in the Kconfig file? The trailing
        # underscore avoids a collision with is_defined().
        self.is_defined_ = False
        # Should the symbol get an entry in .config?
        self.write_to_conf = False
        # Set to true when _make_conf() is called on a symbol, so that symbols
        # defined in multiple locations only get one .config entry. We need to
        # reset it prior to writing out a new .config.
        self.already_written = False
        # This is set to True for "actual" choice symbols; see
        # Choice._determine_actual_symbols().
        self.is_choice_sym = False
        # Does the symbol get its value in some special way, e.g. from the
        # environment or by being one of the special symbols n, m, and y? If
        # so, the value is stored in self.cached_val, which is never
        # invalidated. The trailing underscore avoids a collision with
        # is_special().
        self.is_special_ = False
        # Does the symbol get its value from the environment?
        self.is_from_env = False
        # Does the symbol have the 'allnoconfig_y' option set?
        self.allnoconfig_y = False
def _invalidate(self):
if self.is_special_:
return
if self.is_choice_sym:
self.parent._invalidate()
self.cached_val = None
self.cached_visibility = None
def _invalidate_dependent(self):
for sym in self._get_dependent():
sym._invalidate()
    def _set_user_value_no_invalidate(self, v, suppress_load_warnings):
        """Like set_user_value(), but does not invalidate any symbols.

        suppress_load_warnings: some warnings are annoying when loading a
        .config that can be helpful when manually invoking set_user_value().
        This flag is set to True to suppress such warnings.

        Perhaps this could be made optional for load_config() instead."""
        # Assignments to special symbols (n/m/y, environment-backed, ...) are
        # always ignored with a warning
        if self.is_special_:
            if self.is_from_env:
                self.config._warn('attempt to assign the value "{0}" to the '
                                  'symbol {1}, which gets its value from the '
                                  'environment. Assignment ignored.'
                                  .format(v, self.name))
            else:
                self.config._warn('attempt to assign the value "{0}" to the '
                                  'special symbol {1}. Assignment ignored.'
                                  .format(v, self.name))
            return

        # Referenced-but-never-defined symbols cannot take a value either
        if not self.is_defined_:
            filename, linenr = self.ref_locations[0]
            if self.config.print_undef_assign:
                _stderr_msg('note: attempt to assign the value "{0}" to {1}, '
                            "which is referenced at {2}:{3} but never "
                            "defined. Assignment ignored."
                            .format(v, self.name, filename, linenr))
            return

        # Check if the value is valid for our type
        if not ((self.type == BOOL and (v == "y" or v == "n") ) or
                (self.type == TRISTATE and (v == "y" or v == "m" or
                                            v == "n") ) or
                (self.type == STRING ) or
                (self.type == INT and _is_base_n(v, 10) ) or
                (self.type == HEX and _is_base_n(v, 16) )):
            self.config._warn('the value "{0}" is invalid for {1}, which has '
                              "type {2}. Assignment ignored."
                              .format(v, self.name, TYPENAME[self.type]))
            return

        if not self.prompts and not suppress_load_warnings:
            self.config._warn('assigning "{0}" to the symbol {1} which '
                              'lacks prompts and thus has visibility "n". '
                              'The assignment will have no effect.'
                              .format(v, self.name))

        self.user_val = v

        # Assigning to a choice symbol also updates the user selection/mode
        # of the enclosing choice
        if self.is_choice_sym and (self.type == BOOL or self.type == TRISTATE):
            choice = self.parent
            if v == "y":
                choice.user_val = self
                choice.user_mode = "y"
            elif v == "m":
                choice.user_val = None
                choice.user_mode = "m"
    def _unset_user_value_no_recursive_invalidate(self):
        """Like unset_user_value(), but only drops this symbol's own caches;
        dependent symbols are not invalidated."""
        self._invalidate()
        self.user_val = None

        if self.is_choice_sym:
            # Also forget the user selection/mode of the enclosing choice
            self.parent._unset_user_value()
    def _make_conf(self, append_fn):
        """Feeds the .config line(s) for this symbol (if any) to 'append_fn'.
        Symbols defined in multiple locations are only written once per
        .config, via the 'already_written' flag."""
        if self.already_written:
            return

        self.already_written = True

        # Note: write_to_conf is determined in get_value()
        val = self.get_value()
        if not self.write_to_conf:
            return

        if self.type == BOOL or self.type == TRISTATE:
            if val == "y" or val == "m":
                append_fn("CONFIG_{0}={1}".format(self.name, val))
            else:
                # "n" symbols are written as a comment line
                append_fn("# CONFIG_{0} is not set".format(self.name))
        elif self.type == INT or self.type == HEX:
            append_fn("CONFIG_{0}={1}".format(self.name, val))
        elif self.type == STRING:
            # Escape \ and "
            append_fn('CONFIG_{0}="{1}"'
                      .format(self.name,
                              val.replace("\\", "\\\\").replace('"', '\\"')))
        else:
            _internal_error("Internal error while creating .config: unknown "
                            'type "{0}".'.format(self.type))
    def _get_dependent(self):
        """Returns the set of symbols that should be invalidated if the value
        of the symbol changes, because they might be affected by the change.
        Note that this is an internal API -- it's probably of limited
        usefulness to clients. The result is cached in 'cached_deps'."""
        if self.cached_deps is not None:
            return self.cached_deps

        # Transitive closure over the direct dependents in 'dep'
        res = set(self.dep)
        for s in self.dep:
            res |= s._get_dependent()

        if self.is_choice_sym:
            # Choice symbols also depend (recursively) on their siblings. The
            # siblings are not included in 'dep' to avoid dependency loops.
            for sibling in self.parent.actual_symbols:
                if sibling is not self:
                    res.add(sibling)
                    res |= sibling.dep
                    for s in sibling.dep:
                        res |= s._get_dependent()

        self.cached_deps = res
        return res
def _has_auto_menu_dep_on(self, on):
"""See Choice._determine_actual_symbols()."""
if not isinstance(self.parent, Choice):
_internal_error("Attempt to determine auto menu dependency for "
"symbol ouside of choice.")
if not self.prompts:
# If we have no prompt, use the menu dependencies instead (what was
# specified with 'depends on')
return self.menu_dep is not None and \
self.config._expr_depends_on(self.menu_dep, on)
for _, cond_expr in self.prompts:
if self.config._expr_depends_on(cond_expr, on):
return True
return False
class Menu(Item):
    """Represents a menu statement."""

    #
    # Public interface
    #

    def get_config(self):
        """Return the Config instance this menu is from."""
        return self.config

    def get_title(self):
        """Returns the title text of the menu."""
        return self.title

    def get_parent(self):
        """Returns the menu or choice statement that contains the menu, or
        None if the menu is at the top level. Note that if statements are
        treated as syntactic sugar and do not have an explicit class
        representation."""
        return self.parent

    def get_location(self):
        """Returns the location of the menu as a (filename, linenr) tuple,
        where filename is a string and linenr an int."""
        return (self.filename, self.linenr)

    def get_items(self, recursive=False):
        """Returns a list containing the items (symbols, menus, choice
        statements and comments) in the menu, in the same order that the
        items appear within the menu.

        recursive (default: False): True if items contained in items within the
        menu should be included recursively (preorder)."""
        if not recursive:
            return self.block
        res = []
        for item in self.block:
            res.append(item)
            # Recurse into nested menus and choices (preorder)
            if isinstance(item, Menu):
                res.extend(item.get_items(True))
            elif isinstance(item, Choice):
                res.extend(item.get_items())
        return res

    def get_symbols(self, recursive=False):
        """Returns a list containing the symbols in the menu, in the same order
        that they appear within the menu.

        recursive (default: False): True if symbols contained in items within
        the menu should be included recursively."""
        return [item for item in self.get_items(recursive) if
                isinstance(item, Symbol)]

    def get_visibility(self):
        """Returns the visibility of the menu. This also affects the visibility
        of subitems. See also Symbol.get_visibility()."""
        return self.config._eval_expr(self.dep_expr)

    def get_visible_if_visibility(self):
        """Returns the visibility the menu gets from its 'visible if'
        condition. "y" if the menu has no 'visible if' condition."""
        return self.config._eval_expr(self.visible_if_expr)

    def get_referenced_symbols(self, refs_from_enclosing=False):
        """See Symbol.get_referenced_symbols()."""
        return self.all_referenced_syms if refs_from_enclosing else \
               self.referenced_syms

    def __str__(self):
        """Returns a string containing various information about the menu."""
        depends_on_str = self.config._expr_val_str(self.orig_deps,
                                                   "(no dependencies)")
        visible_if_str = self.config._expr_val_str(self.visible_if_expr,
                                                   "(no dependencies)")
        additional_deps_str = " " + \
          self.config._expr_val_str(self.deps_from_containing,
                                    "(no additional dependencies)")
        return _lines("Menu",
                      "Title : " + self.title,
                      "'depends on' dependencies : " + depends_on_str,
                      "'visible if' dependencies : " + visible_if_str,
                      "Additional dependencies from enclosing menus and "
                      "ifs:",
                      additional_deps_str,
                      "Location: {0}:{1}".format(self.filename, self.linenr))

    #
    # Private methods
    #

    def __init__(self):
        """Menu constructor -- not intended to be called directly by
        Kconfiglib clients."""
        self.title = None  # Title text of the menu
        self.dep_expr = None  # 'depends on' expression
        self.visible_if_expr = None  # 'visible if' expression
        self.block = None  # List of contained items
        self.config = None  # Owning Config instance
        self.parent = None  # Enclosing menu/choice, or None at top level

        # Dependency expression without dependencies from enclosing menus and
        # ifs propagated
        self.orig_deps = None

        # Dependencies inherited from containing menus and ifs
        self.deps_from_containing = None

        # The set of symbols referenced by this menu (see
        # get_referenced_symbols())
        self.referenced_syms = set()

        # Like 'referenced_syms', but includes symbols from
        # dependencies inherited from enclosing menus and ifs
        self.all_referenced_syms = None

        # See get_location()
        self.filename = None
        self.linenr = None

    def _make_conf(self, append_fn):
        # Only write the menu header when the menu is actually visible
        if self.config._eval_expr(self.dep_expr) != "n" and \
           self.config._eval_expr(self.visible_if_expr) != "n":
            append_fn("\n#\n# {0}\n#".format(self.title))
        _make_block_conf(self.block, append_fn)
class Choice(Item):
    """Represents a choice statement. A choice can be in one of three modes:

    "n" - The choice is not visible and no symbols can be selected.

    "m" - Any number of symbols can be set to "m". The rest will be "n". This
          is safe since potentially conflicting options don't actually get
          compiled into the kernel simultaneously with "m".

    "y" - One symbol will be "y" while the rest are "n".

    Only tristate choices can be in "m" mode, and the visibility of the choice
    is an upper bound on the mode, so that e.g. a choice that depends on a
    symbol with value "m" will be in "m" mode.

    The mode changes automatically when a value is assigned to a symbol within
    the choice.

    See Symbol.get_visibility() too."""

    #
    # Public interface
    #

    def get_config(self):
        """Returns the Config instance this choice is from."""
        return self.config

    def get_name(self):
        """For named choices, returns the name. Returns None for unnamed
        choices. No named choices appear anywhere in the kernel Kconfig files
        as of Linux 3.7.0-rc8."""
        return self.name

    def get_type(self):
        """Returns the type of the choice. See Symbol.get_type()."""
        return self.type

    def get_prompts(self):
        """Returns a list of prompts defined for the choice, in the order they
        appear in the configuration files. Returns the empty list for choices
        with no prompt.

        This list will have a single entry for the vast majority of choices
        having prompts, but having multiple prompts for a single choice is
        possible through having multiple 'choice' entries for it (though I'm
        not sure if that ever happens in practice)."""
        return [prompt for prompt, _ in self.orig_prompts]

    def get_help(self):
        """Returns the help text of the choice, or None if the choice has no
        help text."""
        return self.help

    def get_parent(self):
        """Returns the menu or choice statement that contains the choice, or
        None if the choice is at the top level. Note that if statements are
        treated as syntactic sugar and do not have an explicit class
        representation."""
        return self.parent

    def get_def_locations(self):
        """Returns a list of (filename, linenr) tuples, where filename (string)
        and linenr (int) represent a location where the choice is defined. For
        the vast majority of choices (all of them as of Linux 3.7.0-rc8) this
        list will only contain one element, but it's possible for named choices
        to be defined in multiple locations."""
        return self.def_locations

    def get_selection(self):
        """Returns the symbol selected (either by the user or through
        defaults), or None if either no symbol is selected or the mode is not
        "y"."""
        # NO_SELECTION is the cache sentinel for "no symbol selected", since
        # None means "nothing cached yet"
        if self.cached_selection is not None:
            if self.cached_selection == NO_SELECTION:
                return None
            return self.cached_selection

        if self.get_mode() != "y":
            return self._cache_ret(None)

        # User choice available?
        if self.user_val is not None and _get_visibility(self.user_val) == "y":
            return self._cache_ret(self.user_val)

        if self.optional:
            return self._cache_ret(None)

        return self._cache_ret(self.get_selection_from_defaults())

    def get_selection_from_defaults(self):
        """Like Choice.get_selection(), but acts as if no symbol has been
        selected by the user and no 'optional' flag is in effect."""
        if not self.actual_symbols:
            return None

        for symbol, cond_expr in self.def_exprs:
            if self.config._eval_expr(cond_expr) != "n":
                chosen_symbol = symbol
                break
        else:
            # No 'default' property is satisfied; fall back on the first
            # actual choice symbol
            chosen_symbol = self.actual_symbols[0]

        # Is the chosen symbol visible?
        if _get_visibility(chosen_symbol) != "n":
            return chosen_symbol
        # Otherwise, pick the first visible symbol
        for sym in self.actual_symbols:
            if _get_visibility(sym) != "n":
                return sym
        return None

    def get_user_selection(self):
        """If the choice is in "y" mode and has a user-selected symbol, returns
        that symbol. Otherwise, returns None."""
        return self.user_val

    def get_items(self):
        """Gets all items contained in the choice in the same order as within
        the configuration ("items" instead of "symbols" since choices and
        comments might appear within choices. This only happens in one place as
        of Linux 3.7.0-rc8, in drivers/usb/gadget/Kconfig)."""
        return self.block

    def get_symbols(self):
        """Returns a list containing the choice's symbols.

        A quirk (perhaps a bug) of Kconfig is that you can put items within a
        choice that will not be considered members of the choice insofar as
        selection is concerned. This happens for example if one symbol within a
        choice 'depends on' the symbol preceding it, or if you put non-symbol
        items within choices.

        As of Linux 3.7.0-rc8, this seems to be used intentionally in one
        place: drivers/usb/gadget/Kconfig.

        This function returns the "proper" symbols of the choice in the order
        they appear in the choice, excluding such items. If you want all items
        in the choice, use get_items()."""
        return self.actual_symbols

    def get_referenced_symbols(self, refs_from_enclosing=False):
        """See Symbol.get_referenced_symbols()."""
        return self.all_referenced_syms if refs_from_enclosing else \
               self.referenced_syms

    def get_visibility(self):
        """Returns the visibility of the choice statement: one of "n", "m" or
        "y". This acts as an upper limit on the mode of the choice (though bool
        choices can only have the mode "y"). See the class documentation for an
        explanation of modes."""
        return _get_visibility(self)

    def get_mode(self):
        """Returns the mode of the choice. See the class documentation for
        an explanation of modes."""
        # Optional choices can drop to "n"; others are at least "m"
        minimum_mode = "n" if self.optional else "m"
        mode = self.user_mode if self.user_mode is not None else minimum_mode
        # Visibility is an upper bound on the mode
        mode = self.config._eval_min(mode, _get_visibility(self))

        # Promote "m" to "y" for boolean choices
        if mode == "m" and self.type == BOOL:
            return "y"
        return mode

    def is_optional(self):
        """Returns True if the choice has the 'optional' flag set (and so will
        default to "n" mode)."""
        return self.optional

    def __str__(self):
        """Returns a string containing various information about the choice
        statement."""
        return self.config._get_sym_or_choice_str(self)

    #
    # Private methods
    #

    def __init__(self):
        """Choice constructor -- not intended to be called directly by
        Kconfiglib clients."""
        self.name = None # Yes, choices can be named
        self.type = UNKNOWN
        self.prompts = []
        self.def_exprs = [] # 'default' properties
        self.help = None # Help text
        self.block = None # List of contained items
        self.config = None
        self.parent = None

        self.user_val = None  # Symbol selected by the user (if any)
        self.user_mode = None  # Mode set by the user (if any)

        # We need to filter out symbols that appear within the choice block but
        # are not considered choice items (see
        # Choice._determine_actual_symbols()) This list holds the "actual"
        # choice items.
        self.actual_symbols = []

        # The prompts and default values without any dependencies from
        # enclosing menus and ifs propagated
        self.orig_prompts = []
        self.orig_def_exprs = []

        # Dependencies inherited from containing menus and ifs
        self.deps_from_containing = None

        # The set of symbols referenced by this choice (see
        # get_referenced_symbols())
        self.referenced_syms = set()

        # Like 'referenced_syms', but includes symbols from
        # dependencies inherited from enclosing menus and ifs
        self.all_referenced_syms = set()

        # See Choice.get_def_locations()
        self.def_locations = []

        # Cached values
        self.cached_selection = None
        self.cached_visibility = None

        self.optional = False

    def _determine_actual_symbols(self):
        """If a symbol's visibility depends on the preceding symbol within a
        choice, it is no longer viewed as a choice item. (This is quite
        possibly a bug, but some things consciously use it... ugh. It stems
        from automatic submenu creation.) In addition, it's possible to have
        choices and comments within choices, and those shouldn't be considered
        choice items either. Only drivers/usb/gadget/Kconfig seems to depend on
        any of this. This method computes the "actual" items in the choice and
        sets the is_choice_sym flag on them (retrieved via is_choice_symbol()).

        Don't let this scare you: an earlier version simply checked for a
        sequence of symbols where all symbols after the first appeared in the
        'depends on' expression of the first, and that worked fine. The added
        complexity is to be future-proof in the event that
        drivers/usb/gadget/Kconfig turns even more sinister. It might very well
        be overkilling things (especially if that file is refactored ;)."""
        # Items might depend on each other in a tree structure, so we need a
        # stack to keep track of the current tentative parent
        stack = []
        for item in self.block:
            if not isinstance(item, Symbol):
                # Choices and comments within the choice reset the tracking
                stack = []
                continue

            while stack:
                if item._has_auto_menu_dep_on(stack[-1]):
                    # The item should not be viewed as a choice item, so don't
                    # set item.is_choice_sym
                    stack.append(item)
                    break
                else:
                    stack.pop()
            else:
                item.is_choice_sym = True
                self.actual_symbols.append(item)
                stack.append(item)

    def _cache_ret(self, selection):
        # As None is used to indicate the lack of a cached value we can't use
        # that to cache the fact that the choice has no selection. Instead, we
        # use the symbolic constant NO_SELECTION.
        if selection is None:
            self.cached_selection = NO_SELECTION
        else:
            self.cached_selection = selection
        return selection

    def _invalidate(self):
        # Drops the cached selection and visibility
        self.cached_selection = None
        self.cached_visibility = None

    def _unset_user_value(self):
        # Forgets both the user selection and the user mode
        self._invalidate()
        self.user_val = None
        self.user_mode = None

    def _make_conf(self, append_fn):
        _make_block_conf(self.block, append_fn)
class Comment(Item):
    """Represents a comment statement."""

    #
    # Public interface
    #

    def get_config(self):
        """Returns the Config instance this comment is from."""
        return self.config

    def get_text(self):
        """Returns the text of the comment."""
        return self.text

    def get_parent(self):
        """Returns the menu or choice statement that contains the comment, or
        None if the comment is at the top level. Note that if statements are
        treated as syntactic sugar and do not have an explicit class
        representation."""
        return self.parent

    def get_location(self):
        """Returns the location of the comment as a (filename, linenr) tuple,
        where filename is a string and linenr an int."""
        return (self.filename, self.linenr)

    def get_visibility(self):
        """Returns the visibility of the comment. See also
        Symbol.get_visibility()."""
        return self.config._eval_expr(self.dep_expr)

    def get_referenced_symbols(self, refs_from_enclosing=False):
        """See Symbol.get_referenced_symbols()."""
        return self.all_referenced_syms if refs_from_enclosing else \
               self.referenced_syms

    def __str__(self):
        """Returns a string containing various information about the
        comment."""
        dep_str = self.config._expr_val_str(self.orig_deps,
                                            "(no dependencies)")
        additional_deps_str = " " + \
          self.config._expr_val_str(self.deps_from_containing,
                                    "(no additional dependencies)")
        return _lines("Comment",
                      "Text: " + str(self.text),
                      "Dependencies: " + dep_str,
                      "Additional dependencies from enclosing menus and "
                      "ifs:",
                      additional_deps_str,
                      "Location: {0}:{1}".format(self.filename, self.linenr))

    #
    # Private methods
    #

    def __init__(self):
        """Comment constructor -- not intended to be called directly by
        Kconfiglib clients."""
        self.text = None  # Comment text
        self.dep_expr = None  # 'depends on' expression
        self.config = None  # Owning Config instance
        self.parent = None  # Enclosing menu/choice, or None at top level

        # Dependency expression without dependencies from enclosing menus and
        # ifs propagated
        self.orig_deps = None

        # Dependencies inherited from containing menus and ifs
        self.deps_from_containing = None

        # The set of symbols referenced by this comment (see
        # get_referenced_symbols())
        self.referenced_syms = set()

        # Like 'referenced_syms', but includes symbols from
        # dependencies inherited from enclosing menus and ifs
        self.all_referenced_syms = None

        # See get_location()
        self.filename = None
        self.linenr = None

    def _make_conf(self, append_fn):
        # Only write the comment when its dependencies are satisfied
        if self.config._eval_expr(self.dep_expr) != "n":
            append_fn("\n#\n# {0}\n#".format(self.text))
class Kconfig_Syntax_Error(Exception):
    """Raised when a Kconfig file cannot be tokenized or parsed."""
class Internal_Error(Exception):
    """Raised for errors internal to Kconfiglib itself."""
#
# Public functions
#
def tri_less(v1, v2):
    """True if tristate v1 orders strictly below tristate v2, with "n", "m"
    and "y" ordered from lowest to highest."""
    a, b = TRI_TO_INT[v1], TRI_TO_INT[v2]
    return a < b
def tri_less_eq(v1, v2):
    """True if tristate v1 orders at or below tristate v2, with "n", "m" and
    "y" ordered from lowest to highest."""
    a, b = TRI_TO_INT[v1], TRI_TO_INT[v2]
    return a <= b
def tri_greater(v1, v2):
    """True if tristate v1 orders strictly above tristate v2, with "n", "m"
    and "y" ordered from lowest to highest."""
    a, b = TRI_TO_INT[v1], TRI_TO_INT[v2]
    return a > b
def tri_greater_eq(v1, v2):
    """True if tristate v1 orders at or above tristate v2, with "n", "m" and
    "y" ordered from lowest to highest."""
    a, b = TRI_TO_INT[v1], TRI_TO_INT[v2]
    return a >= b
#
# Internal classes
#
class _Feed(object):
    """Stream-style cursor over a sequence; handy for token lists."""

    # This would be more helpful on the item classes, but would remove some
    # flexibility
    __slots__ = ['items', 'length', 'i']

    def __init__(self, items):
        self.items = items
        self.length = len(items)
        self.i = 0

    def get_next(self):
        """Returns the next item and advances past it, or None when the feed
        is exhausted."""
        idx = self.i
        if idx >= self.length:
            return None
        self.i = idx + 1
        return self.items[idx]

    def peek_next(self):
        """Returns the next item without consuming it, or None when the feed
        is exhausted."""
        if self.i < self.length:
            return self.items[self.i]
        return None

    def check(self, token):
        """If the next item equals 'token', consume it and return True.
        Otherwise leave the feed untouched and return False."""
        idx = self.i
        if idx < self.length and self.items[idx] == token:
            self.i = idx + 1
            return True
        return False

    def unget_all(self):
        """Rewinds the feed back to the first item."""
        self.i = 0
class _FileFeed(object):
    """Feeds lines from a file. Keeps track of the filename and current line
    number. Joins any line ending in \\ with the following line. We need to be
    careful to get the line number right in the presence of continuation
    lines."""
    __slots__ = ['filename', 'lines', 'length', 'linenr']

    def __init__(self, filename):
        self.filename = _clean_up_path(filename)
        with open(filename, "r") as f:
            # No interleaving of I/O and processing yet. Don't know if it would
            # help.
            self.lines = f.readlines()
        self.length = len(self.lines)
        self.linenr = 0

    def get_next(self):
        """Returns the next logical line (continuation lines joined), or None
        at end of file. Advances past all joined physical lines."""
        if self.linenr >= self.length:
            return None
        line = self.lines[self.linenr]
        self.linenr += 1
        while line.endswith("\\\n"):
            line = line[:-2] + self.lines[self.linenr]
            self.linenr += 1
        return line

    def peek_next(self):
        """Like get_next(), but does not advance the line counter."""
        linenr = self.linenr
        if linenr >= self.length:
            return None
        line = self.lines[linenr]
        while line.endswith("\\\n"):
            linenr += 1
            # Bug fix: this previously read 'res[:-2]', referencing an
            # undefined name and raising NameError whenever the peeked line
            # had a continuation.
            line = line[:-2] + self.lines[linenr]
        return line

    def unget(self):
        """Steps back one logical line, rewinding past continuation lines."""
        self.linenr -= 1
        # Bug fix: the continuation marker sits on the line *before* the one
        # we just stepped back to; the previous code tested
        # self.lines[self.linenr], which by construction never ends in "\\\n"
        # here, so continuation lines were never rewound.
        while self.lines[self.linenr - 1].endswith("\\\n"):
            self.linenr -= 1

    def next_nonblank(self):
        """Removes lines up to and including the next non-blank (not all-space)
        line and returns it. Returns None if there are no more non-blank
        lines."""
        while 1:
            line = self.get_next()
            if line is None or not line.isspace():
                return line
#
# Internal functions
#
def _get_visibility(sc):
    """Symbols and Choices have a "visibility" that acts as an upper bound on
    the values a user can set for them, corresponding to the visibility in e.g.
    'make menuconfig'. This function calculates the visibility for the Symbol
    or Choice 'sc' -- the logic is nearly identical. The result is cached in
    sc.cached_visibility."""
    if sc.cached_visibility is None:
        # The visibility is the maximum of the prompt conditions
        vis = "n"
        for _, cond_expr in sc.prompts:
            vis = sc.config._eval_max(vis, cond_expr)

        if isinstance(sc, Symbol) and sc.is_choice_sym:
            # A choice symbol is never more visible than its choice
            vis = sc.config._eval_min(vis, _get_visibility(sc.parent))

        # Promote "m" to "y" if we're dealing with a non-tristate
        if vis == "m" and sc.type != TRISTATE:
            vis = "y"

        sc.cached_visibility = vis
    return sc.cached_visibility
def _make_and(e1, e2):
    """Constructs an AND (&&) expression. Performs trivial simplification.
    Nones equate to 'y'.

    Note: returns None if e1 == e2 == None."""
    # 'y' (and None, which stands for 'y') is the identity element of &&
    if e1 is None or e1 == "y":
        return e2
    if e2 is None or e2 == "y":
        return e1

    e1_is_and = isinstance(e1, tuple) and e1[0] == AND
    e2_is_and = isinstance(e2, tuple) and e2[0] == AND

    # Prefer to merge argument lists if possible to reduce the number of nodes
    if e1_is_and and e2_is_and:
        return (AND, e1[1] + e2[1])
    if e1_is_and:
        return (AND, e1[1] + [e2])
    if e2_is_and:
        return (AND, e2[1] + [e1])
    return (AND, [e1, e2])
def _make_or(e1, e2):
    """Constructs an OR (||) expression. Performs trivial simplification and
    avoids Nones. Nones equate to 'y', which is usually what we want, but needs
    to be kept in mind."""
    # 'y' (and None, which stands for 'y') dominates ||
    if e1 is None or e2 is None or e1 == "y" or e2 == "y":
        return "y"
    if e1 == "n":
        return e2

    e1_is_or = isinstance(e1, tuple) and e1[0] == OR
    e2_is_or = isinstance(e2, tuple) and e2[0] == OR

    # Prefer to merge argument lists if possible to reduce the number of nodes
    if e1_is_or and e2_is_or:
        return (OR, e1[1] + e2[1])
    if e1_is_or:
        return (OR, e1[1] + [e2])
    if e2_is_or:
        return (OR, e2[1] + [e1])
    return (OR, [e1, e2])
def _get_expr_syms_rec(expr, res):
    """_get_expr_syms() helper. Recurses through expressions, adding every
    Symbol encountered to the set 'res'."""
    if isinstance(expr, Symbol):
        res.add(expr)
    elif isinstance(expr, str):
        # String constants contain no symbols
        return
    elif expr[0] == AND or expr[0] == OR:
        # AND/OR nodes hold a list of operands in expr[1]
        for term in expr[1]:
            _get_expr_syms_rec(term, res)
    elif expr[0] == NOT:
        _get_expr_syms_rec(expr[1], res)
    elif expr[0] == EQUAL or expr[0] == UNEQUAL:
        # Comparison operands are either Symbols or string constants
        if isinstance(expr[1], Symbol):
            res.add(expr[1])
        if isinstance(expr[2], Symbol):
            res.add(expr[2])
    else:
        _internal_error("Internal error while fetching symbols from an "
                        "expression with token stream {0}.".format(expr))
def _get_expr_syms(expr):
    """Collects and returns the set() of all symbols appearing in 'expr'.
    Returns an empty set for a None expression."""
    syms = set()
    if expr is None:
        return syms
    _get_expr_syms_rec(expr, syms)
    return syms
def _str_val(obj):
    """Returns the value of 'obj' as a string. Constant (string) symbols are
    returned as-is; anything else must be a Symbol, whose value is fetched."""
    if isinstance(obj, str):
        return obj
    return obj.get_value()
def _make_block_conf(block, append_fn):
    """Feeds the .config strings for each item in 'block' (a list of items)
    to 'append_fn', in order."""
    # The substrings are collected via append_fn and joined by the caller;
    # with older Python versions this gives linear instead of quadratic
    # complexity compared to building the .config contents with +=.
    for item in block:
        item._make_conf(append_fn)
def _sym_str_string(sym_or_str):
    """Returns a double-quoted form for string constants and the plain name
    for Symbol operands."""
    if isinstance(sym_or_str, str):
        return '"{0}"'.format(sym_or_str)
    return sym_or_str.name
def _intersperse(lst, op):
    """_expr_to_str() helper. Gets the string representation of each expression
    in lst and produces a list where op has been inserted between the
    elements."""
    if not lst:
        return ""

    res = []

    def handle_sub_expr(expr):
        # Parenthesize sub-expressions only when needed: leaves and
        # (un)equality tests never need parentheses, nor do operators of
        # higher or equal precedence
        no_parens = isinstance(expr, (str, Symbol)) or \
                    expr[0] in (EQUAL, UNEQUAL) or \
                    PRECEDENCE[op] <= PRECEDENCE[expr[0]]
        if not no_parens:
            res.append("(")
        res.extend(_expr_to_str_rec(expr))
        if not no_parens:
            res.append(")")

    op_str = OP_TO_STR[op]

    handle_sub_expr(lst[0])
    for expr in lst[1:]:
        res.append(op_str)
        handle_sub_expr(expr)

    return res
def _expr_to_str_rec(expr):
    """Returns a list of string fragments that, joined, form the textual
    representation of 'expr'."""
    if expr is None:
        return [""]
    if isinstance(expr, (Symbol, str)):
        return [_sym_str_string(expr)]
    op = expr[0]
    if op in (AND, OR):
        return _intersperse(expr[1], op)
    if op == NOT:
        operand = expr[1]
        fragments = ["!"]
        # Atomic operands (symbols and constant strings) need no parentheses.
        if isinstance(operand, (str, Symbol)):
            fragments.extend(_expr_to_str_rec(operand))
        else:
            fragments.append("(")
            fragments.extend(_expr_to_str_rec(operand))
            fragments.append(")")
        return fragments
    if op in (EQUAL, UNEQUAL):
        return [_sym_str_string(expr[1]),
                OP_TO_STR[op],
                _sym_str_string(expr[2])]
def _expr_to_str(expr):
    """Returns the string representation of the expression 'expr', built by
    joining the fragments produced by _expr_to_str_rec()."""
    return "".join(_expr_to_str_rec(expr))
def _indentation(line):
"""Returns the length of the line's leading whitespace, treating tab stops
as being spaced 8 characters apart."""
line = line.expandtabs()
return len(line) - len(line.lstrip())
def _deindent(line, indent):
"""Deindent 'line' by 'indent' spaces."""
line = line.expandtabs()
if len(line) <= indent:
return line
return line[indent:]
def _is_base_n(s, n):
try:
int(s, n)
return True
except ValueError:
return False
def _lines(*args):
    """Returns a string consisting of all arguments, with newlines inserted
    between them."""
    # join() only inserts the separator between elements, so the result has
    # no trailing newline.
    return "\n".join(args)
def _comment(s):
"""Returns a new string with "#" inserted before each line in 's'."""
if not s:
return "#"
res = "".join(["#" + line for line in s.splitlines(True)])
if s.endswith("\n"):
return res + "#"
return res
def _clean_up_path(path):
"""Strips an initial "./" and any trailing slashes from 'path'."""
if path.startswith("./"):
path = path[2:]
return path.rstrip("/")
def _stderr_msg(msg, filename, linenr):
    """Writes 'msg' to stderr, prefixed with "<filename>:<linenr>: " when a
    filename is available."""
    prefix = "" if filename is None else \
        "{0}:{1}: ".format(_clean_up_path(filename), linenr)
    sys.stderr.write(prefix + msg + "\n")
def _tokenization_error(s, filename, linenr):
    """Raises Kconfig_Syntax_Error for a line that could not be tokenized,
    including the location when a filename is known."""
    loc = "{0}:{1}: ".format(filename, linenr) if filename is not None else ""
    raise Kconfig_Syntax_Error("{0}Couldn't tokenize '{1}'"
                               .format(loc, s.strip()))
def _parse_error(s, msg, filename, linenr):
    """Raises Kconfig_Syntax_Error for a line that could not be parsed,
    appending the explanation 'msg' when one is supplied."""
    loc = "{0}:{1}: ".format(filename, linenr) if filename is not None else ""
    suffix = "." if msg is None else ": " + msg
    raise Kconfig_Syntax_Error("{0}Couldn't parse '{1}'{2}"
                               .format(loc, s.strip(), suffix))
def _internal_error(msg):
    """Raises Internal_Error with 'msg' plus a request to report the bug."""
    raise Internal_Error(
        msg +
        "\nSorry! You may want to send an email to ulfalizer a.t Google's "
        "email service to tell me about this. Include the message above and the "
        "stack trace and describe what you were doing.")
#
# Internal global constants
#

# Tokens produced by the lexer; one integer id per keyword/operator.
(T_AND, T_OR, T_NOT,
 T_OPEN_PAREN, T_CLOSE_PAREN,
 T_EQUAL, T_UNEQUAL,
 T_MAINMENU, T_MENU, T_ENDMENU,
 T_SOURCE, T_CHOICE, T_ENDCHOICE,
 T_COMMENT, T_CONFIG, T_MENUCONFIG,
 T_HELP, T_IF, T_ENDIF, T_DEPENDS, T_ON,
 T_OPTIONAL, T_PROMPT, T_DEFAULT,
 T_BOOL, T_TRISTATE, T_HEX, T_INT, T_STRING,
 T_DEF_BOOL, T_DEF_TRISTATE,
 T_SELECT, T_RANGE, T_OPTION, T_ALLNOCONFIG_Y, T_ENV,
 T_DEFCONFIG_LIST, T_MODULES, T_VISIBLE) = range(39)

# The leading underscore before the function assignments below prevent pydoc
# from listing them. The constants could be hidden too, but they're fairly
# obviously internal anyway, so don't bother spamming the code.

# Keyword to token map. Note that the get() method is assigned directly as a
# small optimization.
_get_keyword = \
    {"mainmenu": T_MAINMENU, "menu": T_MENU, "endmenu": T_ENDMENU,
     "endif": T_ENDIF, "endchoice": T_ENDCHOICE, "source": T_SOURCE,
     "choice": T_CHOICE, "config": T_CONFIG, "comment": T_COMMENT,
     "menuconfig": T_MENUCONFIG, "help": T_HELP, "if": T_IF,
     "depends": T_DEPENDS, "on": T_ON, "optional": T_OPTIONAL,
     "prompt": T_PROMPT, "default": T_DEFAULT, "bool": T_BOOL, "boolean": T_BOOL,
     "tristate": T_TRISTATE, "int": T_INT, "hex": T_HEX, "def_bool": T_DEF_BOOL,
     "def_tristate": T_DEF_TRISTATE, "string": T_STRING, "select": T_SELECT,
     "range": T_RANGE, "option": T_OPTION, "allnoconfig_y": T_ALLNOCONFIG_Y,
     "env": T_ENV, "defconfig_list": T_DEFCONFIG_LIST, "modules": T_MODULES,
     "visible": T_VISIBLE}.get

# Strings to use for True and False
BOOL_STR = {False: "false", True: "true"}

# Tokens after which identifier-like lexemes are treated as strings. T_CHOICE
# is included to avoid symbols being registered for named choices.
STRING_LEX = frozenset((T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING, T_CHOICE,
                        T_PROMPT, T_MENU, T_COMMENT, T_SOURCE, T_MAINMENU))

# NOTE(review): the regexes below require 're' (and _stderr_msg requires
# 'sys') to be imported at the top of the file -- confirm in the full source.

# Matches the initial token on a line; see _tokenize(). Also eats trailing
# whitespace as an optimization.
_initial_token_re_match = re.compile(r"[^\w]*(\w+)\s*").match

# Matches an identifier/keyword optionally preceded by whitespace. Also eats
# trailing whitespace as an optimization.
_id_keyword_re_match = re.compile(r"\s*([\w./-]+)\s*").match

# Regular expressions for parsing .config files
_set_re_match = re.compile(r"CONFIG_(\w+)=(.*)").match
_unset_re_match = re.compile(r"# CONFIG_(\w+) is not set").match

# Regular expression for finding $-references to symbols in strings
_sym_ref_re_search = re.compile(r"\$[A-Za-z0-9_]+").search

# Integers representing symbol types
UNKNOWN, BOOL, TRISTATE, STRING, HEX, INT = range(6)

# Strings to use for types
TYPENAME = {UNKNOWN: "unknown", BOOL: "bool", TRISTATE: "tristate",
            STRING: "string", HEX: "hex", INT: "int"}

# Token to type mapping
TOKEN_TO_TYPE = {T_BOOL: BOOL, T_TRISTATE: TRISTATE, T_STRING: STRING,
                 T_INT: INT, T_HEX: HEX}

# Default values for symbols of different types (the value the symbol gets if
# it is not assigned a user value and none of its 'default' clauses kick in)
DEFAULT_VALUE = {BOOL: "n", TRISTATE: "n", STRING: "", INT: "", HEX: ""}

# Indicates that no item is selected in a choice statement
NO_SELECTION = 0

# Integers representing expression types
AND, OR, NOT, EQUAL, UNEQUAL = range(5)

# Map from tristate values to integers
TRI_TO_INT = {"n": 0, "m": 1, "y": 2}

# Printing-related stuff

# Operator strings used when pretty-printing expressions.
OP_TO_STR = {AND: " && ", OR: " || ", EQUAL: " = ", UNEQUAL: " != "}
# Binding strength used to decide parenthesization (higher binds tighter).
PRECEDENCE = {OR: 0, AND: 1, NOT: 2}
| apache-2.0 | 3,702,397,827,326,251,500 | 38.39759 | 89 | 0.541083 | false |
KaiserAndres/kaiserBot | bot_executables.py | 1 | 4751 | import roller
import random
# Number of tarot cards drawn when the user does not specify an amount.
DEFAULT_CARD_AMOUNT = 1
# Upper bound on cards per spread, to keep replies short.
MAX_CARDS = 15
# Separator placed between card names in the reply message.
CARD_SEPARATOR = "||"
def ping_exec(irc, message):
    """Answers a server PING with the matching PONG so the connection is
    kept alive."""
    token = message.text.split(" ")[1]
    irc.send(("PONG " + token + "\r\n").encode("utf-8"))
def roll_exec(irc, message):
    '''
    A !roll comand has the following structure:
    !roll diceAmount+d+diceSize+"+"+modifier
    * Dice amount is an integer up to 20000
    * Dice Size is an integer
    * Modifier is an integer that is added onto the roll after
    The !Roll command can also have this structure:
    !!roll d+diceAmount+d+diceSize+"+"+modifier
    * Dice amount is the result of a roll of said size and then proceeds
      to roll that many of the following dice
    * Dice Size is an integer
    * Modifier is an integer that is added onto the roll after
    '''
    # diceNumbers layout (from usage below): [0] appears to be the number of
    # separate rolls, [1] the dice amount, [2] the dice size, [3] the
    # modifier -- TODO confirm against roller.getRolledNumbers().
    diceNumbers = roller.getRolledNumbers(message.text)
    messageToSend = ''

    # -------------------------------------------------------------------
    # Hard limits on the dice sizes
    # -------------------------------------------------------------------
    # NOTE(review): these caps (10 rolls / 2000 dice) are stricter than the
    # "up to 20000" mentioned in the docstring above.
    if diceNumbers[0] > 10:
        diceNumbers[0] = 10
    if diceNumbers[0] < 1:
        diceNumbers[0] = 1
    if diceNumbers[1] > 2000:
        diceNumbers[1] = 2000
    if diceNumbers[1] < 1:
        diceNumbers[1] = 1
    if diceNumbers[2] < 1:
        diceNumbers[2] = 1

    rolledArray = roller.roll(diceNumbers[0],
                              diceNumbers[1],
                              diceNumbers[2])

    # Build one "(NdS+M) [roll+M] : {total}" fragment per roll; the \x03
    # sequences are mIRC color codes.
    for rollNum in rolledArray:
        # REMINDER: make a message maker function cause this is ugly!
        if (diceNumbers[3] == 0):
            messageToSend = (messageToSend +
                             "\x0312,15(" + str(diceNumbers[1]) +
                             "d" + str(diceNumbers[2]) + ") \x032,15[" +
                             str(rollNum) + "]\x031,15 : \x034,15{" +
                             str(rollNum + diceNumbers[3]) + "} ")
        else:
            messageToSend = (messageToSend + "\x0312,15(" +
                             str(diceNumbers[1]) + "d" +
                             str(diceNumbers[2]) + "+" +
                             str(diceNumbers[3]) + ") \x032,15[" +
                             str(rollNum) + "+" +
                             str(diceNumbers[3]) +
                             "]\x031,15 : \x034,15{" +
                             str(rollNum + diceNumbers[3]) + "} ")
    irc.send(message.reply(messageToSend))
def join_exec(irc, message):
    """Handles "!JOIN #CHANNEL": asks the IRC server to join the channel.

    Everything from the first '#' to the end of the message (spaces
    included) is treated as the channel name; if no '#' is present an
    error reply is sent instead.
    """
    hash_pos = message.text.find("#")
    channel = message.text[hash_pos:] if hash_pos != -1 else ""
    if channel:
        irc.send(("JOIN " + channel + "\n").encode("utf-8"))
    else:
        irc.send(message.reply("Error 02: bad channel."))
def tarot_exec(irc, message):
    """Handles "!tarot <NUMBER OF CARDS>": draws that many cards from the
    deck and replies with the resulting spread."""
    amount = get_card_amount(message)
    cards = spread_cards(amount)
    reply = "You got these cards: " + CARD_SEPARATOR.join(cards)
    irc.send(message.reply(reply))
def spread_cards(card_amount):
    """Draws 'card_amount' distinct cards from the "deck" file, each reversed
    with 50% probability, and returns them as a list of strings."""
    drawn = []
    deck = load_deck("deck")
    for _ in range(card_amount):
        # Note: the two randint() calls happen in this exact order so that
        # seeded runs behave identically.
        index = random.randint(0, len(deck) - 1)
        reversed_draw = random.randint(0, 1) == 1
        card = deck[index]
        deck.remove(card)
        if reversed_draw:
            card = card + "(reversed)"
        drawn.append(card)
    return drawn
def get_card_amount(message):
    """Extracts the requested card count from the command text.

    Only decimal digits among the first 9 characters of the message are
    considered; if none are found DEFAULT_CARD_AMOUNT is used, and the
    result is capped at MAX_CARDS.
    """
    digit_prefix = "".join(
        ch for index, ch in enumerate(message.text)
        if index < 9 and ch.isdecimal())
    try:
        amount = int(digit_prefix)
    except ValueError:
        amount = DEFAULT_CARD_AMOUNT
    return min(amount, MAX_CARDS)
def load_deck(deck_file_name):
    """Reads the tarot deck from 'deck_file_name', one card name per line.

    Returns the card names with their trailing newlines stripped.

    Fix: the original sliced off the last character of every line
    (card[:-1]), which silently truncated the final card's name whenever
    the file did not end with a newline; rstrip("\\n") removes only the
    line terminator. The file is also now closed via a context manager
    even if reading raises.
    """
    with open(deck_file_name, "r") as deck_file:
        return [line.rstrip("\n") for line in deck_file]
| mit | -1,279,902,818,268,667,600 | 29.455128 | 80 | 0.53273 | false |
BenLangmead/qtip-experiments | experiments/real_data/perf_tabulate.py | 1 | 3305 | """
Creates a table with efficiency metrics (running time, peak memory
footprint) from SLURM output generated from sbatch_align.sh.
"""
from __future__ import print_function
import glob
import sys
from collections import defaultdict
nslurm, nsam = 0, 0
sam_names = defaultdict(int)
tab_wrapped = defaultdict(lambda: defaultdict(int))
to_slurm_wrapped = {}
for fn in glob.glob('slurm-*.out'):
with open(fn) as fh:
ln = fh.readline()
if 'is up to date' in ln:
continue # skip trivial job target was already up to date
if ln.split()[0] != 'python':
continue
nsam += 1
name, t_tandal, t_tandpa, wrappeak, childpeak, t_overall, t_inp = None, 0, 0, 0, 0, 0, 0
while True:
ln = fh.readline()
if len(ln) == 0:
break
ln = ln.rstrip()
if '--vanilla-out' in ln:
assert name is None
name = ln.split()[1]
if 'INFO:Overall' in ln:
t_overall = float(ln.split()[-1])
if 'INFO:Aligning input reads' in ln:
t_inp = float(ln.split()[-1])
if 'INFO:Aligning tandem reads' in ln and 'paired' not in ln:
t_tandal = float(ln.split()[-1])
if 'INFO:Parsing tandem alignments' in ln:
t_tandpa = float(ln.split()[-1])
if 'INFO:Peak memory usage (RSS) of Python wrapper' in ln:
wrappeak = ln.split()[-1]
assert wrappeak[-2:] == 'GB'
wrappeak = float(wrappeak[:-2]) * 1024 * 1024 * 1024
if 'INFO:Peak memory usage (RSS) of children' in ln:
childpeak = ln.split()[-1]
assert childpeak[-2:] == 'GB'
childpeak = float(childpeak[:-2]) * 1024 * 1024 * 1024
sam_names[name] += 1
tab_wrapped[name]['wrappeak'] = wrappeak
tab_wrapped[name]['childpeak'] = childpeak
tab_wrapped[name]['t_overall'] = t_overall
tab_wrapped[name]['t_inp'] = t_inp
tab_wrapped[name]['t_tandal'] = t_tandal
tab_wrapped[name]['t_tandpa'] = t_tandpa
to_slurm_wrapped[name] = fn
nslurm += 1
print('# slurm files: %d' % nslurm, file=sys.stderr)
print('# sam files: %d' % nsam, file=sys.stderr)
for k, v in sorted(sam_names.items()):
print(' %s: %d' % (k, v), file=sys.stderr)
aln_map = {'bt2': 'Bowtie 2', 'bwa': 'BWA-MEM', 'snap': 'SNAP'}
print('data,aligner,paired,align_time,overall_time,pct_increase_a_to_o,peak_wrapper,peak_children,pct_increase_peak')
for k in sorted(sam_names.keys()):
wrappeak = tab_wrapped[k]['wrappeak']
childpeak = tab_wrapped[k]['childpeak']
wrappct = 0 if childpeak == 0 else (wrappeak * 100.0 / childpeak)
wrappct = ('+' if wrappct >= 0 else '') + ('%0.3f' % wrappct)
t_o, t_a = tab_wrapped[k]['t_overall'], tab_wrapped[k]['t_inp']
t_pct = 0 if t_a == 0 else ((t_o - t_a) * 100.0/t_a)
t_pct = ('+' if t_pct >= 0 else '') + ('%0.3f' % t_pct)
srr, aligner, paired, _ = k.split('.')
srr = srr[:-2] # chop off trailing _1
aligner = aln_map[aligner]
paired = 'T' if paired == 'pair' else 'F'
print('%s,%s,%s,%.0f,%.0f,%s,%.0f,%.0f,%s' % (srr, aligner, paired, t_a, t_o, t_pct, wrappeak, childpeak, wrappct))
| mit | -3,366,417,787,991,441,000 | 39.304878 | 119 | 0.552496 | false |
t3dev/odoo | addons/mrp/__manifest__.py | 5 | 1731 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Manufacturing',
'version': '2.0',
'website': 'https://www.odoo.com/page/manufacturing',
'category': 'Manufacturing/Manufacturing',
'sequence': 16,
'summary': 'Manufacturing Orders & BOMs',
'depends': ['product', 'stock', 'resource'],
'description': "",
'data': [
'security/mrp_security.xml',
'security/ir.model.access.csv',
'data/mrp_data.xml',
'wizard/mrp_product_produce_views.xml',
'wizard/change_production_qty_views.xml',
'wizard/mrp_workcenter_block_view.xml',
'wizard/stock_warn_insufficient_qty_views.xml',
'views/mrp_views_menus.xml',
'views/stock_move_views.xml',
'views/mrp_workorder_views.xml',
'views/mrp_workcenter_views.xml',
'views/mrp_production_views.xml',
'views/mrp_routing_views.xml',
'views/mrp_bom_views.xml',
'views/product_views.xml',
'views/stock_warehouse_views.xml',
'views/stock_picking_views.xml',
'views/mrp_unbuild_views.xml',
'views/ir_attachment_view.xml',
'views/res_config_settings_views.xml',
'views/mrp_templates.xml',
'views/stock_scrap_views.xml',
'report/mrp_report_views_main.xml',
'report/mrp_report_bom_structure.xml',
'report/mrp_production_templates.xml',
'report/report_stock_rule.xml',
'report/mrp_zebra_production_templates.xml',
],
'qweb': ['static/src/xml/mrp.xml'],
'demo': [
'data/mrp_demo.xml',
],
'test': [],
'application': True,
'post_init_hook': '_create_warehouse_data',
}
| gpl-3.0 | 6,180,193,645,343,356,000 | 33.62 | 74 | 0.59792 | false |
napjon/moocs_solution | introcs-udacity/Search Engine(jonappsearch)/main.py | 1 | 1817 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
form = """
<html>
<head>
<title>Search Engine</title>
</head>
<body>
<h2>Search Engine</h2>
<form method="post">
<textarea name="text">%(text)s</textarea>
<br>
<input type="submit">
<br>
<br>
<br>
%(links)s
</form>
</body>
</html>
"""
import webapp2
import cgi
from search import lucky_search
from crawler import crawl_web, compute_ranks
class MainHandler(webapp2.RequestHandler):
    """Serves the search form and answers submitted queries by crawling,
    ranking, and running lucky_search()."""

    def render(self, text = "", links = ""):
        """Writes the page template with HTML-escaped 'text' and 'links'."""
        values = {'text': self.escape_html(text),
                  'links': self.escape_html(links)}
        return self.response.write(form % values)

    def get(self):
        """Shows the empty search form."""
        self.render()

    def escape_html(self, s):
        """HTML-escapes 's', including quote characters."""
        return cgi.escape(s, quote = True)

    def post(self):
        """Crawls the seed site, computes page ranks, and renders the best
        match for the submitted query (or a fallback hint if none)."""
        corpus, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
        ranks = compute_ranks(graph)
        query = self.request.get('text')
        result = lucky_search(corpus, ranks, query)
        if result:
            self.render(text = query, links = result)
        else:
            self.render(text = "", links = "try www.google.com")
# WSGI application: the site root is handled by MainHandler.
app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)
| mit | -7,123,328,385,974,137,000 | 23.890411 | 79 | 0.628509 | false |
rjschwei/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/operations/disks_operations.py | 1 | 30126 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class DisksOperations(object):
"""DisksOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An objec model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-04-30-preview".
"""
    def __init__(self, client, config, serializer, deserializer):
        self._client = client            # HTTP pipeline used to build/send requests
        self._serialize = serializer     # serializes URL parts, headers, and bodies
        self._deserialize = deserializer # deserializes responses into model objects
        # Fixed API version for this operation group (see class docstring).
        self.api_version = "2016-04-30-preview"

        self.config = config
    # NOTE(review): auto-generated by AutoRest -- behavioral changes belong
    # in the generator/spec, not in hand edits here.
    def create_or_update(
            self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a disk.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the disk within the given subscription
         and resource group.
        :type disk_name: str
        :param disk: Disk object supplied in the body of the Put disk
         operation.
        :type disk: :class:`Disk <azure.mgmt.compute.models.Disk>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`Disk <azure.mgmt.compute.models.Disk>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(disk, 'Disk')

        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the Azure-provided status link until the LRO completes.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            # Both accepted statuses deserialize to the same model.
            if response.status_code == 200:
                deserialized = self._deserialize('Disk', response)
            if response.status_code == 202:
                deserialized = self._deserialize('Disk', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        # raw=True bypasses polling: a single synchronous round trip.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    # NOTE(review): auto-generated by AutoRest -- behavioral changes belong
    # in the generator/spec, not in hand edits here.
    def update(
            self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, **operation_config):
        """Updates (patches) a disk.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the disk within the given subscription
         and resource group.
        :type disk_name: str
        :param disk: Disk object supplied in the body of the Patch disk
         operation.
        :type disk: :class:`DiskUpdate <azure.mgmt.compute.models.DiskUpdate>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`Disk <azure.mgmt.compute.models.Disk>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(disk, 'DiskUpdate')

        # Construct and send request
        def long_running_send():
            # Initial PATCH that starts the long-running operation.
            request = self._client.patch(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the Azure-provided status link until the LRO completes.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            # Both accepted statuses deserialize to the same model.
            if response.status_code == 200:
                deserialized = self._deserialize('Disk', response)
            if response.status_code == 202:
                deserialized = self._deserialize('Disk', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        # raw=True bypasses polling: a single synchronous round trip.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    # NOTE(review): auto-generated by AutoRest -- behavioral changes belong
    # in the generator/spec, not in hand edits here.
    def get(
            self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config):
        """Gets information about a disk.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the disk within the given subscription
         and resource group.
        :type disk_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Disk <azure.mgmt.compute.models.Disk>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request (synchronous -- no LRO polling here).
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('Disk', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    # NOTE(review): auto-generated by AutoRest -- behavioral changes belong
    # in the generator/spec, not in hand edits here.
    def delete(
            self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config):
        """Deletes a disk.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the disk within the given subscription
         and resource group.
        :type disk_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`OperationStatusResponse
         <azure.mgmt.compute.models.OperationStatusResponse>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # Initial DELETE that starts the long-running operation (no body).
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the Azure-provided status link until the LRO completes.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            # Only 200 carries a body; 202/204 yield None.
            if response.status_code == 200:
                deserialized = self._deserialize('OperationStatusResponse', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        # raw=True bypasses polling: a single synchronous round trip.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    # NOTE(review): auto-generated by AutoRest -- behavioral changes belong
    # in the generator/spec, not in hand edits here.
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Lists all the disks under a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`DiskPaged <azure.mgmt.compute.models.DiskPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL; later pages: follow the
            # server-provided nextLink verbatim.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response -- DiskPaged lazily fetches pages on iteration.
        deserialized = models.DiskPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.DiskPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    # NOTE(review): auto-generated by AutoRest -- behavioral changes belong
    # in the generator/spec, not in hand edits here.
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Lists all the disks under a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`DiskPaged <azure.mgmt.compute.models.DiskPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL; later pages: follow the
            # server-provided nextLink verbatim.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response -- DiskPaged lazily fetches pages on iteration.
        deserialized = models.DiskPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.DiskPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    def grant_access(
            self, resource_group_name, disk_name, access, duration_in_seconds, custom_headers=None, raw=False, **operation_config):
        """Grants access to a disk.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the disk within the given subscription
         and resource group.
        :type disk_name: str
        :param access: Possible values include: 'None', 'Read'
        :type access: str or :class:`AccessLevel
         <azure.mgmt.compute.models.AccessLevel>`
        :param duration_in_seconds: Time duration in seconds until the SAS
         access expires.
        :type duration_in_seconds: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`AccessUri
         <azure.mgmt.compute.models.AccessUri>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # NOTE(review): Autorest-generated long-running-operation wrapper;
        # the three nested closures below are the standard LRO triple
        # (initial POST, status poll, terminal deserialization).
        grant_access_data = models.GrantAccessData(access=access, duration_in_seconds=duration_in_seconds)

        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(grant_access_data, 'GrantAccessData')

        # Construct and send request
        def long_running_send():
            # Kicks off the operation with the initial POST.
            request = self._client.post(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the Azure-provided status link until completion.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 carries the AccessUri payload; 202 means accepted with no
            # body, so deserialized stays None in that case.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('AccessUri', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        # raw=True short-circuits polling: send once and hand back whatever
        # the first response contained.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def revoke_access(
            self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config):
        """Revokes access to a disk.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the disk within the given subscription
         and resource group.
        :type disk_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`OperationStatusResponse
         <azure.mgmt.compute.models.OperationStatusResponse>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # NOTE(review): Autorest-generated long-running-operation wrapper;
        # mirrors grant_access but POSTs to endGetAccess with no body.
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # Initial POST that starts the revoke operation.
            request = self._client.post(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the Azure-provided status link until completion.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 carries an OperationStatusResponse body; 202 is accepted
            # with no body, leaving deserialized as None.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('OperationStatusResponse', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        # raw=True skips polling entirely and returns the first response.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
| mit | -5,214,372,820,157,999,000 | 43.04386 | 144 | 0.627631 | false |
jkoelker/quark | quark/tests/plugin_modules/test_subnets.py | 1 | 36379 | # Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import time
import uuid
import mock
from neutron.api.v2 import attributes as neutron_attrs
from neutron.common import exceptions
from neutron.openstack.common.notifier import api as notifier_api
from oslo.config import cfg
from quark.db import models
from quark.tests import test_quark_plugin
class TestQuarkGetSubnetCount(test_quark_plugin.TestQuarkPlugin):
    """Smoke test for the subnet-count plugin entry point."""

    def test_get_subnet_count(self):
        """This isn't really testable."""
        patched_count = mock.patch("quark.db.api.subnet_count_all")
        with patched_count:
            self.plugin.get_subnets_count(self.context, {})
class TestQuarkGetSubnets(test_quark_plugin.TestQuarkPlugin):
    """Tests for the subnet list/show plugin entry points."""

    @contextlib.contextmanager
    def _stubs(self, subnets=None, routes=None):
        """Patch quark.db.api.subnet_find to return canned subnet models.

        ``subnets`` may be a list of dicts (list behavior), a single dict
        (show behavior), or None (not-found behavior).  Every subnet model
        gets the same ``routes`` attached.
        """
        if routes is None:
            routes = []
        route_models = []
        for route in routes:
            r = models.Route()
            r.update(route)
            route_models.append(r)
        if isinstance(subnets, list):
            # List case: subnet_find returns a list of models.
            subnet_models = []
            for subnet in subnets:
                s_dict = subnet.copy()
                s_dict["routes"] = route_models
                s = models.Subnet(network=models.Network())
                s.update(s_dict)
                subnet_models.append(s)
        elif subnets:
            # Show case: subnet_find returns one model.
            mod = models.Subnet(network=models.Network())
            mod.update(subnets)
            mod["routes"] = route_models
            subnet_models = mod
        else:
            # Not-found case.
            subnet_models = None
        with mock.patch("quark.db.api.subnet_find") as subnet_find:
            subnet_find.return_value = subnet_models
            yield

    def test_subnets_list(self):
        """get_subnets echoes subnet fields and translates routes."""
        subnet_id = str(uuid.uuid4())
        route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1")
        subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
                      tenant_id=self.context.tenant_id, ip_version=4,
                      cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
                      dns_nameservers=[],
                      enable_dhcp=None)
        # Route models come back as Neutron-style host_routes dicts.
        expected_route = dict(destination=route["cidr"],
                              nexthop=route["gateway"])
        with self._stubs(subnets=[subnet], routes=[route]):
            res = self.plugin.get_subnets(self.context, {}, {})

            # Compare routes separately
            routes = res[0].pop("host_routes")
            for key in subnet.keys():
                self.assertEqual(res[0][key], subnet[key])
            for key in expected_route.keys():
                self.assertEqual(routes[0][key], expected_route[key])

    def test_subnet_show_fail(self):
        """get_subnet raises SubnetNotFound when the db finds nothing."""
        with self._stubs():
            with self.assertRaises(exceptions.SubnetNotFound):
                self.plugin.get_subnet(self.context, 1)

    def test_subnet_show(self):
        """get_subnet echoes subnet fields and translates routes."""
        subnet_id = str(uuid.uuid4())
        route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1",
                     subnet_id=subnet_id)
        expected_route = dict(destination=route["cidr"],
                              nexthop=route["gateway"])
        subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
                      tenant_id=self.context.tenant_id, ip_version=4,
                      cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
                      dns_nameservers=[],
                      enable_dhcp=None)
        with self._stubs(subnets=subnet, routes=[route]):
            res = self.plugin.get_subnet(self.context, subnet_id)

            # Compare routes separately
            routes = res.pop("host_routes")
            for key in subnet.keys():
                self.assertEqual(res[key], subnet[key])
            for key in expected_route.keys():
                self.assertEqual(routes[0][key], expected_route[key])
class TestQuarkCreateSubnetOverlapping(test_quark_plugin.TestQuarkPlugin):
    """Tests for the allow_overlapping_ips config flag on subnet create."""

    @contextlib.contextmanager
    def _stubs(self, subnets=None):
        """Patch the db layer with a network carrying existing ``subnets``."""
        if subnets is None:
            subnets = []
        subnet_models = []
        for subnet in subnets:
            s = models.Subnet()
            s.update(subnet)
            subnet_models.append(s)
        network = models.Network()
        network.update(dict(id=1, subnets=subnet_models))
        with contextlib.nested(
            mock.patch("quark.db.api.network_find"),
            mock.patch("quark.db.api.subnet_find"),
            mock.patch("quark.db.api.subnet_create")
        ) as (net_find, subnet_find, subnet_create):
            net_find.return_value = network
            subnet_find.return_value = subnet_models
            subnet_create.return_value = models.Subnet(
                network=models.Network(),
                cidr="192.168.1.1/24")
            yield subnet_create

    def test_create_subnet_overlapping_true(self):
        """Overlap allowed: creation succeeds regardless of existing cidrs."""
        cfg.CONF.set_override('allow_overlapping_ips', True)
        with self._stubs() as subnet_create:
            s = dict(subnet=dict(
                gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
                dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
                cidr="192.168.1.1/8",
                network_id=1))
            self.plugin.create_subnet(self.context, s)
            self.assertEqual(subnet_create.call_count, 1)

    def test_create_subnet_overlapping_false(self):
        """Overlap forbidden but no existing subnets: creation succeeds."""
        cfg.CONF.set_override('allow_overlapping_ips', False)
        with self._stubs() as subnet_create:
            s = dict(subnet=dict(
                gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
                dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
                cidr="192.168.1.1/8",
                network_id=1))
            self.plugin.create_subnet(self.context, s)
            self.assertEqual(subnet_create.call_count, 1)

    def test_create_subnet_overlapping_conflict(self):
        """Overlap forbidden with a conflicting cidr: InvalidInput raised."""
        cfg.CONF.set_override('allow_overlapping_ips', False)
        with self._stubs(subnets=[dict(cidr="192.168.10.1/24")]):
            with self.assertRaises(exceptions.InvalidInput):
                s = dict(subnet=dict(cidr="192.168.1.1/8",
                                     network_id=1))
                self.plugin.create_subnet(self.context, s)
class TestQuarkCreateSubnetAllocationPools(test_quark_plugin.TestQuarkPlugin):
    """Verify create_subnet reflects the requested allocation_pools.

    Each test pushes a different ``allocation_pools`` value through the
    plugin and asserts it is echoed back (or defaulted to the whole usable
    range when the key is omitted).
    """

    # NOTE(review): the original class carried setUp/tearDown overrides
    # that only called super(); they were removed -- the base class
    # versions run regardless.

    @contextlib.contextmanager
    def _stubs(self, subnet):
        """Patch the db layer so subnet_create returns *subnet* as a model."""
        s = models.Subnet(network=models.Network(id=1, subnets=[]))
        s.update(subnet)
        with contextlib.nested(
            mock.patch("quark.db.api.network_find"),
            mock.patch("quark.db.api.subnet_find"),
            mock.patch("quark.db.api.subnet_create"),
        ) as (net_find, subnet_find, subnet_create):
            net_find.return_value = s["network"]
            # No pre-existing subnets, so no overlap checks interfere.
            subnet_find.return_value = []
            subnet_create.return_value = s
            yield subnet_create

    def test_create_subnet_allocation_pools_zero(self):
        """Omitting allocation_pools defaults to the full usable range."""
        s = dict(subnet=dict(
            cidr="192.168.1.1/24",
            network_id=1))
        with self._stubs(s["subnet"]) as subnet_create:
            resp = self.plugin.create_subnet(self.context, s)
            self.assertEqual(subnet_create.call_count, 1)
            self.assertEqual(resp["allocation_pools"],
                             [dict(start="192.168.1.2", end="192.168.1.254")])

    def test_create_subnet_allocation_pools_one(self):
        """A single requested pool is echoed back unchanged."""
        pools = [dict(start="192.168.1.10", end="192.168.1.20")]
        s = dict(subnet=dict(
            allocation_pools=pools,
            cidr="192.168.1.1/24",
            network_id=1))
        with self._stubs(s["subnet"]) as subnet_create:
            resp = self.plugin.create_subnet(self.context, s)
            self.assertEqual(subnet_create.call_count, 1)
            self.assertEqual(resp["allocation_pools"], pools)

    def test_create_subnet_allocation_pools_two(self):
        """Multiple disjoint pools are preserved in order."""
        pools = [dict(start="192.168.1.10", end="192.168.1.20"),
                 dict(start="192.168.1.40", end="192.168.1.50")]
        s = dict(subnet=dict(
            allocation_pools=pools,
            cidr="192.168.1.1/24",
            network_id=1))
        with self._stubs(s["subnet"]) as subnet_create:
            resp = self.plugin.create_subnet(self.context, s)
            self.assertEqual(subnet_create.call_count, 1)
            self.assertEqual(resp["allocation_pools"], pools)

    def test_create_subnet_allocation_pools_empty_list(self):
        """An explicitly empty pool list stays empty (no defaulting)."""
        pools = []
        s = dict(subnet=dict(
            allocation_pools=pools,
            cidr="192.168.1.1/24",
            network_id=1))
        with self._stubs(s["subnet"]) as subnet_create:
            resp = self.plugin.create_subnet(self.context, s)
            self.assertEqual(subnet_create.call_count, 1)
            self.assertEqual(resp["allocation_pools"], pools)
# TODO(amir): Refactor these tests to assert on individual subnet attributes.
# * copy.deepcopy is necessary to keep the request dict's keys intact for the
#   assertions, which is a bit ugly.
# * a workaround is also in place for the lame ATTR_NOT_SPECIFIED object()
class TestQuarkCreateSubnet(test_quark_plugin.TestQuarkPlugin):
    """Tests for create_subnet: gateway defaults, DNS servers, host routes."""

    @contextlib.contextmanager
    def _stubs(self, subnet=None, network=None, routes=None, dns=None):
        """Patch the db layer for create_subnet.

        ``subnet`` is temporarily mutated: dns_nameservers/host_routes are
        popped before building the model (model rows carry them separately)
        and restored afterwards so callers can still assert on them.
        """
        if network:
            net = models.Network()
            net.update(network)
            network = net
        subnet_mod = models.Subnet(network=models.Network())
        dns_ips = subnet.pop("dns_nameservers", [])
        host_routes = subnet.pop("host_routes", [])
        subnet_mod.update(subnet)
        # Restore the popped keys on the caller's dict.
        subnet["dns_nameservers"] = dns_ips
        subnet["host_routes"] = host_routes
        routes = routes or []
        dns = dns or []
        route_models = [models.Route(**r) for r in routes]
        dns_models = [models.DNSNameserver(**d) for d in dns]
        with contextlib.nested(
            mock.patch("quark.db.api.subnet_create"),
            mock.patch("quark.db.api.network_find"),
            mock.patch("quark.db.api.dns_create"),
            mock.patch("quark.db.api.route_create"),
        ) as (subnet_create, net_find, dns_create, route_create):
            subnet_create.return_value = subnet_mod
            net_find.return_value = network
            # side_effect hands out one model per create call, in order.
            route_create.side_effect = route_models
            dns_create.side_effect = dns_models
            yield subnet_create, dns_create, route_create

    def test_create_subnet(self):
        """Baseline create: one subnet, one default route, no DNS."""
        routes = [dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
        subnet = dict(
            subnet=dict(network_id=1,
                        tenant_id=self.context.tenant_id, ip_version=4,
                        cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
                        dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
                        host_routes=neutron_attrs.ATTR_NOT_SPECIFIED,
                        enable_dhcp=None))
        network = dict(network_id=1)
        with self._stubs(
            subnet=subnet["subnet"],
            network=network,
            routes=routes
        ) as (subnet_create, dns_create, route_create):
            # Pop the sentinel values so the equality loop below only sees
            # plain attributes; the request gets its own deep copy.
            dns_nameservers = subnet["subnet"].pop("dns_nameservers")
            host_routes = subnet["subnet"].pop("host_routes")
            subnet_request = copy.deepcopy(subnet)
            subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
            subnet_request["subnet"]["host_routes"] = host_routes
            res = self.plugin.create_subnet(self.context,
                                            subnet_request)
            self.assertEqual(subnet_create.call_count, 1)
            self.assertEqual(dns_create.call_count, 0)
            self.assertEqual(route_create.call_count, 1)
            for key in subnet["subnet"].keys():
                if key == "host_routes":
                    self.assertEqual(res[key][0]["destination"], "0.0.0.0/0")
                    self.assertEqual(res[key][0]["nexthop"], "0.0.0.0")
                else:
                    self.assertEqual(res[key], subnet["subnet"][key])

    def test_create_subnet_no_network_fails(self):
        """Creating a subnet on a missing network raises NetworkNotFound."""
        subnet = dict(subnet=dict(network_id=1))
        with self._stubs(subnet=dict(), network=None):
            with self.assertRaises(exceptions.NetworkNotFound):
                self.plugin.create_subnet(self.context, subnet)

    def test_create_subnet_no_gateway_ip_defaults(self):
        """Omitted gateway_ip defaults to the first usable address."""
        routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.1")]
        subnet = dict(
            subnet=dict(network_id=1,
                        tenant_id=self.context.tenant_id, ip_version=4,
                        cidr="172.16.0.0/24",
                        gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
                        dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
                        enable_dhcp=None))
        network = dict(network_id=1)
        with self._stubs(
            subnet=subnet["subnet"],
            network=network,
            routes=routes
        ) as (subnet_create, dns_create, route_create):
            dns_nameservers = subnet["subnet"].pop("dns_nameservers")
            gateway_ip = subnet["subnet"].pop("gateway_ip")
            subnet_request = copy.deepcopy(subnet)
            subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
            subnet_request["subnet"]["gateway_ip"] = gateway_ip
            res = self.plugin.create_subnet(self.context, subnet_request)
            self.assertEqual(subnet_create.call_count, 1)
            self.assertEqual(dns_create.call_count, 0)
            self.assertEqual(route_create.call_count, 1)
            for key in subnet["subnet"].keys():
                if key == "gateway_ip":
                    self.assertEqual(res[key], "172.16.0.1")
                elif key == "host_routes":
                    self.assertEqual(res[key][0]["destination"], "0.0.0.0/0")
                    self.assertEqual(res[key][0]["nexthop"], "172.16.0.1")
                else:
                    self.assertEqual(res[key], subnet["subnet"][key])

    def test_create_subnet_dns_nameservers(self):
        """Explicit DNS servers create one DNS row each."""
        routes = [dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
        dns_ns = [dict(ip="4.2.2.1"), dict(ip="4.2.2.2")]
        subnet = dict(
            subnet=dict(network_id=1,
                        tenant_id=self.context.tenant_id, ip_version=4,
                        cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
                        dns_nameservers=["4.2.2.1", "4.2.2.2"],
                        enable_dhcp=None))
        network = dict(network_id=1)
        with self._stubs(
            subnet=subnet["subnet"],
            network=network,
            routes=routes,
            dns=dns_ns
        ) as (subnet_create, dns_create, route_create):
            res = self.plugin.create_subnet(self.context,
                                            copy.deepcopy(subnet))
            self.assertEqual(subnet_create.call_count, 1)
            self.assertEqual(dns_create.call_count, 2)
            self.assertEqual(route_create.call_count, 1)
            for key in subnet["subnet"].keys():
                if key == "host_routes":
                    self.assertEqual(res[key][0]["destination"], "0.0.0.0/0")
                    self.assertEqual(res[key][0]["nexthop"], "0.0.0.0")
                else:
                    self.assertEqual(res[key], subnet["subnet"][key])

    def test_create_subnet_routes(self):
        """Explicit host_routes are created alongside the default route."""
        routes = [dict(cidr="1.1.1.1/8", gateway="172.16.0.4"),
                  dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
        subnet = dict(
            subnet=dict(network_id=1,
                        tenant_id=self.context.tenant_id, ip_version=4,
                        cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
                        dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
                        host_routes=[{"destination": "1.1.1.1/8",
                                      "nexthop": "172.16.0.4"}],
                        enable_dhcp=None))
        network = dict(network_id=1)
        with self._stubs(
            subnet=subnet["subnet"],
            network=network,
            routes=routes
        ) as (subnet_create, dns_create, route_create):
            dns_nameservers = subnet["subnet"].pop("dns_nameservers")
            subnet_request = copy.deepcopy(subnet)
            subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
            res = self.plugin.create_subnet(self.context, subnet_request)
            self.assertEqual(subnet_create.call_count, 1)
            self.assertEqual(dns_create.call_count, 0)
            self.assertEqual(route_create.call_count, 2)
            for key in subnet["subnet"].keys():
                if key == "host_routes":
                    res_tuples = [(r["destination"], r["nexthop"])
                                  for r in res[key]]
                    self.assertIn(("1.1.1.1/8", "172.16.0.4"), res_tuples)
                    self.assertIn(("0.0.0.0/0", "0.0.0.0"), res_tuples)
                    self.assertEqual(2, len(res_tuples))
                else:
                    self.assertEqual(res[key], subnet["subnet"][key])

    def test_create_subnet_default_route(self):
        """A user default route supplies the gateway_ip when none is given."""
        routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
        subnet = dict(
            subnet=dict(network_id=1,
                        tenant_id=self.context.tenant_id, ip_version=4,
                        cidr="172.16.0.0/24",
                        gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
                        dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
                        host_routes=[{"destination": "0.0.0.0/0",
                                      "nexthop": "172.16.0.4"}],
                        enable_dhcp=None))
        network = dict(network_id=1)
        with self._stubs(
            subnet=subnet["subnet"],
            network=network,
            routes=routes
        ) as (subnet_create, dns_create, route_create):
            dns_nameservers = subnet["subnet"].pop("dns_nameservers")
            gateway_ip = subnet["subnet"].pop("gateway_ip")
            subnet_request = copy.deepcopy(subnet)
            subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
            subnet_request["subnet"]["gateway_ip"] = gateway_ip
            res = self.plugin.create_subnet(self.context, subnet_request)
            self.assertEqual(subnet_create.call_count, 1)
            self.assertEqual(dns_create.call_count, 0)
            self.assertEqual(route_create.call_count, 1)
            for key in subnet["subnet"].keys():
                if key == "host_routes":
                    res_tuples = [(r["destination"], r["nexthop"])
                                  for r in res[key]]
                    self.assertEqual([("0.0.0.0/0", "172.16.0.4")], res_tuples)
                elif key == "gateway_ip":
                    self.assertEqual(res[key], "172.16.0.4")
                else:
                    self.assertEqual(res[key], subnet["subnet"][key])

    def test_create_subnet_default_route_gateway_ip(self):
        """If default route (host_routes) and gateway_ip are both provided,
        then host_route takes precedence.
        """
        routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
        subnet = dict(
            subnet=dict(network_id=1,
                        tenant_id=self.context.tenant_id, ip_version=4,
                        cidr="172.16.0.0/24",
                        gateway_ip="172.16.0.3",
                        dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
                        host_routes=[{"destination": "0.0.0.0/0",
                                      "nexthop": "172.16.0.4"}],
                        enable_dhcp=None))
        network = dict(network_id=1)
        with self._stubs(
            subnet=subnet["subnet"],
            network=network,
            routes=routes
        ) as (subnet_create, dns_create, route_create):
            dns_nameservers = subnet["subnet"].pop("dns_nameservers")
            subnet_request = copy.deepcopy(subnet)
            subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
            res = self.plugin.create_subnet(self.context, subnet_request)
            self.assertEqual(subnet_create.call_count, 1)
            self.assertEqual(dns_create.call_count, 0)
            self.assertEqual(route_create.call_count, 1)
            for key in subnet["subnet"].keys():
                if key == "host_routes":
                    res_tuples = [(r["destination"], r["nexthop"])
                                  for r in res[key]]
                    self.assertEqual([("0.0.0.0/0", "172.16.0.4")], res_tuples)
                elif key == "gateway_ip":
                    # The host_routes nexthop wins over the explicit
                    # gateway_ip of 172.16.0.3.
                    self.assertEqual(res[key], "172.16.0.4")
                else:
                    self.assertEqual(res[key], subnet["subnet"][key])
class TestQuarkUpdateSubnet(test_quark_plugin.TestQuarkPlugin):
    """Tests for update_subnet: DNS, host routes, and gateway_ip interplay."""

    # Canonical pre-existing default route used by most tests.
    DEFAULT_ROUTE = [dict(destination="0.0.0.0/0",
                          nexthop="172.16.0.1")]

    @contextlib.contextmanager
    def _stubs(self, host_routes=None, new_routes=None, find_routes=True,
               new_dns_servers=None):
        """Patch the db layer around update_subnet.

        ``host_routes`` seeds the routes already on the stored subnet;
        ``new_routes``/``new_dns_servers`` shape what subnet_update returns;
        ``find_routes=False`` makes route_find miss even when routes exist
        (simulating no matching default route row).
        """
        if host_routes is None:
            host_routes = []
        if new_routes:
            new_routes = [models.Route(cidr=r["destination"],
                                       gateway=r["nexthop"],
                                       subnet_id=1)
                          for r in new_routes]
        if new_dns_servers:
            new_dns_servers = [models.DNSNameserver(
                ip=ip,
                subnet_id=1) for ip in new_dns_servers]

        # The subnet as stored in the db before the update.
        subnet = dict(
            id=1,
            network_id=1,
            tenant_id=self.context.tenant_id, ip_version=4,
            cidr="172.16.0.0/24",
            host_routes=host_routes,
            dns_nameservers=["4.2.2.1", "4.2.2.2"],
            enable_dhcp=None)

        dns_ips = subnet.pop("dns_nameservers", [])
        host_routes = subnet.pop("host_routes", [])
        subnet_mod = models.Subnet()
        subnet_mod.update(subnet)

        subnet_mod["dns_nameservers"] = [models.DNSNameserver(ip=ip)
                                         for ip in dns_ips]
        subnet_mod["routes"] = [models.Route(cidr=r["destination"],
                                             gateway=r["nexthop"],
                                             subnet_id=subnet_mod["id"])
                                for r in host_routes]
        with contextlib.nested(
            mock.patch("quark.db.api.subnet_find"),
            mock.patch("quark.db.api.subnet_update"),
            mock.patch("quark.db.api.dns_create"),
            mock.patch("quark.db.api.route_find"),
            mock.patch("quark.db.api.route_update"),
            mock.patch("quark.db.api.route_create"),
        ) as (subnet_find, subnet_update,
              dns_create,
              route_find, route_update, route_create):
            subnet_find.return_value = subnet_mod
            # route_find only hits when routes exist AND find_routes is set.
            route_find.return_value = subnet_mod["routes"][0] \
                if subnet_mod["routes"] and find_routes else None
            new_subnet_mod = models.Subnet(network=models.Network())
            new_subnet_mod.update(subnet_mod)
            if new_routes:
                new_subnet_mod["routes"] = new_routes
            if new_dns_servers:
                new_subnet_mod["dns_nameservers"] = new_dns_servers
            subnet_update.return_value = new_subnet_mod
            yield dns_create, route_update, route_create

    def test_update_subnet_not_found(self):
        """Updating a nonexistent subnet raises SubnetNotFound."""
        with self.assertRaises(exceptions.SubnetNotFound):
            self.plugin.update_subnet(self.context, 1, {})

    def test_update_subnet_dns_nameservers(self):
        """Replacing DNS servers creates new rows, leaves routes alone."""
        new_dns_servers = ["1.1.1.2"]
        with self._stubs(
            host_routes=self.DEFAULT_ROUTE,
            new_dns_servers=new_dns_servers
        ) as (dns_create, route_update, route_create):
            req = dict(subnet=dict(dns_nameservers=new_dns_servers))
            res = self.plugin.update_subnet(self.context,
                                            1,
                                            req)
            self.assertEqual(dns_create.call_count, 1)
            self.assertEqual(route_create.call_count, 0)
            self.assertEqual(res["dns_nameservers"], new_dns_servers)

    def test_update_subnet_routes(self):
        """Replacing host_routes with a non-default route clears gateway."""
        new_routes = [dict(destination="10.0.0.0/24",
                           nexthop="1.1.1.1")]
        with self._stubs(
            host_routes=self.DEFAULT_ROUTE,
            new_routes=new_routes
        ) as (dns_create, route_update, route_create):
            req = dict(subnet=dict(
                host_routes=new_routes))
            res = self.plugin.update_subnet(self.context, 1, req)
            self.assertEqual(dns_create.call_count, 0)
            self.assertEqual(route_create.call_count, 1)
            self.assertEqual(len(res["host_routes"]), 1)
            self.assertEqual(res["host_routes"][0]["destination"],
                             "10.0.0.0/24")
            self.assertEqual(res["host_routes"][0]["nexthop"],
                             "1.1.1.1")
            # No default route left, so the derived gateway_ip is gone.
            self.assertIsNone(res["gateway_ip"])

    def test_update_subnet_gateway_ip_with_default_route_in_db(self):
        """gateway_ip update rewrites the existing default route row."""
        with self._stubs(
            host_routes=self.DEFAULT_ROUTE,
            new_routes=[dict(destination="0.0.0.0/0", nexthop="1.2.3.4")]
        ) as (dns_create, route_update, route_create):
            req = dict(subnet=dict(gateway_ip="1.2.3.4"))
            res = self.plugin.update_subnet(self.context, 1, req)
            self.assertEqual(dns_create.call_count, 0)
            self.assertEqual(route_create.call_count, 0)
            self.assertEqual(route_update.call_count, 1)
            self.assertEqual(len(res["host_routes"]), 1)
            self.assertEqual(res["host_routes"][0]["destination"],
                             "0.0.0.0/0")
            self.assertEqual(res["host_routes"][0]["nexthop"],
                             "1.2.3.4")
            self.assertEqual(res["gateway_ip"], "1.2.3.4")

    def test_update_subnet_gateway_ip_with_non_default_route_in_db(self):
        """gateway_ip update adds a default route beside unrelated routes."""
        with self._stubs(
            host_routes=[dict(destination="1.1.1.1/8", nexthop="9.9.9.9")],
            find_routes=False,
            new_routes=[dict(destination="1.1.1.1/8", nexthop="9.9.9.9"),
                        dict(destination="0.0.0.0/0", nexthop="1.2.3.4")]
        ) as (dns_create, route_update, route_create):
            req = dict(subnet=dict(gateway_ip="1.2.3.4"))
            res = self.plugin.update_subnet(self.context, 1, req)
            self.assertEqual(dns_create.call_count, 0)
            self.assertEqual(route_create.call_count, 1)
            self.assertEqual(res["gateway_ip"], "1.2.3.4")
            self.assertEqual(len(res["host_routes"]), 2)
            res_tuples = [(r["destination"], r["nexthop"])
                          for r in res["host_routes"]]
            self.assertIn(("0.0.0.0/0", "1.2.3.4"), res_tuples)
            self.assertIn(("1.1.1.1/8", "9.9.9.9"), res_tuples)

    def test_update_subnet_gateway_ip_without_default_route_in_db(self):
        """gateway_ip update creates a default route where none existed."""
        with self._stubs(
            host_routes=None,
            new_routes=[dict(destination="0.0.0.0/0", nexthop="1.2.3.4")]
        ) as (dns_create, route_update, route_create):
            req = dict(subnet=dict(gateway_ip="1.2.3.4"))
            res = self.plugin.update_subnet(self.context, 1, req)
            self.assertEqual(dns_create.call_count, 0)
            self.assertEqual(route_create.call_count, 1)
            self.assertEqual(len(res["host_routes"]), 1)
            self.assertEqual(res["host_routes"][0]["destination"],
                             "0.0.0.0/0")
            self.assertEqual(res["host_routes"][0]["nexthop"],
                             "1.2.3.4")
            self.assertEqual(res["gateway_ip"], "1.2.3.4")

    def test_update_subnet_gateway_ip_with_default_route_in_args(self):
        """An explicit default host_route beats the gateway_ip argument."""
        new_routes = [dict(destination="0.0.0.0/0",
                           nexthop="4.3.2.1")]
        with self._stubs(
            host_routes=self.DEFAULT_ROUTE,
            new_routes=new_routes
        ) as (dns_create, route_update, route_create):
            req = dict(subnet=dict(
                host_routes=new_routes,
                gateway_ip="1.2.3.4"))
            res = self.plugin.update_subnet(self.context, 1, req)
            self.assertEqual(dns_create.call_count, 0)
            self.assertEqual(route_create.call_count, 1)
            self.assertEqual(len(res["host_routes"]), 1)
            self.assertEqual(res["host_routes"][0]["destination"],
                             "0.0.0.0/0")
            self.assertEqual(res["host_routes"][0]["nexthop"],
                             "4.3.2.1")
            self.assertEqual(res["gateway_ip"], "4.3.2.1")
class TestQuarkDeleteSubnet(test_quark_plugin.TestQuarkPlugin):
    """Tests for subnet deletion, including the allocated-IP guard."""

    @contextlib.contextmanager
    def _stubs(self, subnet, ips):
        """Patch the db layer with an optional subnet carrying *ips*."""
        found = None
        if subnet:
            found = models.Subnet()
            found.update(subnet)
        allocated = []
        for ip_fields in ips:
            addr = models.IPAddress()
            addr.update(ip_fields)
            allocated.append(addr)
        with mock.patch("quark.db.api.subnet_find") as sub_find, \
                mock.patch("quark.db.api.subnet_delete") as sub_delete:
            if found:
                found.allocated_ips = allocated
            sub_find.return_value = found
            yield sub_delete

    def test_delete_subnet(self):
        """An existing subnet with no allocations deletes cleanly."""
        with self._stubs(subnet=dict(id=1), ips=[]) as sub_delete:
            self.plugin.delete_subnet(self.context, 1)
            self.assertTrue(sub_delete.called)

    def test_delete_subnet_no_subnet_fails(self):
        """A missing subnet raises SubnetNotFound."""
        with self._stubs(subnet=None, ips=[]):
            with self.assertRaises(exceptions.SubnetNotFound):
                self.plugin.delete_subnet(self.context, 1)

    def test_delete_subnet_has_allocated_ips_fails(self):
        """A subnet with live IP allocations cannot be deleted."""
        with self._stubs(subnet=dict(id=1), ips=[{}]):
            with self.assertRaises(exceptions.SubnetInUse):
                self.plugin.delete_subnet(self.context, 1)
class TestSubnetsNotification(test_quark_plugin.TestQuarkPlugin):
    """Tests that subnet create/delete emit billing notifications."""

    @contextlib.contextmanager
    def _stubs(self, s, deleted_at=None):
        """Patch the db layer, notifier, and clock around one operation."""
        # Stand-in for session.begin() so no real transaction is opened.
        class FakeContext(object):
            def __enter__(*args, **kwargs):
                pass

            def __exit__(*args, **kwargs):
                pass
        self.context.session.begin = FakeContext

        s["network"] = models.Network()
        s["network"]["created_at"] = s["created_at"]
        subnet = models.Subnet(**s)
        db_mod = "quark.db.api"
        api_mod = "neutron.openstack.common.notifier.api"
        time_mod = "neutron.openstack.common.timeutils"
        with contextlib.nested(
            mock.patch("%s.subnet_find" % db_mod),
            mock.patch("%s.network_find" % db_mod),
            mock.patch("%s.subnet_create" % db_mod),
            mock.patch("%s.ip_policy_create" % db_mod),
            mock.patch("%s.subnet_delete" % db_mod),
            mock.patch("%s.notify" % api_mod),
            mock.patch("%s.utcnow" % time_mod)
        ) as (sub_find, net_find, sub_create, pol_cre, sub_del, notify,
              time_func):
            sub_create.return_value = subnet
            sub_find.return_value = subnet
            # Pin "now" so deletion timestamps are deterministic.
            time_func.return_value = deleted_at
            yield notify

    def test_create_subnet_notification(self):
        """Creating a subnet fires a single ip_block.create notification."""
        s = dict(network_id=1, cidr="192.168.10.0/24",
                 tenant_id=1, id=1, created_at="123")
        with self._stubs(s) as notify:
            self.plugin.create_subnet(self.context, dict(subnet=s))
            notify.assert_called_once_with(
                self.context,
                notifier_api.publisher_id("network"),
                "ip_block.create",
                notifier_api.CONF.default_notification_level,
                dict(tenant_id=s["tenant_id"],
                     ip_block_id=s["id"],
                     created_at=s["created_at"]))

    def test_delete_subnet_notification(self):
        """Deleting a subnet fires ip_block.delete with both timestamps."""
        # NOTE(review): now and later are both wall-clock strftime calls and
        # will usually be equal strings; the test only needs them to be the
        # values plumbed through -- confirm intent before tightening.
        now = time.strftime('%Y-%m-%d %H:%M:%S')
        later = time.strftime('%Y-%m-%d %H:%M:%S')
        s = dict(tenant_id=1, id=1, created_at=now)
        with self._stubs(s, deleted_at=later) as notify:
            self.plugin.delete_subnet(self.context, 1)
            notify.assert_called_once_with(
                self.context,
                notifier_api.publisher_id("network"),
                "ip_block.delete",
                notifier_api.CONF.default_notification_level,
                dict(tenant_id=s["tenant_id"],
                     created_at=s["created_at"],
                     ip_block_id=s["id"],
                     deleted_at=later))
class TestQuarkDiagnoseSubnets(test_quark_plugin.TestQuarkPlugin):
    """Tests for QuarkPlugin.diagnose_subnet() (wildcard and by-id)."""
    @contextlib.contextmanager
    def _stubs(self, subnets=None, routes=None):
        # subnet_find is patched to return:
        #   * a list of Subnet models when `subnets` is a list (wildcard "*"),
        #   * a single Subnet model when `subnets` is a dict (lookup by id),
        #   * None otherwise.
        # Each stubbed subnet carries the same stubbed route models.
        if routes is None:
            routes = []
        route_models = []
        for route in routes:
            r = models.Route()
            r.update(route)
            route_models.append(r)
        if isinstance(subnets, list):
            subnet_models = []
            for subnet in subnets:
                s_dict = subnet.copy()
                s_dict["routes"] = route_models
                s = models.Subnet(network=models.Network())
                s.update(s_dict)
                subnet_models.append(s)
        elif subnets:
            mod = models.Subnet(network=models.Network())
            mod.update(subnets)
            mod["routes"] = route_models
            subnet_models = mod
        else:
            subnet_models = None
        with mock.patch("quark.db.api.subnet_find") as subnet_find:
            subnet_find.return_value = subnet_models
            yield
    def test_diagnose_subnet_with_wildcard_id_no_existing_subnets(self):
        with self._stubs(subnets=[], routes=[]):
            expected = {'subnets': []}
            actual = self.plugin.diagnose_subnet(self.context, "*", None)
            self.assertEqual(expected, actual)
    def test_diagnose_subnet_with_wildcard_with_existing_subnets(self):
        # Wildcard returns a list under "subnets".
        subnet_id = str(uuid.uuid4())
        route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1")
        subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
                      tenant_id=self.context.tenant_id, ip_version=4,
                      cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
                      dns_nameservers=[],
                      enable_dhcp=None)
        with self._stubs(subnets=[subnet], routes=[route]):
            actual = self.plugin.diagnose_subnet(self.context, "*", None)
            self.maxDiff = None
            self.assertEqual(subnet["id"], actual["subnets"][0]["id"])
    def test_diagnose_subnet_with_regular_id(self):
        # A concrete id returns a single dict under "subnets" (not a list).
        subnet_id = "12345"
        route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1")
        subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
                      tenant_id=self.context.tenant_id, ip_version=4,
                      cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
                      dns_nameservers=[],
                      enable_dhcp=None)
        with self._stubs(subnets=subnet, routes=[route]):
            actual = self.plugin.diagnose_subnet(self.context, subnet_id, None)
            self.assertEqual(subnet["id"], actual["subnets"]["id"])
| apache-2.0 | 5,589,751,111,151,484,000 | 43.042373 | 79 | 0.546469 | false |
icoderaven/slytherin_dagger | src/utils.py | 1 | 1217 | #!/usr/bin/env python
import math
import numpy as np
#----------------------------------------------------------------------
#converts angles in degrees to radians
#----------------------------------------------------------------------
def deg_to_rad(angle):
    """Convert an angle from degrees to radians.

    Uses the standard library's math.radians instead of the hand-rolled
    angle*pi/180 formula (same result to within one ulp).
    """
    return math.radians(angle)
#----------------------------------------------------------------------
#converts angles in radians to degrees
#----------------------------------------------------------------------
def rad_to_deg(angle):
    """Convert an angle from radians to degrees.

    Uses the standard library's math.degrees instead of the hand-rolled
    angle*180/pi formula (same result to within one ulp).
    """
    return math.degrees(angle)
#----------------------------------------------------------------------
#converts ROS Point/Vector3 object to a numpy array
#----------------------------------------------------------------------
def convert_position_to_array(position):
    """Convert a ROS Point/Vector3-like object (attributes x, y, z) into a
    3-element float64 numpy array.

    dtype=float preserves the original behavior (np.zeros(3) buffer) even
    when the incoming attributes are ints.
    """
    return np.array([position.x, position.y, position.z], dtype=float)
#----------------------------------------------------------------------
#converts ROS Quaternion object to a numpy array
#----------------------------------------------------------------------
def convert_orientation_to_array(orientation):
    """Convert a ROS Quaternion-like object (attributes x, y, z, w) into a
    4-element float64 numpy array in (x, y, z, w) order.

    dtype=float preserves the original behavior (np.zeros(4) buffer) even
    when the incoming attributes are ints.
    """
    return np.array([orientation.x, orientation.y,
                     orientation.z, orientation.w], dtype=float)
| bsd-3-clause | 7,963,987,814,229,428,000 | 32.805556 | 72 | 0.380444 | false |
jirenz/CS229_Project | hearthbreaker/cards/minions/druid.py | 1 | 11367 | from hearthbreaker.cards.base import MinionCard, ChoiceCard
from hearthbreaker.game_objects import Minion
from hearthbreaker.tags.action import Give, Damage, Silence, Transform, Draw, Heal, \
Summon, AddCard, GiveManaCrystal, Remove, Kill
from hearthbreaker.tags.base import Choice, Buff, Effect, Battlecry, Deathrattle, ActionTag
from hearthbreaker.tags.card_source import CardList, ObjectSource
from hearthbreaker.tags.condition import IsType, GreaterThan
from hearthbreaker.tags.event import Damaged, TurnEnded
from hearthbreaker.tags.selector import CharacterSelector, MinionSelector, SelfSelector, UserPicker, BothPlayer, \
PlayerSelector, HeroSelector, Count, DeadMinionSelector
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from hearthbreaker.tags.status import ChangeAttack, ChangeHealth, Taunt, ManaChange
from hearthbreaker.cards.spells.neutral import spare_part_list
# --- Declarative Druid card definitions (no logic beyond construction). ---
# Choice cards shown to the player for Keeper of the Grove's two options.
class Moonfire(ChoiceCard):
    def __init__(self):
        super().__init__("Moonfire", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, ref_name="moonfire_keeper")
class Dispel(ChoiceCard):
    def __init__(self):
        super().__init__("Dispel", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
# "Choose One": deal 2 damage to any character, or silence a minion.
class KeeperOfTheGrove(MinionCard):
    def __init__(self):
        super().__init__("Keeper of the Grove", 4, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, choices=[
            Choice(Moonfire(), Damage(2), CharacterSelector(players=BothPlayer(), picker=UserPicker())),
            Choice(Dispel(), Silence(), MinionSelector(players=BothPlayer(), picker=UserPicker()))
        ])
    def create_minion(self, player):
        return Minion(2, 4)
# Transformation targets for Druid of the Claw's two forms (4/4 charge cat,
# 4/6 taunt bear); not collectible on their own.
class CatDruid(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
                         ref_name="Druid of the Claw (cat)")
    def create_minion(self, p):
        return Minion(4, 4, charge=True)
class BearDruid(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
                         ref_name="Druid of the Claw (bear)")
    def create_minion(self, p):
        return Minion(4, 6, taunt=True)
class CatForm(ChoiceCard):
    def __init__(self):
        super().__init__("Cat Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class BearForm(ChoiceCard):
    def __init__(self):
        super().__init__("Bear Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
# "Choose One": transform this minion into the cat or the bear form above.
class DruidOfTheClaw(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, choices=[
            Choice(CatForm(), Transform(CatDruid()), SelfSelector()),
            Choice(BearForm(), Transform(BearDruid()), SelfSelector())
        ])
    def create_minion(self, player):
        return Minion(4, 4)
class AncientSecrets(ChoiceCard):
    def __init__(self):
        super().__init__("Ancient Secrets", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class AncientTeachings(ChoiceCard):
    def __init__(self):
        super().__init__("Ancient Teachings", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
# "Choose One": heal your hero for 5, or draw 3 cards.
class AncientOfLore(MinionCard):
    def __init__(self):
        super().__init__("Ancient of Lore", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.EPIC, choices=[
            Choice(AncientSecrets(), Heal(5), HeroSelector()),
            Choice(AncientTeachings(), Draw(3), PlayerSelector())
        ])
    def create_minion(self, player):
        return Minion(5, 5)
class Health(ChoiceCard):
    def __init__(self):
        super().__init__("Rooted", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class Attack(ChoiceCard):
    def __init__(self):
        super().__init__("Uproot", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
# "Choose One": +5 health and taunt, or +5 attack.
class AncientOfWar(MinionCard):
    def __init__(self):
        super().__init__("Ancient of War", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.EPIC, choices=[
            Choice(Health(), Give([Buff(ChangeHealth(5)), Buff(Taunt())]), SelfSelector()),
            Choice(Attack(), Give([Buff(ChangeAttack(5))]), SelfSelector()),
        ])
    def create_minion(self, player):
        return Minion(5, 5)
class IronbarkProtector(MinionCard):
    def __init__(self):
        super().__init__("Ironbark Protector", 8, CHARACTER_CLASS.DRUID,
                         CARD_RARITY.COMMON)
    def create_minion(self, player):
        return Minion(8, 8, taunt=True)
# Treant variants share a name but differ in stats/effects; ref_name keeps
# them distinct in the card registry.
class TauntTreant(MinionCard):
    def __init__(self):
        super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, ref_name="Treant (taunt)")
    def create_minion(self, p):
        return Minion(2, 2, taunt=True)
class Treant(MinionCard):
    def __init__(self):
        super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
    def create_minion(self, _):
        return Minion(2, 2)
# Charge variant that kills itself when the turn ends (TurnEnded -> Kill).
class ChargeTreant(MinionCard):
    def __init__(self):
        super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, ref_name="Treant (charge)")
    def create_minion(self, player):
        return Minion(2, 2, charge=True, effects=[Effect(TurnEnded(), ActionTag(Kill(), SelfSelector()))])
class PoisonSeedsTreant(MinionCard):
    def __init__(self):
        super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False,
                         ref_name="Treant (poison seeds)")
    def create_minion(self, player):
        return Minion(2, 2)
class Panther(MinionCard):
    def __init__(self):
        super().__init__("Panther", 2, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST)
    def create_minion(self, _):
        return Minion(3, 2, MINION_TYPE.BEAST)
class IncreaseStats(ChoiceCard):
    def __init__(self):
        super().__init__("Give your other minions +2/+2 and taunt", 0,
                         CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, False)
class SummonTreants(ChoiceCard):
    def __init__(self):
        super().__init__("Summon two 2/2 Treants with taunt", 0,
                         CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, False)
# "Choose One": buff your other minions +2/+2 with taunt, or summon two
# taunt Treants.
class Cenarius(MinionCard):
    def __init__(self):
        super().__init__("Cenarius", 9, CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, choices=[
            Choice(IncreaseStats(), Give([Buff(ChangeAttack(2)),
                                          Buff(ChangeHealth(2)),
                                          Buff(Taunt())]), MinionSelector()),
            Choice(SummonTreants(), Summon(TauntTreant(), 2), PlayerSelector())
        ])
    def create_minion(self, player):
        return Minion(5, 8)
class AttackMode(ChoiceCard):
    def __init__(self):
        super().__init__("Attack Mode", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class TankMode(ChoiceCard):
    def __init__(self):
        super().__init__("Tank Mode", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
# Mech with "Choose One": +1 attack or +1 health.
class AnodizedRoboCub(MinionCard):
    def __init__(self):
        super().__init__("Anodized Robo Cub", 2, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON,
                         minion_type=MINION_TYPE.MECH,
                         choices=[Choice(AttackMode(), Give([Buff(ChangeAttack(1))]), SelfSelector()),
                                  Choice(TankMode(), Give([Buff(ChangeHealth(1))]), SelfSelector())])
    def create_minion(self, player):
        return Minion(2, 2, taunt=True)
# Whenever this minion takes damage, add a random Spare Part to hand.
class MechBearCat(MinionCard):
    def __init__(self):
        super().__init__("Mech-Bear-Cat", 6, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, minion_type=MINION_TYPE.MECH)
    def create_minion(self, player):
        return Minion(7, 6, effects=[Effect(Damaged(),
                                            ActionTag(AddCard(CardList(spare_part_list)), PlayerSelector()))])
# Battlecry transform target for Druid of the Fang.
class CobraForm(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Fang", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
                         ref_name="Druid of the Fang (cobra)")
    def create_minion(self, player):
        return Minion(7, 7)
# Battlecry: transform into the 7/7 cobra form, but only if the player
# controls at least one Beast (GreaterThan count condition).
class DruidOfTheFang(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Fang", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON,
                         battlecry=Battlecry(Transform(CobraForm()), SelfSelector(),
                                             GreaterThan(Count(MinionSelector(IsType(MINION_TYPE.BEAST))), value=0)))
    def create_minion(self, player):
        return Minion(4, 4)
# Deathrattle: shuffle this card back into the owner's deck
# (add_to_deck=True), then remove the dying minion itself.
class Malorne(MinionCard):
    def __init__(self):
        super().__init__("Malorne", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, minion_type=MINION_TYPE.BEAST)
    def create_minion(self, player):
        return Minion(9, 7, deathrattle=[Deathrattle(AddCard(ObjectSource(SelfSelector()),
                                                             add_to_deck=True), PlayerSelector()),
                                         Deathrattle(Remove(), SelfSelector())])
class GiftOfMana(ChoiceCard):
    def __init__(self):
        super().__init__("Gift of Mana", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE)
class GiftOfCards(ChoiceCard):
    def __init__(self):
        super().__init__("Gift of Cards", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE)
# "Choose One": both players gain a mana crystal, or both players draw.
class GroveTender(MinionCard):
    def __init__(self):
        super().__init__("Grove Tender", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, choices=[
            Choice(GiftOfMana(), GiveManaCrystal(), PlayerSelector(players=BothPlayer())),
            Choice(GiftOfCards(), Draw(), PlayerSelector(players=BothPlayer()))
        ])
    def create_minion(self, player):
        return Minion(2, 4)
# Transformation targets for Druid of the Flame (5/2 cat, 2/5 bird).
class FlameCat(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
                         ref_name="Druid of the Flame (cat)")
    def create_minion(self, p):
        return Minion(5, 2)
class FlameBird(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
                         ref_name="Druid of the Flame (bird)")
    def create_minion(self, p):
        return Minion(2, 5)
class FlameCatForm(ChoiceCard):
    def __init__(self):
        super().__init__("Flame Cat Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class FlameBirdForm(ChoiceCard):
    def __init__(self):
        super().__init__("Flame Bird Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class DruidOfTheFlame(MinionCard):
    def __init__(self):
        super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, choices=[
            Choice(FlameCatForm(), Transform(FlameCat()), SelfSelector()),
            Choice(FlameBirdForm(), Transform(FlameBird()), SelfSelector())
        ])
    def create_minion(self, player):
        return Minion(2, 2)
# Mana cost is reduced by 1 per dead minion on both sides (ManaChange buff
# driven by Count(DeadMinionSelector)).
class VolcanicLumberer(MinionCard):
    def __init__(self):
        super().__init__("Volcanic Lumberer", 9, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE,
                         buffs=[Buff(ManaChange(Count(DeadMinionSelector(players=BothPlayer())), -1))])
    def create_minion(self, player):
        return Minion(7, 8, taunt=True)
| mit | 5,221,729,277,601,726,000 | 34.521875 | 118 | 0.622064 | false |
opencivicdata/scrapers-ca | ca_ab_wood_buffalo/people.py | 1 | 2201 | from utils import CanadianScraper, CanadianPerson as Person
from collections import defaultdict
COUNCIL_PAGE = 'http://www.woodbuffalo.ab.ca/Municipal-Government/Mayor-and-Council/Councillor-Profiles.htm'  # index of councillor profiles
class WoodBuffaloPersonScraper(CanadianScraper):
    """Scrapes mayor and councillors for the RM of Wood Buffalo."""
    def scrape(self):
        # Wards 1 and 2 elect multiple councillors, so their districts get
        # "(seat N)" suffixes; other wards map one councillor to one district.
        seat_numbers = defaultdict(int)
        page = self.lxmlize(COUNCIL_PAGE)
        mayor_url = page.xpath('//li[@id="pageid1075"]/div/a/@href')[0]
        yield self.scrape_mayor(mayor_url)
        wards = page.xpath('//div[@id="content"]//h3')
        assert len(wards), 'No wards found'
        for ward in wards:
            area = ward.text_content()
            # Councillor links live in the first <ul> following each ward <h3>.
            councillors = ward.xpath('./following-sibling::ul[1]//a')
            assert len(councillors), 'No councillors found for ward {}'.format(area)
            for councillor in councillors:
                # Names are listed "Last, First" -- reverse to "First Last".
                name = ' '.join(reversed(councillor.text.split(', ')))
                url = councillor.attrib['href']
                if area in ('Ward 1', 'Ward 2'):
                    seat_numbers[area] += 1
                    district = '{} (seat {})'.format(area, seat_numbers[area])
                else:
                    district = area
                p = Person(primary_org='legislature', name=name, district=district, role='Councillor')
                p.add_source(COUNCIL_PAGE)
                p.add_source(url)
                # Photo and email come from the councillor's profile page.
                page = self.lxmlize(url)
                p.image = page.xpath('//div[@id="content"]//img[contains(@alt, "Councillor")]/@src')[0]
                email = self.get_email(page.xpath('//div[@id="content"]')[0])
                p.add_contact('email', email)
                yield p
    def scrape_mayor(self, url):
        """Build the mayor's Person record from their profile page."""
        page = self.lxmlize(url)
        name = page.xpath('//h1[@id="pagetitle"]/text()')[0].replace('Mayor', '').strip()
        image = page.xpath('//div[@id="content"]//@src')[0]
        p = Person(primary_org='legislature', name=name, district='Wood Buffalo', role='Mayor')
        p.add_source(url)
        p.image = image
        p.add_contact('voice', self.get_phone(page.xpath('//div[@id="icon5"]')[0]), 'legislature')
        p.add_contact('email', 'mayor@rmwb.ca')
        return p
| mit | -7,673,551,670,735,821,000 | 36.948276 | 108 | 0.562472 | false |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/multiple_features/results/test10_cross_validate_objects_1200ms_scaled_method_v_area_motion.py | 1 | 4600 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
    """Principal Component Analysis via the covariance method (Python 2).

    Args:
        X: data matrix with observations stacked as rows.  NOTE(review):
           the row-mean subtraction below only broadcasts correctly when X
           is a numpy *matrix* (mean(axis=1) -> column vector) -- confirm
           that Fmat_original is np.matrix-backed.

    Returns:
        (eigenvectors, eigenvalues, row means, centered data, covariance).
    """
    #get dimensions
    num_data,dim = X.shape
    #center data
    mean_X = X.mean(axis=1)
    M = (X-mean_X) # subtract the mean (along columns)
    Mcov = cov(M)
    ###### Sanity Check ######
    # NaN scan: x != x is True only for NaN.  NOTE(review): the bounds are
    # hard-coded to 82x140 instead of using num_data/dim, so this check is
    # wrong for any other matrix shape.
    i=0
    n=0
    while i < 82:
        j=0
        while j < 140:
            if X[i,j] != X[i,j]:
                print X[i,j]
                print i,j
                n=n+1
            j = j+1
        i=i+1
    print n
    ##########################
    print 'PCA - COV-Method used'
    val,vec = linalg.eig(Mcov)
    #return the projection matrix, the variance and the mean
    return vec,val,mean_X, M, Mcov
# Script entry point: stack two 41-row class blocks of the feature matrix,
# run PCA, project onto the first 20 principal components, and cross-validate
# a 1-NN classifier over the labeled objects using PyMVPA.
if __name__ == '__main__':
    Fmat = np.row_stack([Fmat_original[41:82,:], Fmat_original[82:123,:]])
    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    print 'Total_Matrix_Shape:',m_tot,n_tot
    eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
    #print eigvec_total
    #print eigval_total
    #print mean_data_total
    m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
    m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
    m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
    print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
    print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
    print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
    perc_total = cumsum(eigval_total)/sum(eigval_total)
    # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
    W = eigvec_total[:,0:20]
    m_W, n_W = np.shape(W)
    print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
    # Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
    length = len(eigval_total)
    s = np.matrix(np.zeros(length)).T
    i = 0
    while i < length:
        s[i] = sqrt(C[i,i])
        i = i+1
    Z = np.divide(B,s)
    m_Z, n_Z = np.shape(Z)
    print 'Z-Score Shape:', m_Z, n_Z
    #Projected Data:
    Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
    m_Y, n_Y = np.shape(Y.T)
    print 'Transposed Projected Data Shape:', m_Y, n_Y
    #Using PYMVPA
    # 28 object classes x 5 trials each, ordered to match the stacked rows.
    PCA_data = np.array(Y.T)
    PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Plush-Toy-Movable']*5 + ['Sponge-Movable']*5
    # Leave-one-out cross-validated transfer error of a 1-nearest-neighbor
    # classifier; the confusion matrix is collected for plotting below.
    clf = kNN(k=1)
    terr = TransferError(clf)
    ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
    print ds1.samples.shape
    cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
    error = cvterr(ds1)
    print error
    print cvterr.confusion.asstring(description=False)
    figure(1)
    cvterr.confusion.plot(numbers='True',numbers_alpha=2)
    #show()
    # Variances
    figure(2)
    title('Variances of PCs')
    stem(range(len(perc_total)),perc_total,'--b')
    axis([-0.3,130.3,0,1.2])
    grid('True')
    show()
| mit | 718,970,347,211,313,800 | 33.074074 | 666 | 0.646087 | false |
dc3-plaso/dfvfs | dfvfs/encoding/manager.py | 1 | 2019 | # -*- coding: utf-8 -*-
"""The encoding manager."""
class EncodingManager(object):
  """Class that implements the encoding manager.

  Decoders are registered per encoding method (case-insensitive) in the
  class-level _decoders registry.
  """

  _decoders = {}

  @classmethod
  def DeregisterDecoder(cls, decoder):
    """Deregisters a decoder for a specific encoding method.

    Args:
      decoder (type): decoder class.

    Raises:
      KeyError: if the corresponding decoder is not set.
    """
    encoding_method = decoder.ENCODING_METHOD.lower()
    if encoding_method not in cls._decoders:
      raise KeyError(
          u'Decoder for encoding method: {0:s} not set.'.format(
              decoder.ENCODING_METHOD))

    del cls._decoders[encoding_method]

  @classmethod
  def GetDecoder(cls, encoding_method):
    """Retrieves the decoder object for a specific encoding method.

    Args:
      encoding_method (str): encoding method identifier.

    Returns:
      Decoder: decoder instance or None if the encoding method does not
          exist.
    """
    encoding_method = encoding_method.lower()
    decoder = cls._decoders.get(encoding_method, None)
    if not decoder:
      return

    return decoder()

  @classmethod
  def RegisterDecoder(cls, decoder):
    """Registers a decoder for a specific encoding method.

    Args:
      decoder (type): decoder class.

    Raises:
      KeyError: if the corresponding decoder is already set.
    """
    encoding_method = decoder.ENCODING_METHOD.lower()
    if encoding_method in cls._decoders:
      raise KeyError(
          u'Decoder for encoding method: {0:s} already set.'.format(
              decoder.ENCODING_METHOD))

    cls._decoders[encoding_method] = decoder

  @classmethod
  def RegisterDecoders(cls, decoders):
    """Registers decoders.

    The decoders are identified based on their lower case encoding method.

    Args:
      decoders (list[type]): decoder classes.

    Raises:
      KeyError: if a decoder is already set for the corresponding
          encoding method.
    """
    # Bug fix: the previous loop shadowed `decoders` with its own loop
    # variable and recursively called RegisterDecoders() instead of
    # registering each class via RegisterDecoder().
    for decoder in decoders:
      cls.RegisterDecoder(decoder)
| apache-2.0 | 10,433,650,613,161,438 | 25.220779 | 74 | 0.662209 | false |
wiki-ai/revscoring | revscoring/features/wikitext/datasources/tokenized.py | 1 | 13882 | import re
from deltas import wikitext_split
from deltas import wikitext_split_w_cjk
from deltas.segmenters import ParagraphsSentencesAndWhitespace
from revscoring.datasources import Datasource
from revscoring.datasources.meta import filters, frequencies, mappers
from . import base
class Revision(base.BaseRevision):
    """Tokenized view of a revision's wikitext.

    Exposes the full token list plus per-token-type sublists and frequency
    tables as datasources, and a parallel `.cjk` view built with the
    CJK-aware tokenizer.
    """
    def __init__(self, name, revision_datasources, tokens_datasource=None):
        super().__init__(name, revision_datasources)
        if tokens_datasource is None:
            tokens_datasource = tokenized(revision_datasources.text)
        # Parallel Revision over the same text, tokenized with the
        # CJK-aware splitter instead of the default Latin one.
        self.cjk = Revision(self._name + ".cjk", revision_datasources, tokenized(revision_datasources.text, tok_strategy="CJK"))
        self.tokens = tokens_datasource
        """
        A list of all tokens
        """
        self.paragraphs_sentences_and_whitespace = Datasource(
            self._name + ".paragraphs_sentences_and_whitespace",
            paragraphs_sentences_and_whitespace.segment,
            depends_on=[self.tokens]
        )
        """
        A list of paragraphs, sentences, and whitespaces as segments. See
        :class:`deltas.segmenters.Segment` and
        :class:`deltas.segmenters.MatchableSegment`.
        """
        self.token_frequency = frequencies.table(
            self.tokens,
            name=self._name + ".token_frequency"
        )
        """
        A frequency table of all tokens.
        """
        self.numbers = self.tokens_in_types(
            {'number'}, name=self._name + ".numbers"
        )
        """
        A list of numeric tokens
        """
        self.number_frequency = frequencies.table(
            self.numbers, name=self._name + ".number_frequency"
        )
        """
        A frequency table of number tokens.
        """
        self.whitespaces = self.tokens_in_types(
            {'whitespace'}, name=self._name + ".whitespaces"
        )
        """
        A list of whitespace tokens
        """
        self.whitespace_frequency = frequencies.table(
            self.whitespaces, name=self._name + ".whitespace_frequency"
        )
        """
        A frequency table of whitespace tokens.
        """
        self.markups = self.tokens_in_types(
            {'dbrack_open', 'dbrack_close', 'brack_open', 'brack_close',
             'tab_open', 'tab_close', 'dcurly_open', 'dcurly_close',
             'curly_open', 'curly_close', 'bold', 'italics', 'equals'},
            name=self._name + ".markups"
        )
        """
        A list of markup tokens
        """
        self.markup_frequency = frequencies.table(
            self.markups, name=self._name + ".markup_frequency"
        )
        """
        A frequency table of markup tokens.
        """
        self.cjks = self.tokens_in_types(
            {'cjk_word'}, name=self._name + ".cjks"
        )
        """
        A list of Chinese/Japanese/Korean tokens
        """
        self.cjk_frequency = frequencies.table(
            self.cjks, name=self._name + ".cjk_frequency"
        )
        """
        A frequency table of cjk tokens.
        """
        self.entities = self.tokens_in_types(
            {'entity'}, name=self._name + ".entities"
        )
        """
        A list of HTML entity tokens
        """
        self.entity_frequency = frequencies.table(
            self.entities, name=self._name + ".entity_frequency"
        )
        """
        A frequency table of entity tokens.
        """
        self.urls = self.tokens_in_types(
            {'url'}, name=self._name + ".urls"
        )
        """
        A list of URL tokens
        """
        self.url_frequency = frequencies.table(
            self.urls, name=self._name + ".url_frequency"
        )
        """
        A frequency table of url tokens.
        """
        self.words = self.tokens_in_types(
            {'word'}, name=self._name + ".words"
        )
        """
        A list of word tokens
        """
        self.word_frequency = frequencies.table(
            mappers.lower_case(self.words),
            name=self._name + ".word_frequency"
        )
        """
        A frequency table of lower-cased word tokens.
        """
        self.uppercase_words = filters.filter(
            is_uppercase_word, self.words,
            name=self._name + ".uppercase_words"
        )
        """
        A list of uppercase word tokens that are at least two
        characters long.
        """
        self.uppercase_word_frequency = frequencies.table(
            self.uppercase_words,
            name=self._name + ".uppercase_word_frequency"
        )
        """
        A frequency table of uppercase word tokens that are at least two
        characters long.
        """
        self.punctuations = self.tokens_in_types(
            {'period', 'qmark', 'epoint', 'comma', 'colon', 'scolon',
             'japan_punct'},
            name=self._name + ".punctuations"
        )
        """
        A list of punctuation tokens
        """
        self.punctuation_frequency = frequencies.table(
            self.punctuations, name=self._name + ".punctuation_frequency"
        )
        """
        A frequency table of punctuation tokens.
        """
        self.breaks = self.tokens_in_types(
            {'break'}, name=self._name + ".breaks"
        )
        """
        A list of break tokens
        """
        self.break_frequency = frequencies.table(
            self.breaks, name=self._name + ".break_frequency"
        )
        """
        A frequency table of break tokens.
        """
    def tokens_in_types(self, types, name=None):
        """
        Constructs a :class:`revscoring.Datasource` that returns all content
        tokens that are within a set of types.
        """
        token_is_in_types = TokenIsInTypes(types)
        if name is None:
            name = "{0}({1})" \
                   .format(self._name + ".tokens_in_types", types)
        return filters.filter(token_is_in_types.filter,
                              self.tokens, name=name)
    def tokens_matching(self, regex, name=None, regex_flags=re.I):
        """
        Constructs a :class:`revscoring.Datasource` that returns all content
        tokens that match a regular expression.
        """
        # Accept either a pre-compiled pattern (has .pattern) or a string.
        if not hasattr(regex, "pattern"):
            regex = re.compile(regex, regex_flags)
        if name is None:
            name = "{0}({1})" \
                   .format(self._name + ".tokens_matching", regex.pattern)
        return filters.regex_matching(regex, self.tokens,
                                      name=name)
class Diff(base.BaseDiff):
    """Token-frequency deltas between a revision and its parent.

    For each token class exposed by :class:`Revision` (tokens, numbers,
    whitespace, markup, cjk, entities, urls, words, uppercase words,
    punctuation, breaks) this provides an absolute frequency delta table
    and a proportional delta table relative to the parent's frequencies.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_delta = frequencies.delta(
            self.revision.parent.token_frequency,
            self.revision.token_frequency,
            name=self._name + ".token_delta"
        )
        """
        A token frequency delta table
        """
        self.token_prop_delta = frequencies.prop_delta(
            self.revision.parent.token_frequency,
            self.token_delta,
            name=self._name + ".token_prop_delta"
        )
        """
        A token proportional frequency delta table
        """
        self.number_delta = frequencies.delta(
            self.revision.parent.number_frequency,
            self.revision.number_frequency,
            name=self._name + ".number_delta"
        )
        """
        A number frequency delta table
        """
        self.number_prop_delta = frequencies.prop_delta(
            self.revision.parent.number_frequency,
            self.number_delta,
            name=self._name + ".number_prop_delta"
        )
        """
        A number proportional frequency delta table
        """
        self.whitespace_delta = frequencies.delta(
            self.revision.parent.whitespace_frequency,
            self.revision.whitespace_frequency,
            name=self._name + ".whitespace_delta"
        )
        """
        A whitespace frequency delta table
        """
        self.whitespace_prop_delta = frequencies.prop_delta(
            self.revision.parent.whitespace_frequency,
            self.whitespace_delta,
            name=self._name + ".whitespace_prop_delta"
        )
        """
        A whitespace proportional frequency delta table
        """
        self.markup_delta = frequencies.delta(
            self.revision.parent.markup_frequency,
            self.revision.markup_frequency,
            name=self._name + ".markup_delta"
        )
        """
        A markup frequency delta table
        """
        self.markup_prop_delta = frequencies.prop_delta(
            self.revision.parent.markup_frequency,
            self.markup_delta,
            name=self._name + ".markup_prop_delta"
        )
        """
        A markup proportional frequency delta table
        """
        self.cjk_delta = frequencies.delta(
            self.revision.parent.cjk_frequency,
            self.revision.cjk_frequency,
            name=self._name + ".cjk_delta"
        )
        """
        A cjk frequency delta table
        """
        self.cjk_prop_delta = frequencies.prop_delta(
            self.revision.parent.cjk_frequency,
            self.cjk_delta,
            name=self._name + ".cjk_prop_delta"
        )
        """
        A cjk proportional frequency delta table
        """
        self.entity_delta = frequencies.delta(
            self.revision.parent.entity_frequency,
            self.revision.entity_frequency,
            name=self._name + ".entity_delta"
        )
        """
        A entity frequency delta table
        """
        self.entity_prop_delta = frequencies.prop_delta(
            self.revision.parent.entity_frequency,
            self.entity_delta,
            name=self._name + ".entity_prop_delta"
        )
        """
        A entity proportional frequency delta table
        """
        self.url_delta = frequencies.delta(
            self.revision.parent.url_frequency,
            self.revision.url_frequency,
            name=self._name + ".url_delta"
        )
        """
        A url frequency delta table
        """
        self.url_prop_delta = frequencies.prop_delta(
            self.revision.parent.url_frequency,
            self.url_delta,
            name=self._name + ".url_prop_delta"
        )
        """
        A url proportional frequency delta table
        """
        self.word_delta = frequencies.delta(
            self.revision.parent.word_frequency,
            self.revision.word_frequency,
            name=self._name + ".word_delta"
        )
        """
        A lower-cased word frequency delta table
        """
        self.word_prop_delta = frequencies.prop_delta(
            self.revision.parent.word_frequency,
            self.word_delta,
            name=self._name + ".word_prop_delta"
        )
        """
        A lower-cased word proportional frequency delta table
        """
        self.uppercase_word_delta = frequencies.delta(
            self.revision.parent.uppercase_word_frequency,
            self.revision.uppercase_word_frequency,
            name=self._name + ".uppercase_word_delta"
        )
        """
        A uppercase word frequency delta table
        """
        self.uppercase_word_prop_delta = frequencies.prop_delta(
            self.revision.parent.uppercase_word_frequency,
            self.uppercase_word_delta,
            name=self._name + ".uppercase_word_prop_delta"
        )
        """
        A uppercase word proportional frequency delta table
        """
        self.punctuation_delta = frequencies.delta(
            self.revision.parent.punctuation_frequency,
            self.revision.punctuation_frequency,
            name=self._name + ".punctuation_delta"
        )
        """
        A punctuation frequency delta table
        """
        self.punctuation_prop_delta = frequencies.prop_delta(
            self.revision.parent.punctuation_frequency,
            self.punctuation_delta,
            name=self._name + ".punctuation_prop_delta"
        )
        """
        A punctuation proportional frequency delta table
        """
        self.break_delta = frequencies.delta(
            self.revision.parent.break_frequency,
            self.revision.break_frequency,
            name=self._name + ".break_delta"
        )
        """
        A break frequency delta table
        """
        self.break_prop_delta = frequencies.prop_delta(
            self.revision.parent.break_frequency,
            self.break_delta,
            name=self._name + ".break_prop_delta"
        )
        """
        A break proportional frequency delta table
        """
def is_uppercase_word(word_token):
    """Return True if `word_token` is longer than one character and every
    character in it changes under `str.lower()` (i.e. is uppercase)."""
    if len(word_token) <= 1:
        return False
    return all(ch.lower() != ch for ch in word_token)
class TokenIsInTypes:
    """Predicate object that accepts tokens whose `type` attribute belongs
    to a fixed set of type names."""

    def __init__(self, types):
        # Materialize once as a set for O(1) membership tests in filter().
        self.types = set(types)

    def filter(self, token):
        """Return True if `token.type` is one of the accepted types."""
        accepted = self.types
        return token.type in accepted
def _process_tokens(text):
    """Tokenize `text` with the Latin wikitext tokenizer, returning a list.

    A None (or otherwise falsy) `text` is treated as the empty string so the
    tokenizer always receives a string.
    """
    # list(...) replaces the identity comprehension [t for t in ...] — same
    # result, clearer intent (ruff C416).
    return list(wikitext_split.tokenize(text or ""))
def _process_tokens_cjk(text):
    """Tokenize `text` with the CJK-aware wikitext tokenizer, returning a list.

    A None (or otherwise falsy) `text` is treated as the empty string so the
    tokenizer always receives a string.
    """
    # list(...) replaces the identity comprehension [t for t in ...] — same
    # result, clearer intent (ruff C416).
    return list(wikitext_split_w_cjk.tokenize(text or ""))
def tokenized(text_datasource, name=None, tok_strategy="Latin"):
    """
    Constructs a :class:`revision.Datasource` that generates a list of tokens.

    :Parameters:
        text_datasource : :class:`revision.Datasource`
            Datasource yielding the text to tokenize
        name : `str`
            Explicit name for the new datasource; auto-generated when None
        tok_strategy : `str`
            Tokenization strategy -- "Latin" or "CJK"

    :Raises:
        NotImplementedError
            If `tok_strategy` is not one of the supported strategies.
    """
    if name is None:
        name = "{0}({1!r}, {2!r})".format("tokenized", text_datasource, tok_strategy)
    if tok_strategy == "Latin":
        return Datasource(
            name, _process_tokens, depends_on=[text_datasource]
        )
    elif tok_strategy == "CJK":
        return Datasource(
            name, _process_tokens_cjk, depends_on=[text_datasource]
        )
    else:
        # Include the offending value so the failure is diagnosable; a bare
        # NotImplementedError gives the caller nothing to go on.
        raise NotImplementedError(
            "Unsupported tok_strategy: {0!r}".format(tok_strategy))
# Module-level default instance built with no arguments. The class is defined
# elsewhere in this module; presumably this exposes the standard paragraph /
# sentence / whitespace segmentation configuration -- confirm against its
# definition.
paragraphs_sentences_and_whitespace = ParagraphsSentencesAndWhitespace()
| mit | -5,460,136,003,285,908 | 28.411017 | 132 | 0.548624 | false |
################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# Please refer to the README for information about making permanent changes. #
################################################################################
from __future__ import print_function

import os
import re
import sys
from ctypes.util import find_library

from pyczmq._cffi import ffi
# Select the platform-specific czmq library name. If LD_LIBRARY_PATH or your
# OSs equivalent is set, this is the only way to load the library. If we use
# find_library below, we get the wrong result.
if os.name == 'posix':
    if sys.platform == 'darwin':
        libpath = 'libczmq.3.dylib'
    else:
        libpath = 'libczmq.so.3'
elif os.name == 'nt':
    libpath = 'libczmq.dll'
else:
    # Previously an unknown os.name left `libpath` unbound and crashed with a
    # NameError inside the try block; fail with an explicit import error.
    raise ImportError("Unsupported platform for libczmq: %r" % os.name)
try:
    lib = ffi.dlopen(libpath)
except OSError:
    # Fall back to searching the standard system locations. Requires
    # ctypes.util.find_library, which was referenced here without being
    # imported (NameError on this path before the fix).
    libpath = find_library("czmq")
    if not libpath:
        raise ImportError("Unable to find libczmq")
    lib = ffi.dlopen(libpath)
# Custom setup for czmq: primitive typedefs and generic callback signatures
# that the generated declarations rely on. The `int...` forms let cffi infer
# the exact size of platform-dependent integer types at verification time.
ffi.cdef('''
typedef int... time_t;
typedef int... off_t;
typedef unsigned char byte; // Single unsigned byte = 8 bits
typedef unsigned short dbyte; // Double byte = 16 bits
typedef unsigned int qbyte; // Quad byte = 32 bits
typedef int SOCKET;
// -- destroy an item
typedef void (czmq_destructor) (void **item);
// -- duplicate an item
typedef void *(czmq_duplicator) (const void *item);
// - compare two items, for sorting
typedef int (czmq_comparator) (const void *item1, const void *item2);
''')
cdefs = '''
typedef struct _zactor_t zactor_t;
typedef struct _zsock_t zsock_t;
typedef struct _zmsg_t zmsg_t;
typedef struct _zarmour_t zarmour_t;
typedef struct _char_t char_t;
typedef struct _zcert_t zcert_t;
typedef struct _zlist_t zlist_t;
typedef struct _zcertstore_t zcertstore_t;
typedef struct _zchunk_t zchunk_t;
typedef struct _zframe_t zframe_t;
typedef struct _zclock_t zclock_t;
typedef struct _msecs_t msecs_t;
typedef struct _zconfig_t zconfig_t;
typedef struct _zdigest_t zdigest_t;
typedef struct _zdir_t zdir_t;
typedef struct _zhash_t zhash_t;
typedef struct _zdir_patch_t zdir_patch_t;
typedef struct _zfile_t zfile_t;
typedef struct _zhashx_t zhashx_t;
typedef struct _zlistx_t zlistx_t;
typedef struct _ziflist_t ziflist_t;
typedef struct _zloop_t zloop_t;
typedef struct _zmq_pollitem_t zmq_pollitem_t;
typedef struct _zpoller_t zpoller_t;
typedef struct _zproc_t zproc_t;
typedef struct _va_list_t va_list_t;
typedef struct _socket_t socket_t;
typedef struct _zstr_t zstr_t;
typedef struct _ztrie_t ztrie_t;
typedef struct _zuuid_t zuuid_t;
// Actors get a pipe and arguments from caller
typedef void (zactor_fn) (
zsock_t *pipe, void *args);
typedef enum {
ZARMOUR_MODE_BASE64_STD = 0, // Standard base 64
ZARMOUR_MODE_BASE64_URL = 1, // URL and filename friendly base 64
ZARMOUR_MODE_BASE32_STD = 2, // Standard base 32
ZARMOUR_MODE_BASE32_HEX = 3, // Extended hex base 32
ZARMOUR_MODE_BASE16 = 4, // Standard base 16
ZARMOUR_MODE_Z85 = 5 // Z85 from ZeroMQ RFC 32
} zarmour_mode_t;
//
typedef int (zconfig_fct) (
zconfig_t *self, void *arg, int level);
typedef enum {
ZDIR_PATCH_CREATE = 1, //
ZDIR_PATCH_DELETE = 2 //
} zdir_patch_op_t;
// Callback function for zhash_freefn method
typedef void (zhash_free_fn) (
void *data);
// Callback function for zhash_foreach method. Deprecated.
typedef int (zhash_foreach_fn) (
const char *key, void *item, void *argument);
// Destroy an item
typedef void (zhashx_destructor_fn) (
void **item);
// Duplicate an item
typedef void * (zhashx_duplicator_fn) (
const void *item);
// Compare two items, for sorting
typedef int (zhashx_comparator_fn) (
const void *item1, const void *item2);
// compare two items, for sorting
typedef void (zhashx_free_fn) (
void *data);
// compare two items, for sorting
typedef size_t (zhashx_hash_fn) (
const void *key);
// DEPRECATED as clumsy -- use zhashx_first/_next instead
typedef int (zhashx_foreach_fn) (
const char *key, void *item, void *argument);
// Comparison function e.g. for sorting and removing.
typedef int (zlist_compare_fn) (
void *item1, void *item2);
// Callback function for zlist_freefn method
typedef void (zlist_free_fn) (
void *data);
// Destroy an item
typedef void (zlistx_destructor_fn) (
void **item);
// Duplicate an item
typedef void * (zlistx_duplicator_fn) (
const void *item);
// Compare two items, for sorting
typedef int (zlistx_comparator_fn) (
const void *item1, const void *item2);
// Callback function for reactor socket activity
typedef int (zloop_reader_fn) (
zloop_t *loop, zsock_t *reader, void *arg);
// Callback function for reactor events (low-level)
typedef int (zloop_fn) (
zloop_t *loop, zmq_pollitem_t *item, void *arg);
// Callback for reactor timer events
typedef int (zloop_timer_fn) (
zloop_t *loop, int timer_id, void *arg);
// Callback function for ztrie_node to destroy node data.
typedef void (ztrie_destroy_data_fn) (
void **data);
// CLASS: zactor
// Create a new actor passing arbitrary arguments reference.
zactor_t *
zactor_new (zactor_fn task, void *args);
// Destroy an actor.
void
zactor_destroy (zactor_t **self_p);
// Send a zmsg message to the actor, take ownership of the message
// and destroy when it has been sent.
int
zactor_send (zactor_t *self, zmsg_t **msg_p);
// Receive a zmsg message from the actor. Returns NULL if the actor
// was interrupted before the message could be received, or if there
// was a timeout on the actor.
zmsg_t *
zactor_recv (zactor_t *self);
// Probe the supplied object, and report if it looks like a zactor_t.
bool
zactor_is (void *self);
// Probe the supplied reference. If it looks like a zactor_t instance,
// return the underlying libzmq actor handle; else if it looks like
// a libzmq actor handle, return the supplied value.
void *
zactor_resolve (void *self);
// Return the actor's zsock handle. Use this when you absolutely need
// to work with the zsock instance rather than the actor.
zsock_t *
zactor_sock (zactor_t *self);
// Self test of this class.
void
zactor_test (bool verbose);
// CLASS: zarmour
// Create a new zarmour.
zarmour_t *
zarmour_new (void);
// Destroy the zarmour.
void
zarmour_destroy (zarmour_t **self_p);
// Encode a stream of bytes into an armoured string. Returns the armoured
// string, or NULL if there was insufficient memory available to allocate
// a new string.
char *
zarmour_encode (zarmour_t *self, const byte *data, size_t size);
// Decode an armoured string into a string of bytes.
// The decoded output is null-terminated, so it may be treated
// as a string, if that's what it was prior to encoding.
byte *
zarmour_decode (zarmour_t *self, const char *data, size_t *decode_size);
// Get the mode property.
zarmour_mode_t
zarmour_mode (zarmour_t *self);
// Get printable string for mode.
const char *
zarmour_mode_str (zarmour_t *self);
// Set the mode property.
void
zarmour_set_mode (zarmour_t *self, zarmour_mode_t mode);
// Return true if padding is turned on.
bool
zarmour_pad (zarmour_t *self);
// Turn padding on or off. Default is on.
void
zarmour_set_pad (zarmour_t *self, bool pad);
// Get the padding character.
char
zarmour_pad_char (zarmour_t *self);
// Set the padding character.
void
zarmour_set_pad_char (zarmour_t *self, char pad_char);
// Return if splitting output into lines is turned on. Default is off.
bool
zarmour_line_breaks (zarmour_t *self);
// Turn splitting output into lines on or off.
void
zarmour_set_line_breaks (zarmour_t *self, bool line_breaks);
// Get the line length used for splitting lines.
size_t
zarmour_line_length (zarmour_t *self);
// Set the line length used for splitting lines.
void
zarmour_set_line_length (zarmour_t *self, size_t line_length);
// Print properties of object
void
zarmour_print (zarmour_t *self);
// Self test of this class.
void
zarmour_test (bool verbose);
// CLASS: zcert
// Create and initialize a new certificate in memory
zcert_t *
zcert_new (void);
// Accepts public/secret key pair from caller
zcert_t *
zcert_new_from (const byte *public_key, const byte *secret_key);
// Load certificate from file
zcert_t *
zcert_load (const char *filename);
// Destroy a certificate in memory
void
zcert_destroy (zcert_t **self_p);
// Return public part of key pair as 32-byte binary string
byte *
zcert_public_key (zcert_t *self);
// Return secret part of key pair as 32-byte binary string
byte *
zcert_secret_key (zcert_t *self);
// Return public part of key pair as Z85 armored string
char *
zcert_public_txt (zcert_t *self);
// Return secret part of key pair as Z85 armored string
char *
zcert_secret_txt (zcert_t *self);
// Set certificate metadata from formatted string.
void
zcert_set_meta (zcert_t *self, const char *name, const char *format, ...);
// Unset certificate metadata.
void
zcert_unset_meta (zcert_t *self, const char *name);
// Get metadata value from certificate; if the metadata value doesn't
// exist, returns NULL.
char *
zcert_meta (zcert_t *self, const char *name);
// Get list of metadata fields from certificate. Caller is responsible for
// destroying list. Caller should not modify the values of list items.
zlist_t *
zcert_meta_keys (zcert_t *self);
// Save full certificate (public + secret) to file for persistent storage
// This creates one public file and one secret file (filename + "_secret").
int
zcert_save (zcert_t *self, const char *filename);
// Save public certificate only to file for persistent storage
int
zcert_save_public (zcert_t *self, const char *filename);
// Save secret certificate only to file for persistent storage
int
zcert_save_secret (zcert_t *self, const char *filename);
// Apply certificate to socket, i.e. use for CURVE security on socket.
// If certificate was loaded from public file, the secret key will be
// undefined, and this certificate will not work successfully.
void
zcert_apply (zcert_t *self, void *zocket);
// Return copy of certificate; if certificate is NULL or we exhausted
// heap memory, returns NULL.
zcert_t *
zcert_dup (zcert_t *self);
// Return true if two certificates have the same keys
bool
zcert_eq (zcert_t *self, zcert_t *compare);
// Print certificate contents to stdout
void
zcert_print (zcert_t *self);
// DEPRECATED as incompatible with centralized logging
// Print certificate contents to open stream
void
zcert_fprint (zcert_t *self, FILE *file);
// Self test of this class
void
zcert_test (bool verbose);
// CLASS: zcertstore
// Create a new certificate store from a disk directory, loading and
// indexing all certificates in that location. The directory itself may be
// absent, and created later, or modified at any time. The certificate store
// is automatically refreshed on any zcertstore_lookup() call. If the
// location is specified as NULL, creates a pure-memory store, which you
// can work with by inserting certificates at runtime.
zcertstore_t *
zcertstore_new (const char *location);
// Destroy a certificate store object in memory. Does not affect anything
// stored on disk.
void
zcertstore_destroy (zcertstore_t **self_p);
// Look up certificate by public key, returns zcert_t object if found,
// else returns NULL. The public key is provided in Z85 text format.
zcert_t *
zcertstore_lookup (zcertstore_t *self, const char *public_key);
// Insert certificate into certificate store in memory. Note that this
// does not save the certificate to disk. To do that, use zcert_save()
// directly on the certificate. Takes ownership of zcert_t object.
void
zcertstore_insert (zcertstore_t *self, zcert_t **cert_p);
// Print list of certificates in store to logging facility
void
zcertstore_print (zcertstore_t *self);
// DEPRECATED as incompatible with centralized logging
// Print list of certificates in store to open stream
void
zcertstore_fprint (zcertstore_t *self, FILE *file);
// Self test of this class
void
zcertstore_test (bool verbose);
// CLASS: zchunk
// Create a new chunk of the specified size. If you specify the data, it
// is copied into the chunk. If you do not specify the data, the chunk is
// allocated and left empty, and you can then add data using zchunk_append.
zchunk_t *
zchunk_new (const void *data, size_t size);
// Destroy a chunk
void
zchunk_destroy (zchunk_t **self_p);
// Resizes chunk max_size as requested; chunk_cur size is set to zero
void
zchunk_resize (zchunk_t *self, size_t size);
// Return chunk cur size
size_t
zchunk_size (zchunk_t *self);
// Return chunk max size
size_t
zchunk_max_size (zchunk_t *self);
// Return chunk data
byte *
zchunk_data (zchunk_t *self);
// Set chunk data from user-supplied data; truncate if too large. Data may
// be null. Returns actual size of chunk
size_t
zchunk_set (zchunk_t *self, const void *data, size_t size);
// Fill chunk data from user-supplied octet
size_t
zchunk_fill (zchunk_t *self, byte filler, size_t size);
// Append user-supplied data to chunk, return resulting chunk size. If the
// data would exceeded the available space, it is truncated. If you want to
// grow the chunk to accommodate new data, use the zchunk_extend method.
size_t
zchunk_append (zchunk_t *self, const void *data, size_t size);
// Append user-supplied data to chunk, return resulting chunk size. If the
// data would exceeded the available space, the chunk grows in size.
size_t
zchunk_extend (zchunk_t *self, const void *data, size_t size);
// Copy as much data from 'source' into the chunk as possible; returns the
// new size of chunk. If all data from 'source' is used, returns exhausted
// on the source chunk. Source can be consumed as many times as needed until
// it is exhausted. If source was already exhausted, does not change chunk.
size_t
zchunk_consume (zchunk_t *self, zchunk_t *source);
// Returns true if the chunk was exhausted by consume methods, or if the
// chunk has a size of zero.
bool
zchunk_exhausted (zchunk_t *self);
// Read chunk from an open file descriptor
zchunk_t *
zchunk_read (FILE *handle, size_t bytes);
// Write chunk to an open file descriptor
int
zchunk_write (zchunk_t *self, FILE *handle);
// Try to slurp an entire file into a chunk. Will read up to maxsize of
// the file. If maxsize is 0, will attempt to read the entire file and
// fail with an assertion if that cannot fit into memory. Returns a new
// chunk containing the file data, or NULL if the file could not be read.
zchunk_t *
zchunk_slurp (const char *filename, size_t maxsize);
// Create copy of chunk, as new chunk object. Returns a fresh zchunk_t
// object, or null if there was not enough heap memory. If chunk is null,
// returns null.
zchunk_t *
zchunk_dup (zchunk_t *self);
// Return chunk data encoded as printable hex string. Caller must free
// string when finished with it.
char *
zchunk_strhex (zchunk_t *self);
// Return chunk data copied into freshly allocated string
// Caller must free string when finished with it.
char *
zchunk_strdup (zchunk_t *self);
// Return TRUE if chunk body is equal to string, excluding terminator
bool
zchunk_streq (zchunk_t *self, const char *string);
// Transform zchunk into a zframe that can be sent in a message.
zframe_t *
zchunk_pack (zchunk_t *self);
// Transform a zframe into a zchunk.
zchunk_t *
zchunk_unpack (zframe_t *frame);
// Calculate SHA1 digest for chunk, using zdigest class.
const char *
zchunk_digest (zchunk_t *self);
// Dump chunk to FILE stream, for debugging and tracing.
void
zchunk_fprint (zchunk_t *self, FILE *file);
// Dump message to stderr, for debugging and tracing.
// See zchunk_fprint for details
void
zchunk_print (zchunk_t *self);
// Probe the supplied object, and report if it looks like a zchunk_t.
bool
zchunk_is (void *self);
// Self test of this class.
void
zchunk_test (bool verbose);
// CLASS: zclock
// Sleep for a number of milliseconds
void
zclock_sleep (int msecs);
// Return current system clock as milliseconds. Note that this clock can
// jump backwards (if the system clock is changed) so is unsafe to use for
// timers and time offsets. Use zclock_mono for that instead.
int64_t
zclock_time (void);
// Return current monotonic clock in milliseconds. Use this when you compute
// time offsets. The monotonic clock is not affected by system changes and
// so will never be reset backwards, unlike a system clock.
int64_t
zclock_mono (void);
// Return current monotonic clock in microseconds. Use this when you compute
// time offsets. The monotonic clock is not affected by system changes and
// so will never be reset backwards, unlike a system clock.
int64_t
zclock_usecs (void);
// Return formatted date/time as fresh string. Free using zstr_free().
char *
zclock_timestr (void);
// Self test of this class.
void
zclock_test (bool verbose);
// CLASS: zconfig
// Create new config item
zconfig_t *
zconfig_new (const char *name, zconfig_t *parent);
// Destroy a config item and all its children
void
zconfig_destroy (zconfig_t **self_p);
// Load a config tree from a specified ZPL text file; returns a zconfig_t
// reference for the root, if the file exists and is readable. Returns NULL
// if the file does not exist.
zconfig_t *
zconfig_load (const char *filename);
// Equivalent to zconfig_load, taking a format string instead of a fixed
// filename.
zconfig_t *
zconfig_loadf (const char *format, ...);
// Return name of config item
char *
zconfig_name (zconfig_t *self);
// Return value of config item
char *
zconfig_value (zconfig_t *self);
// Insert or update configuration key with value
void
zconfig_put (zconfig_t *self, const char *path, const char *value);
// Equivalent to zconfig_put, accepting a format specifier and variable
// argument list, instead of a single string value.
void
zconfig_putf (zconfig_t *self, const char *path, const char *format, ...);
// Get value for config item into a string value; leading slash is optional
// and ignored.
char *
zconfig_get (zconfig_t *self, const char *path, const char *default_value);
// Set config item name, name may be NULL
void
zconfig_set_name (zconfig_t *self, const char *name);
// Set new value for config item. The new value may be a string, a printf
// format, or NULL. Note that if string may possibly contain '%', or if it
// comes from an insecure source, you must use '%s' as the format, followed
// by the string.
void
zconfig_set_value (zconfig_t *self, const char *format, ...);
// Find our first child, if any
zconfig_t *
zconfig_child (zconfig_t *self);
// Find our first sibling, if any
zconfig_t *
zconfig_next (zconfig_t *self);
// Find a config item along a path; leading slash is optional and ignored.
zconfig_t *
zconfig_locate (zconfig_t *self, const char *path);
// Locate the last config item at a specified depth
zconfig_t *
zconfig_at_depth (zconfig_t *self, int level);
// Execute a callback for each config item in the tree; returns zero if
// successful, else -1.
int
zconfig_execute (zconfig_t *self, zconfig_fct handler, void *arg);
// Add comment to config item before saving to disk. You can add as many
// comment lines as you like. If you use a null format, all comments are
// deleted.
void
zconfig_set_comment (zconfig_t *self, const char *format, ...);
// Return comments of config item, as zlist.
zlist_t *
zconfig_comments (zconfig_t *self);
// Save a config tree to a specified ZPL text file, where a filename
// "-" means dump to standard output.
int
zconfig_save (zconfig_t *self, const char *filename);
// Equivalent to zconfig_save, taking a format string instead of a fixed
// filename.
int
zconfig_savef (zconfig_t *self, const char *format, ...);
// Report filename used during zconfig_load, or NULL if none
const char *
zconfig_filename (zconfig_t *self);
// Reload config tree from same file that it was previously loaded from.
// Returns 0 if OK, -1 if there was an error (and then does not change
// existing data).
int
zconfig_reload (zconfig_t **self_p);
// Load a config tree from a memory chunk
zconfig_t *
zconfig_chunk_load (zchunk_t *chunk);
// Save a config tree to a new memory chunk
zchunk_t *
zconfig_chunk_save (zconfig_t *self);
// Load a config tree from a null-terminated string
zconfig_t *
zconfig_str_load (const char *string);
// Save a config tree to a new null terminated string
char *
zconfig_str_save (zconfig_t *self);
// Return true if a configuration tree was loaded from a file and that
// file has changed in since the tree was loaded.
bool
zconfig_has_changed (zconfig_t *self);
// Print the config file to open stream
void
zconfig_fprint (zconfig_t *self, FILE *file);
// Print properties of object
void
zconfig_print (zconfig_t *self);
// Self test of this class
void
zconfig_test (bool verbose);
// CLASS: zdigest
// Constructor - creates new digest object, which you use to build up a
// digest by repeatedly calling zdigest_update() on chunks of data.
zdigest_t *
zdigest_new (void);
// Destroy a digest object
void
zdigest_destroy (zdigest_t **self_p);
// Add buffer into digest calculation
void
zdigest_update (zdigest_t *self, byte *buffer, size_t length);
// Return final digest hash data. If built without crypto support, returns
// NULL.
byte *
zdigest_data (zdigest_t *self);
// Return final digest hash size
size_t
zdigest_size (zdigest_t *self);
// Return digest as printable hex string; caller should not modify nor
// free this string. After calling this, you may not use zdigest_update()
// on the same digest. If built without crypto support, returns NULL.
char *
zdigest_string (zdigest_t *self);
// Self test of this class.
void
zdigest_test (bool verbose);
// CLASS: zdir
// Create a new directory item that loads in the full tree of the specified
// path, optionally located under some parent path. If parent is "-", then
// loads only the top-level directory, and does not use parent as a path.
zdir_t *
zdir_new (const char *path, const char *parent);
// Destroy a directory tree and all children it contains.
void
zdir_destroy (zdir_t **self_p);
// Return directory path
const char *
zdir_path (zdir_t *self);
// Return last modification time for directory.
time_t
zdir_modified (zdir_t *self);
// Return total hierarchy size, in bytes of data contained in all files
// in the directory tree.
off_t
zdir_cursize (zdir_t *self);
// Return directory count
size_t
zdir_count (zdir_t *self);
// Returns a sorted list of zfile objects; Each entry in the list is a pointer
// to a zfile_t item already allocated in the zdir tree. Do not destroy the
// original zdir tree until you are done with this list.
zlist_t *
zdir_list (zdir_t *self);
// Remove directory, optionally including all files that it contains, at
// all levels. If force is false, will only remove the directory if empty.
// If force is true, will remove all files and all subdirectories.
void
zdir_remove (zdir_t *self, bool force);
// Calculate differences between two versions of a directory tree.
// Returns a list of zdir_patch_t patches. Either older or newer may
// be null, indicating the directory is empty/absent. If alias is set,
// generates virtual filename (minus path, plus alias).
zlist_t *
zdir_diff (zdir_t *older, zdir_t *newer, const char *alias);
// Return full contents of directory as a zdir_patch list.
zlist_t *
zdir_resync (zdir_t *self, const char *alias);
// Load directory cache; returns a hash table containing the SHA-1 digests
// of every file in the tree. The cache is saved between runs in .cache.
zhash_t *
zdir_cache (zdir_t *self);
// Print contents of directory to open stream
void
zdir_fprint (zdir_t *self, FILE *file, int indent);
// Print contents of directory to stdout
void
zdir_print (zdir_t *self, int indent);
// Create a new zdir_watch actor instance:
//
// zactor_t *watch = zactor_new (zdir_watch, NULL);
//
// Destroy zdir_watch instance:
//
// zactor_destroy (&watch);
//
// Enable verbose logging of commands and activity:
//
// zstr_send (watch, "VERBOSE");
//
// Subscribe to changes to a directory path:
//
// zsock_send (watch, "ss", "SUBSCRIBE", "directory_path");
//
// Unsubscribe from changes to a directory path:
//
// zsock_send (watch, "ss", "UNSUBSCRIBE", "directory_path");
//
// Receive directory changes:
// zsock_recv (watch, "sp", &path, &patches);
//
// // Delete the received data.
// free (path);
// zlist_destroy (&patches);
void
zdir_watch (zsock_t *pipe, void *unused);
// Self test of this class.
void
zdir_test (bool verbose);
// CLASS: zdir_patch
// Create new patch
zdir_patch_t *
zdir_patch_new (const char *path, zfile_t *file, zdir_patch_op_t op, const char *alias);
// Destroy a patch
void
zdir_patch_destroy (zdir_patch_t **self_p);
// Create copy of a patch. If the patch is null, or memory was exhausted,
// returns null.
zdir_patch_t *
zdir_patch_dup (zdir_patch_t *self);
// Return patch file directory path
const char *
zdir_patch_path (zdir_patch_t *self);
// Return patch file item
zfile_t *
zdir_patch_file (zdir_patch_t *self);
// Return operation
zdir_patch_op_t
zdir_patch_op (zdir_patch_t *self);
// Return patch virtual file path
const char *
zdir_patch_vpath (zdir_patch_t *self);
// Calculate hash digest for file (create only)
void
zdir_patch_digest_set (zdir_patch_t *self);
// Return hash digest for patch file
const char *
zdir_patch_digest (zdir_patch_t *self);
// Self test of this class.
void
zdir_patch_test (bool verbose);
// CLASS: zfile
// If file exists, populates properties. CZMQ supports portable symbolic
// links, which are files with the extension ".ln". A symbolic link is a
// text file containing one line, the filename of a target file. Reading
// data from the symbolic link actually reads from the target file. Path
// may be NULL, in which case it is not used.
zfile_t *
zfile_new (const char *path, const char *name);
// Destroy a file item
void
zfile_destroy (zfile_t **self_p);
// Duplicate a file item, returns a newly constructed item. If the file
// is null, or memory was exhausted, returns null.
zfile_t *
zfile_dup (zfile_t *self);
// Return file name, remove path if provided
const char *
zfile_filename (zfile_t *self, const char *path);
// Refresh file properties from disk; this is not done automatically
// on access methods, otherwise it is not possible to compare directory
// snapshots.
void
zfile_restat (zfile_t *self);
// Return when the file was last modified. If you want this to reflect the
// current situation, call zfile_restat before checking this property.
time_t
zfile_modified (zfile_t *self);
// Return the last-known size of the file. If you want this to reflect the
// current situation, call zfile_restat before checking this property.
off_t
zfile_cursize (zfile_t *self);
// Return true if the file is a directory. If you want this to reflect
// any external changes, call zfile_restat before checking this property.
bool
zfile_is_directory (zfile_t *self);
// Return true if the file is a regular file. If you want this to reflect
// any external changes, call zfile_restat before checking this property.
bool
zfile_is_regular (zfile_t *self);
// Return true if the file is readable by this process. If you want this to
// reflect any external changes, call zfile_restat before checking this
// property.
bool
zfile_is_readable (zfile_t *self);
// Return true if the file is writeable by this process. If you want this
// to reflect any external changes, call zfile_restat before checking this
// property.
bool
zfile_is_writeable (zfile_t *self);
// Check if file has stopped changing and can be safely processed.
// Updates the file statistics from disk at every call.
bool
zfile_is_stable (zfile_t *self);
// Return true if the file was changed on disk since the zfile_t object
// was created, or the last zfile_restat() call made on it.
bool
zfile_has_changed (zfile_t *self);
// Remove the file from disk
void
zfile_remove (zfile_t *self);
// Open file for reading
// Returns 0 if OK, -1 if not found or not accessible
int
zfile_input (zfile_t *self);
// Open file for writing, creating directory if needed
// File is created if necessary; chunks can be written to file at any
// location. Returns 0 if OK, -1 if error.
int
zfile_output (zfile_t *self);
// Read chunk from file at specified position. If this was the last chunk,
// sets the eof property. Returns a null chunk in case of error.
zchunk_t *
zfile_read (zfile_t *self, size_t bytes, off_t offset);
// Returns true if zfile_read() just read the last chunk in the file.
bool
zfile_eof (zfile_t *self);
// Write chunk to file at specified position
// Return 0 if OK, else -1
int
zfile_write (zfile_t *self, zchunk_t *chunk, off_t offset);
// Read next line of text from file. Returns a pointer to the text line,
// or NULL if there was nothing more to read from the file.
const char *
zfile_readln (zfile_t *self);
// Close file, if open
void
zfile_close (zfile_t *self);
// Return file handle, if opened
FILE *
zfile_handle (zfile_t *self);
// Calculate SHA1 digest for file, using zdigest class.
const char *
zfile_digest (zfile_t *self);
// Self test of this class.
void
zfile_test (bool verbose);
// CLASS: zframe
// Create a new frame. If size is not null, allocates the frame data
// to the specified size. If additionally, data is not null, copies
// size octets from the specified data into the frame body.
zframe_t *
zframe_new (const void *data, size_t size);
// Destroy a frame
void
zframe_destroy (zframe_t **self_p);
// Create an empty (zero-sized) frame
zframe_t *
zframe_new_empty (void);
// Create a frame with a specified string content.
zframe_t *
zframe_from (const char *string);
// Receive frame from socket, returns zframe_t object or NULL if the recv
// was interrupted. Does a blocking recv, if you want to not block then use
// zpoller or zloop.
zframe_t *
zframe_recv (void *source);
// Send a frame to a socket, destroy frame after sending.
// Return -1 on error, 0 on success.
int
zframe_send (zframe_t **self_p, void *dest, int flags);
// Return number of bytes in frame data
size_t
zframe_size (zframe_t *self);
// Return address of frame data
byte *
zframe_data (zframe_t *self);
// Create a new frame that duplicates an existing frame. If frame is null,
// or memory was exhausted, returns null.
zframe_t *
zframe_dup (zframe_t *self);
// Return frame data encoded as printable hex string, useful for 0MQ UUIDs.
// Caller must free string when finished with it.
char *
zframe_strhex (zframe_t *self);
// Return frame data copied into freshly allocated string
// Caller must free string when finished with it.
char *
zframe_strdup (zframe_t *self);
// Return TRUE if frame body is equal to string, excluding terminator
bool
zframe_streq (zframe_t *self, const char *string);
// Return frame MORE indicator (1 or 0), set when reading frame from socket
// or by the zframe_set_more() method
int
zframe_more (zframe_t *self);
// Set frame MORE indicator (1 or 0). Note this is NOT used when sending
// frame to socket, you have to specify flag explicitly.
void
zframe_set_more (zframe_t *self, int more);
// Return frame routing ID, if the frame came from a ZMQ_SERVER socket.
// Else returns zero.
uint32_t
zframe_routing_id (zframe_t *self);
// Set routing ID on frame. This is used if/when the frame is sent to a
// ZMQ_SERVER socket.
void
zframe_set_routing_id (zframe_t *self, uint32_t routing_id);
// Return TRUE if two frames have identical size and data
// If either frame is NULL, equality is always false.
bool
zframe_eq (zframe_t *self, zframe_t *other);
// Set new contents for frame
void
zframe_reset (zframe_t *self, const void *data, size_t size);
// Send message to zsys log sink (may be stdout, or system facility as
// configured by zsys_set_logstream). Prefix shows before frame, if not null.
void
zframe_print (zframe_t *self, const char *prefix);
// Probe the supplied object, and report if it looks like a zframe_t.
bool
zframe_is (void *self);
// Self test of this class.
void
zframe_test (bool verbose);
// CLASS: zhash
// Create a new, empty hash container
zhash_t *
zhash_new (void);
// Destroy a hash container and all items in it
void
zhash_destroy (zhash_t **self_p);
// Unpack binary frame into a new hash table. Packed data must follow format
// defined by zhash_pack. Hash table is set to autofree. An empty frame
// unpacks to an empty hash table.
zhash_t *
zhash_unpack (zframe_t *frame);
// Insert item into hash table with specified key and item.
// If key is already present returns -1 and leaves existing item unchanged
// Returns 0 on success.
int
zhash_insert (zhash_t *self, const char *key, void *item);
// Update item into hash table with specified key and item.
// If key is already present, destroys old item and inserts new one.
// Use free_fn method to ensure deallocator is properly called on item.
void
zhash_update (zhash_t *self, const char *key, void *item);
// Remove an item specified by key from the hash table. If there was no such
// item, this function does nothing.
void
zhash_delete (zhash_t *self, const char *key);
// Return the item at the specified key, or null
void *
zhash_lookup (zhash_t *self, const char *key);
// Reindexes an item from an old key to a new key. If there was no such
// item, does nothing. Returns 0 if successful, else -1.
int
zhash_rename (zhash_t *self, const char *old_key, const char *new_key);
// Set a free function for the specified hash table item. When the item is
// destroyed, the free function, if any, is called on that item.
// Use this when hash items are dynamically allocated, to ensure that
// you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
// Returns the item, or NULL if there is no such item.
void *
zhash_freefn (zhash_t *self, const char *key, zhash_free_fn free_fn);
// Return the number of keys/items in the hash table
size_t
zhash_size (zhash_t *self);
// Make copy of hash table; if supplied table is null, returns null.
// Does not copy items themselves. Rebuilds new table so may be slow on
// very large tables. NOTE: only works with item values that are strings
// since there's no other way to know how to duplicate the item value.
zhash_t *
zhash_dup (zhash_t *self);
// Return keys for items in table
zlist_t *
zhash_keys (zhash_t *self);
// Simple iterator; returns first item in hash table, in no given order,
// or NULL if the table is empty. This method is simpler to use than the
// foreach() method, which is deprecated. To access the key for this item
// use zhash_cursor(). NOTE: do NOT modify the table while iterating.
void *
zhash_first (zhash_t *self);
// Simple iterator; returns next item in hash table, in no given order,
// or NULL if the last item was already returned. Use this together with
// zhash_first() to process all items in a hash table. If you need the
// items in sorted order, use zhash_keys() and then zlist_sort(). To
// access the key for this item use zhash_cursor(). NOTE: do NOT modify
// the table while iterating.
void *
zhash_next (zhash_t *self);
// After a successful first/next method, returns the key for the item that
// was returned. This is a constant string that you may not modify or
// deallocate, and which lasts as long as the item in the hash. After an
// unsuccessful first/next, returns NULL.
const char *
zhash_cursor (zhash_t *self);
// Add a comment to hash table before saving to disk. You can add as many
// comment lines as you like. These comment lines are discarded when loading
// the file. If you use a null format, all comments are deleted.
void
zhash_comment (zhash_t *self, const char *format, ...);
// Serialize hash table to a binary frame that can be sent in a message.
// The packed format is compatible with the 'dictionary' type defined in
// http://rfc.zeromq.org/spec:35/FILEMQ, and implemented by zproto:
//
// ; A list of name/value pairs
// dictionary = dict-count *( dict-name dict-value )
// dict-count = number-4
// dict-value = longstr
// dict-name = string
//
// ; Strings are always length + text contents
// longstr = number-4 *VCHAR
// string = number-1 *VCHAR
//
// ; Numbers are unsigned integers in network byte order
// number-1 = 1OCTET
// number-4 = 4OCTET
//
// Comments are not included in the packed data. Item values MUST be
// strings.
zframe_t *
zhash_pack (zhash_t *self);
// Save hash table to a text file in name=value format. Hash values must be
// printable strings; keys may not contain '=' character. Returns 0 if OK,
// else -1 if a file error occurred.
int
zhash_save (zhash_t *self, const char *filename);
// Load hash table from a text file in name=value format; hash table must
// already exist. Hash values must be printable strings; keys may not contain
// '=' character. Returns 0 if OK, else -1 if a file was not readable.
int
zhash_load (zhash_t *self, const char *filename);
// When a hash table was loaded from a file by zhash_load, this method will
// reload the file if it has been modified since, and is "stable", i.e. not
// still changing. Returns 0 if OK, -1 if there was an error reloading the
// file.
int
zhash_refresh (zhash_t *self);
// Set hash for automatic value destruction
void
zhash_autofree (zhash_t *self);
// Apply function to each item in the hash table. Items are iterated in no
// defined order. Stops if callback function returns non-zero and returns
// final return code from callback function (zero = success). Deprecated.
int
zhash_foreach (zhash_t *self, zhash_foreach_fn callback, void *argument);
// Self test of this class.
void
zhash_test (bool verbose);
// CLASS: zhashx
// Create a new, empty hash container
zhashx_t *
zhashx_new (void);
// Destroy a hash container and all items in it
void
zhashx_destroy (zhashx_t **self_p);
// Unpack binary frame into a new hash table. Packed data must follow format
// defined by zhashx_pack. Hash table is set to autofree. An empty frame
// unpacks to an empty hash table.
zhashx_t *
zhashx_unpack (zframe_t *frame);
// Insert item into hash table with specified key and item.
// If key is already present returns -1 and leaves existing item unchanged
// Returns 0 on success.
int
zhashx_insert (zhashx_t *self, const void *key, void *item);
// Update or insert item into hash table with specified key and item. If the
// key is already present, destroys old item and inserts new one. If you set
// a container item destructor, this is called on the old value. If the key
// was not already present, inserts a new item. Sets the hash cursor to the
// new item.
void
zhashx_update (zhashx_t *self, const void *key, void *item);
// Remove an item specified by key from the hash table. If there was no such
// item, this function does nothing.
void
zhashx_delete (zhashx_t *self, const void *key);
// Delete all items from the hash table. If the key destructor is
// set, calls it on every key. If the item destructor is set, calls
// it on every item.
void
zhashx_purge (zhashx_t *self);
// Return the item at the specified key, or null
void *
zhashx_lookup (zhashx_t *self, const void *key);
// Reindexes an item from an old key to a new key. If there was no such
// item, does nothing. Returns 0 if successful, else -1.
int
zhashx_rename (zhashx_t *self, const void *old_key, const void *new_key);
// Set a free function for the specified hash table item. When the item is
// destroyed, the free function, if any, is called on that item.
// Use this when hash items are dynamically allocated, to ensure that
// you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
// Returns the item, or NULL if there is no such item.
void *
zhashx_freefn (zhashx_t *self, const void *key, zhashx_free_fn free_fn);
// Return the number of keys/items in the hash table
size_t
zhashx_size (zhashx_t *self);
// Return a zlistx_t containing the keys for the items in the
// table. Uses the key_duplicator to duplicate all keys and sets the
// key_destructor as destructor for the list.
zlistx_t *
zhashx_keys (zhashx_t *self);
// Return a zlistx_t containing the values for the items in the
// table. Uses the duplicator to duplicate all items and sets the
// destructor as destructor for the list.
zlistx_t *
zhashx_values (zhashx_t *self);
// Simple iterator; returns first item in hash table, in no given order,
// or NULL if the table is empty. This method is simpler to use than the
// foreach() method, which is deprecated. To access the key for this item
// use zhashx_cursor(). NOTE: do NOT modify the table while iterating.
void *
zhashx_first (zhashx_t *self);
// Simple iterator; returns next item in hash table, in no given order,
// or NULL if the last item was already returned. Use this together with
// zhashx_first() to process all items in a hash table. If you need the
// items in sorted order, use zhashx_keys() and then zlistx_sort(). To
// access the key for this item use zhashx_cursor(). NOTE: do NOT modify
// the table while iterating.
void *
zhashx_next (zhashx_t *self);
// After a successful first/next method, returns the key for the item that
// was returned. This is a constant string that you may not modify or
// deallocate, and which lasts as long as the item in the hash. After an
// unsuccessful first/next, returns NULL.
const void *
zhashx_cursor (zhashx_t *self);
// Add a comment to hash table before saving to disk. You can add as many
// comment lines as you like. These comment lines are discarded when loading
// the file. If you use a null format, all comments are deleted.
void
zhashx_comment (zhashx_t *self, const char *format, ...);
// Save hash table to a text file in name=value format. Hash values must be
// printable strings; keys may not contain '=' character. Returns 0 if OK,
// else -1 if a file error occurred.
int
zhashx_save (zhashx_t *self, const char *filename);
// Load hash table from a text file in name=value format; hash table must
// already exist. Hash values must be printable strings; keys may not contain
// '=' character. Returns 0 if OK, else -1 if a file was not readable.
int
zhashx_load (zhashx_t *self, const char *filename);
// When a hash table was loaded from a file by zhashx_load, this method will
// reload the file if it has been modified since, and is "stable", i.e. not
// still changing. Returns 0 if OK, -1 if there was an error reloading the
// file.
int
zhashx_refresh (zhashx_t *self);
// Serialize hash table to a binary frame that can be sent in a message.
// The packed format is compatible with the 'dictionary' type defined in
// http://rfc.zeromq.org/spec:35/FILEMQ, and implemented by zproto:
//
// ; A list of name/value pairs
// dictionary = dict-count *( dict-name dict-value )
// dict-count = number-4
// dict-value = longstr
// dict-name = string
//
// ; Strings are always length + text contents
// longstr = number-4 *VCHAR
// string = number-1 *VCHAR
//
// ; Numbers are unsigned integers in network byte order
// number-1 = 1OCTET
// number-4 = 4OCTET
//
// Comments are not included in the packed data. Item values MUST be
// strings.
zframe_t *
zhashx_pack (zhashx_t *self);
// Make a copy of the list; items are duplicated if you set a duplicator
// for the list, otherwise not. Copying a null reference returns a null
// reference. Note that this method's behavior changed slightly for CZMQ
// v3.x, as it does not set nor respect autofree. It does however let you
// duplicate any hash table safely. The old behavior is in zhashx_dup_v2.
zhashx_t *
zhashx_dup (zhashx_t *self);
// Set a user-defined deallocator for hash items; by default items are not
// freed when the hash is destroyed.
void
zhashx_set_destructor (zhashx_t *self, zhashx_destructor_fn destructor);
// Set a user-defined duplicator for hash items; by default items are not
// copied when the hash is duplicated.
void
zhashx_set_duplicator (zhashx_t *self, zhashx_duplicator_fn duplicator);
// Set a user-defined deallocator for keys; by default keys are freed
// when the hash is destroyed using free().
void
zhashx_set_key_destructor (zhashx_t *self, zhashx_destructor_fn destructor);
// Set a user-defined duplicator for keys; by default keys are duplicated
// using strdup.
void
zhashx_set_key_duplicator (zhashx_t *self, zhashx_duplicator_fn duplicator);
// Set a user-defined comparator for keys; by default keys are
// compared using strcmp.
void
zhashx_set_key_comparator (zhashx_t *self, zhashx_comparator_fn comparator);
// Set a user-defined hash function for keys; by default keys are
// hashed by the table's built-in string hashing function.
void
zhashx_set_key_hasher (zhashx_t *self, zhashx_hash_fn hasher);
// Make copy of hash table; if supplied table is null, returns null.
// Does not copy items themselves. Rebuilds new table so may be slow on
// very large tables. NOTE: only works with item values that are strings
// since there's no other way to know how to duplicate the item value.
zhashx_t *
zhashx_dup_v2 (zhashx_t *self);
// DEPRECATED as clumsy -- use set_destructor instead
// Set hash for automatic value destruction
void
zhashx_autofree (zhashx_t *self);
// DEPRECATED as clumsy -- use zhashx_first/_next instead
// Apply function to each item in the hash table. Items are iterated in no
// defined order. Stops if callback function returns non-zero and returns
// final return code from callback function (zero = success).
// Callback function for zhashx_foreach method
int
zhashx_foreach (zhashx_t *self, zhashx_foreach_fn callback, void *argument);
// Self test of this class.
void
zhashx_test (bool verbose);
// CLASS: ziflist
// Get a list of network interfaces currently defined on the system
ziflist_t *
ziflist_new (void);
// Destroy a ziflist instance
void
ziflist_destroy (ziflist_t **self_p);
// Reload network interfaces from system
void
ziflist_reload (ziflist_t *self);
// Return the number of network interfaces on system
size_t
ziflist_size (ziflist_t *self);
// Get first network interface, return NULL if there are none
const char *
ziflist_first (ziflist_t *self);
// Get next network interface, return NULL if we hit the last one
const char *
ziflist_next (ziflist_t *self);
// Return the current interface IP address as a printable string
const char *
ziflist_address (ziflist_t *self);
// Return the current interface broadcast address as a printable string
const char *
ziflist_broadcast (ziflist_t *self);
// Return the current interface network mask as a printable string
const char *
ziflist_netmask (ziflist_t *self);
// Return the list of interfaces.
void
ziflist_print (ziflist_t *self);
// Self test of this class.
void
ziflist_test (bool verbose);
// CLASS: zlist
// Create a new list container
zlist_t *
zlist_new (void);
// Destroy a list container
void
zlist_destroy (zlist_t **self_p);
// Return the item at the head of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the head item, or NULL if the list is empty.
void *
zlist_first (zlist_t *self);
// Return the next item. If the list is empty, returns NULL. To move to
// the start of the list call zlist_first (). Advances the cursor.
void *
zlist_next (zlist_t *self);
// Return the item at the tail of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the tail item, or NULL if the list is empty.
void *
zlist_last (zlist_t *self);
// Return first item in the list, or null, leaves the cursor
void *
zlist_head (zlist_t *self);
// Return last item in the list, or null, leaves the cursor
void *
zlist_tail (zlist_t *self);
// Return the current item of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the current item, or NULL if the list is empty.
void *
zlist_item (zlist_t *self);
// Append an item to the end of the list, return 0 if OK or -1 if this
// failed for some reason (out of memory). Note that if a duplicator has
// been set, this method will also duplicate the item.
int
zlist_append (zlist_t *self, void *item);
// Push an item to the start of the list, return 0 if OK or -1 if this
// failed for some reason (out of memory). Note that if a duplicator has
// been set, this method will also duplicate the item.
int
zlist_push (zlist_t *self, void *item);
// Pop the item off the start of the list, if any
void *
zlist_pop (zlist_t *self);
// Checks if an item already is present. Uses compare method to determine if
// items are equal. If the compare method is NULL the check will only compare
// pointers. Returns true if item is present else false.
bool
zlist_exists (zlist_t *self, void *item);
// Remove the specified item from the list if present
void
zlist_remove (zlist_t *self, void *item);
// Make a copy of list. If the list has autofree set, the copied list will
// duplicate all items, which must be strings. Otherwise, the list will hold
// pointers back to the items in the original list. If list is null, returns
// NULL.
zlist_t *
zlist_dup (zlist_t *self);
// Purge all items from list
void
zlist_purge (zlist_t *self);
// Return number of items in the list
size_t
zlist_size (zlist_t *self);
// Sort the list by ascending key value using a straight ASCII comparison.
// The sort is not stable, so may reorder items with the same keys.
void
zlist_sort (zlist_t *self, zlist_compare_fn compare);
// Set list for automatic item destruction; item values MUST be strings.
// By default a list item refers to a value held elsewhere. When you set
// this, each time you append or push a list item, zlist will take a copy
// of the string value. Then, when you destroy the list, it will free all
// item values automatically. If you use any other technique to allocate
// list values, you must free them explicitly before destroying the list.
// The usual technique is to pop list items and destroy them, until the
// list is empty.
void
zlist_autofree (zlist_t *self);
// Sets a compare function for this list. The function compares two items.
// It returns an integer less than, equal to, or greater than zero if the
// first item is found, respectively, to be less than, to match, or be
// greater than the second item.
// This function is used for sorting, removal and exists checking.
void
zlist_comparefn (zlist_t *self, zlist_compare_fn fn);
// Set a free function for the specified list item. When the item is
// destroyed, the free function, if any, is called on that item.
// Use this when list items are dynamically allocated, to ensure that
// you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
// Returns the item, or NULL if there is no such item.
void *
zlist_freefn (zlist_t *self, void *item, zlist_free_fn fn, bool at_tail);
// Self test of this class.
void
zlist_test (bool verbose);
// CLASS: zlistx
// Create a new, empty list.
zlistx_t *
zlistx_new (void);
// Destroy a list. If an item destructor was specified, all items in the
// list are automatically destroyed as well.
void
zlistx_destroy (zlistx_t **self_p);
// Add an item to the head of the list. Calls the item duplicator, if any,
// on the item. Resets cursor to list head. Returns an item handle on
// success, NULL if memory was exhausted.
void *
zlistx_add_start (zlistx_t *self, void *item);
// Add an item to the tail of the list. Calls the item duplicator, if any,
// on the item. Resets cursor to list head. Returns an item handle on
// success, NULL if memory was exhausted.
void *
zlistx_add_end (zlistx_t *self, void *item);
// Return the number of items in the list
size_t
zlistx_size (zlistx_t *self);
// Return first item in the list, or null, leaves the cursor
void *
zlistx_head (zlistx_t *self);
// Return last item in the list, or null, leaves the cursor
void *
zlistx_tail (zlistx_t *self);
// Return the item at the head of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the head item, or NULL if the list is empty.
void *
zlistx_first (zlistx_t *self);
// Return the next item. At the end of the list (or in an empty list),
// returns NULL. Use repeated zlistx_next () calls to work through the list
// from zlistx_first (). First time, acts as zlistx_first().
void *
zlistx_next (zlistx_t *self);
// Return the previous item. At the start of the list (or in an empty list),
// returns NULL. Use repeated zlistx_prev () calls to work through the list
// backwards from zlistx_last (). First time, acts as zlistx_last().
void *
zlistx_prev (zlistx_t *self);
// Return the item at the tail of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the tail item, or NULL if the list is empty.
void *
zlistx_last (zlistx_t *self);
// Returns the value of the item at the cursor, or NULL if the cursor is
// not pointing to an item.
void *
zlistx_item (zlistx_t *self);
// Returns the handle of the item at the cursor, or NULL if the cursor is
// not pointing to an item.
void *
zlistx_cursor (zlistx_t *self);
// Returns the item associated with the given list handle, or NULL if passed
// in handle is NULL. Asserts that the passed in handle points to a list element.
void *
zlistx_handle_item (void *handle);
// Find an item in the list, searching from the start. Uses the item
// comparator, if any, else compares item values directly. Returns the
// item handle found, or NULL. Sets the cursor to the found item, if any.
void *
zlistx_find (zlistx_t *self, void *item);
// Detach an item from the list, using its handle. The item is not modified,
// and the caller is responsible for destroying it if necessary. If handle is
// null, detaches the first item on the list. Returns item that was detached,
// or null if none was. If cursor was at item, moves cursor to previous item,
// so you can detach items while iterating forwards through a list.
void *
zlistx_detach (zlistx_t *self, void *handle);
// Detach item at the cursor, if any, from the list. The item is not modified,
// and the caller is responsible for destroying it as necessary. Returns item
// that was detached, or null if none was. Moves cursor to previous item, so
// you can detach items while iterating forwards through a list.
void *
zlistx_detach_cur (zlistx_t *self);
// Delete an item, using its handle. Calls the item destructor, if any is
// set. If handle is null, deletes the first item on the list. Returns 0
// if an item was deleted, -1 if not. If cursor was at item, moves cursor
// to previous item, so you can delete items while iterating forwards
// through a list.
int
zlistx_delete (zlistx_t *self, void *handle);
// Move an item to the start of the list, via its handle.
void
zlistx_move_start (zlistx_t *self, void *handle);
// Move an item to the end of the list, via its handle.
void
zlistx_move_end (zlistx_t *self, void *handle);
// Remove all items from the list, and destroy them if the item destructor
// is set.
void
zlistx_purge (zlistx_t *self);
// Sort the list. If an item comparator was set, calls that to compare
// items, otherwise compares on item value. The sort is not stable, so may
// reorder equal items.
void
zlistx_sort (zlistx_t *self);
// Create a new node and insert it into a sorted list. Calls the item
// duplicator, if any, on the item. If low_value is true, starts searching
// from the start of the list, otherwise searches from the end. Use the item
// comparator, if any, to find where to place the new node. Returns a handle
// to the new node, or NULL if memory was exhausted. Resets the cursor to the
// list head.
void *
zlistx_insert (zlistx_t *self, void *item, bool low_value);
// Move an item, specified by handle, into position in a sorted list. Uses
// the item comparator, if any, to determine the new location. If low_value
// is true, starts searching from the start of the list, otherwise searches
// from the end.
void
zlistx_reorder (zlistx_t *self, void *handle, bool low_value);
// Make a copy of the list; items are duplicated if you set a duplicator
// for the list, otherwise not. Copying a null reference returns a null
// reference.
zlistx_t *
zlistx_dup (zlistx_t *self);
// Set a user-defined deallocator for list items; by default items are not
// freed when the list is destroyed.
void
zlistx_set_destructor (zlistx_t *self, zlistx_destructor_fn destructor);
// Set a user-defined duplicator for list items; by default items are not
// copied when the list is duplicated.
void
zlistx_set_duplicator (zlistx_t *self, zlistx_duplicator_fn duplicator);
// Set a user-defined comparator for zlistx_find and zlistx_sort; the method
// must return -1, 0, or 1 depending on whether item1 is less than, equal to,
// or greater than, item2.
void
zlistx_set_comparator (zlistx_t *self, zlistx_comparator_fn comparator);
// Self test of this class.
void
zlistx_test (bool verbose);
// CLASS: zloop
// Create a new zloop reactor
zloop_t *
zloop_new (void);
// Destroy a reactor
void
zloop_destroy (zloop_t **self_p);
// Register socket reader with the reactor. When the reader has messages,
// the reactor will call the handler, passing the arg. Returns 0 if OK, -1
// if there was an error. If you register the same socket more than once,
// each instance will invoke its corresponding handler.
int
zloop_reader (zloop_t *self, zsock_t *sock, zloop_reader_fn handler, void *arg);
// Cancel a socket reader from the reactor. If multiple readers exist for
// same socket, cancels ALL of them.
void
zloop_reader_end (zloop_t *self, zsock_t *sock);
// Configure a registered reader to ignore errors. If you do not set this,
// then readers that have errors are removed from the reactor silently.
void
zloop_reader_set_tolerant (zloop_t *self, zsock_t *sock);
// Register low-level libzmq pollitem with the reactor. When the pollitem
// is ready, will call the handler, passing the arg. Returns 0 if OK, -1
// if there was an error. If you register the pollitem more than once, each
// instance will invoke its corresponding handler. A pollitem with
// socket=NULL and fd=0 means 'poll on FD zero'.
int
zloop_poller (zloop_t *self, zmq_pollitem_t *item, zloop_fn handler, void *arg);
// Cancel a pollitem from the reactor, specified by socket or FD. If both
// are specified, uses only socket. If multiple poll items exist for same
// socket/FD, cancels ALL of them.
void
zloop_poller_end (zloop_t *self, zmq_pollitem_t *item);
// Configure a registered poller to ignore errors. If you do not set this,
// then pollers that have errors are removed from the reactor silently.
void
zloop_poller_set_tolerant (zloop_t *self, zmq_pollitem_t *item);
// Register a timer that expires after some delay and repeats some number of
// times. At each expiry, will call the handler, passing the arg. To run a
// timer forever, use 0 times. Returns a timer_id that is used to cancel the
// timer in the future. Returns -1 if there was an error.
int
zloop_timer (zloop_t *self, size_t delay, size_t times, zloop_timer_fn handler, void *arg);
// Cancel a specific timer identified by a specific timer_id (as returned by
// zloop_timer).
int
zloop_timer_end (zloop_t *self, int timer_id);
// Register a ticket timer. Ticket timers are very fast in the case where
// you use a lot of timers (thousands), and frequently remove and add them.
// The main use case is expiry timers for servers that handle many clients,
// and which reset the expiry timer for each message received from a client.
// Whereas normal timers perform poorly as the number of clients grows, the
// cost of ticket timers is constant, no matter the number of clients. You
// must set the ticket delay using zloop_set_ticket_delay before creating a
// ticket. Returns a handle to the timer that you should use in
// zloop_ticket_reset and zloop_ticket_delete.
void *
zloop_ticket (zloop_t *self, zloop_timer_fn handler, void *arg);
// Reset a ticket timer, which moves it to the end of the ticket list and
// resets its execution time. This is a very fast operation.
void
zloop_ticket_reset (zloop_t *self, void *handle);
// Delete a ticket timer. We do not actually delete the ticket here, as
// other code may still refer to the ticket. We mark as deleted, and remove
// later and safely.
void
zloop_ticket_delete (zloop_t *self, void *handle);
// Set the ticket delay, which applies to all tickets. If you lower the
// delay and there are already tickets created, the results are undefined.
void
zloop_set_ticket_delay (zloop_t *self, size_t ticket_delay);
// Set hard limit on number of timers allowed. Setting more than a small
// number of timers (10-100) can have a dramatic impact on the performance
// of the reactor. For high-volume cases, use ticket timers. If the hard
// limit is reached, the reactor stops creating new timers and logs an
// error.
void
zloop_set_max_timers (zloop_t *self, size_t max_timers);
// Set verbose tracing of reactor on/off. The default verbose setting is
// off (false).
void
zloop_set_verbose (zloop_t *self, bool verbose);
// By default the reactor stops if the process receives a SIGINT or SIGTERM
// signal. This makes it impossible to shut down message-based architectures
// like zactors. This method lets you switch off break handling. The default
// nonstop setting is off (false).
void
zloop_set_nonstop (zloop_t *self, bool nonstop);
// Start the reactor. Takes control of the thread and returns when the 0MQ
// context is terminated or the process is interrupted, or any event handler
// returns -1. Event handlers may register new sockets and timers, and
// cancel sockets. Returns 0 if interrupted, -1 if canceled by a handler.
int
zloop_start (zloop_t *self);
// Self test of this class.
void
zloop_test (bool verbose);
// CLASS: zmsg
// Create a new empty message object
zmsg_t *
zmsg_new (void);
// Destroy a message object and all frames it contains
void
zmsg_destroy (zmsg_t **self_p);
// Receive message from socket, returns zmsg_t object or NULL if the recv
// was interrupted. Does a blocking recv. If you want to not block then use
// the zloop class or zmsg_recv_nowait or zmq_poll to check for socket input
// before receiving.
zmsg_t *
zmsg_recv (void *source);
// Load/append an open file into new message, return the message.
// Returns NULL if the message could not be loaded.
zmsg_t *
zmsg_load (FILE *file);
// Decodes a serialized message buffer created by zmsg_encode () and returns
// a new zmsg_t object. Returns NULL if the buffer was badly formatted or
// there was insufficient memory to work.
zmsg_t *
zmsg_decode (const byte *buffer, size_t buffer_size);
// Generate a signal message encoding the given status. A signal is a short
// message carrying a 1-byte success/failure code (by convention, 0 means
// OK). Signals are encoded to be distinguishable from "normal" messages.
zmsg_t *
zmsg_new_signal (byte status);
// Send message to destination socket, and destroy the message after sending
// it successfully. If the message has no frames, sends nothing but destroys
// the message anyhow. Nullifies the caller's reference to the message (as
// it is a destructor).
int
zmsg_send (zmsg_t **self_p, void *dest);
// Send message to destination socket as part of a multipart sequence, and
// destroy the message after sending it successfully. Note that after a
// zmsg_sendm, you must call zmsg_send or another method that sends a final
// message part. If the message has no frames, sends nothing but destroys
// the message anyhow. Nullifies the caller's reference to the message (as
// it is a destructor).
int
zmsg_sendm (zmsg_t **self_p, void *dest);
// Return size of message, i.e. number of frames (0 or more).
size_t
zmsg_size (zmsg_t *self);
// Return total size of all frames in message.
size_t
zmsg_content_size (zmsg_t *self);
// Return message routing ID, if the message came from a ZMQ_SERVER socket.
// Else returns zero.
uint32_t
zmsg_routing_id (zmsg_t *self);
// Set routing ID on message. This is used if/when the message is sent to a
// ZMQ_SERVER socket.
void
zmsg_set_routing_id (zmsg_t *self, uint32_t routing_id);
// Push frame to the front of the message, i.e. before all other frames.
// Message takes ownership of frame, will destroy it when message is sent.
// Returns 0 on success, -1 on error. Deprecates zmsg_push, which did not
// nullify the caller's frame reference.
int
zmsg_prepend (zmsg_t *self, zframe_t **frame_p);
// Add frame to the end of the message, i.e. after all other frames.
// Message takes ownership of frame, will destroy it when message is sent.
// Returns 0 on success. Deprecates zmsg_add, which did not nullify the
// caller's frame reference.
int
zmsg_append (zmsg_t *self, zframe_t **frame_p);
// Remove first frame from message, if any. Returns frame, or NULL.
zframe_t *
zmsg_pop (zmsg_t *self);
// Push block of memory to front of message, as a new frame.
// Returns 0 on success, -1 on error.
int
zmsg_pushmem (zmsg_t *self, const void *src, size_t size);
// Add block of memory to the end of the message, as a new frame.
// Returns 0 on success, -1 on error.
int
zmsg_addmem (zmsg_t *self, const void *src, size_t size);
// Push string as new frame to front of message.
// Returns 0 on success, -1 on error.
int
zmsg_pushstr (zmsg_t *self, const char *string);
// Push string as new frame to end of message.
// Returns 0 on success, -1 on error.
int
zmsg_addstr (zmsg_t *self, const char *string);
// Push formatted string as new frame to front of message.
// Returns 0 on success, -1 on error.
int
zmsg_pushstrf (zmsg_t *self, const char *format, ...);
// Push formatted string as new frame to end of message.
// Returns 0 on success, -1 on error.
int
zmsg_addstrf (zmsg_t *self, const char *format, ...);
// Pop frame off front of message, return as fresh string. If there were
// no more frames in the message, returns NULL.
char *
zmsg_popstr (zmsg_t *self);
// Push encoded message as a new frame. Message takes ownership of
// submessage, so the original is destroyed in this call. Returns 0 on
// success, -1 on error.
int
zmsg_addmsg (zmsg_t *self, zmsg_t **msg_p);
// Remove first submessage from message, if any. Returns zmsg_t, or NULL if
// decoding was not successful.
zmsg_t *
zmsg_popmsg (zmsg_t *self);
// Remove specified frame from list, if present. Does not destroy frame.
void
zmsg_remove (zmsg_t *self, zframe_t *frame);
// Set cursor to first frame in message. Returns frame, or NULL, if the
// message is empty. Use this to navigate the frames as a list.
zframe_t *
zmsg_first (zmsg_t *self);
// Return the next frame. If there are no more frames, returns NULL. To move
// to the first frame call zmsg_first(). Advances the cursor.
zframe_t *
zmsg_next (zmsg_t *self);
// Return the last frame. If there are no frames, returns NULL.
zframe_t *
zmsg_last (zmsg_t *self);
// Save message to an open file, return 0 if OK, else -1. The message is
// saved as a series of frames, each with length and data. Note that the
// file is NOT guaranteed to be portable between operating systems, nor
// between versions of CZMQ. The file format is at present undocumented and
// liable to arbitrary change.
int
zmsg_save (zmsg_t *self, FILE *file);
// Serialize multipart message to a single buffer. Use this method to send
// structured messages across transports that do not support multipart data.
// Allocates and returns a new buffer containing the serialized message.
// To decode a serialized message buffer, use zmsg_decode ().
size_t
zmsg_encode (zmsg_t *self, byte **buffer);
// Create copy of message, as new message object. Returns a fresh zmsg_t
// object. If message is null, or memory was exhausted, returns null.
zmsg_t *
zmsg_dup (zmsg_t *self);
// Send message to zsys log sink (may be stdout, or system facility as
// configured by zsys_set_logstream).
void
zmsg_print (zmsg_t *self);
// Return true if the two messages have the same number of frames and each
// frame in the first message is identical to the corresponding frame in the
// other message. As with zframe_eq, return false if either message is NULL.
bool
zmsg_eq (zmsg_t *self, zmsg_t *other);
// Return signal value, 0 or greater, if message is a signal, -1 if not.
int
zmsg_signal (zmsg_t *self);
// Probe the supplied object, and report if it looks like a zmsg_t.
bool
zmsg_is (void *self);
// Self test of this class.
void
zmsg_test (bool verbose);
// CLASS: zpoller
// Create new poller, specifying zero or more readers. The list of
// readers ends in a NULL. Each reader can be a zsock_t instance, a
// zactor_t instance, a libzmq socket (void *), or a file handle.
zpoller_t *
zpoller_new (void *reader, ...);
// Destroy a poller
void
zpoller_destroy (zpoller_t **self_p);
// Add a reader to be polled. Returns 0 if OK, -1 on failure. The reader may
// be a libzmq void * socket, a zsock_t instance, or a zactor_t instance.
int
zpoller_add (zpoller_t *self, void *reader);
// Remove a reader from the poller; returns 0 if OK, -1 on failure. The reader
// must have been passed during construction, or in a zpoller_add () call.
int
zpoller_remove (zpoller_t *self, void *reader);
// By default the poller stops if the process receives a SIGINT or SIGTERM
// signal. This makes it impossible to shut-down message based architectures
// like zactors. This method lets you switch off break handling. The default
// nonstop setting is off (false).
void
zpoller_set_nonstop (zpoller_t *self, bool nonstop);
// Poll the registered readers for I/O, return first reader that has input.
// The reader will be a libzmq void * socket, or a zsock_t or zactor_t
// instance as specified in zpoller_new/zpoller_add. The timeout should be
// zero or greater, or -1 to wait indefinitely. Socket priority is defined
// by their order in the poll list. If you need a balanced poll, use the low
// level zmq_poll method directly. If the poll call was interrupted (SIGINT),
// or the ZMQ context was destroyed, or the timeout expired, returns NULL.
// You can test the actual exit condition by calling zpoller_expired () and
// zpoller_terminated (). The timeout is in msec.
void *
zpoller_wait (zpoller_t *self, int timeout);
// Return true if the last zpoller_wait () call ended because the timeout
// expired, without any error.
bool
zpoller_expired (zpoller_t *self);
// Return true if the last zpoller_wait () call ended because the process
// was interrupted, or the parent context was destroyed.
bool
zpoller_terminated (zpoller_t *self);
// Self test of this class.
void
zpoller_test (bool verbose);
// CLASS: zproc
// Returns CZMQ version as a single 6-digit integer encoding the major
// version (x 10000), the minor version (x 100) and the patch.
int
zproc_czmq_version (void);
// Returns true if the process received a SIGINT or SIGTERM signal.
// It is good practice to use this method to exit any infinite loop
// processing messages.
bool
zproc_interrupted (void);
// Returns true if the underlying libzmq supports CURVE security.
bool
zproc_has_curve (void);
// Return current host name, for use in public tcp:// endpoints.
// If the host name is not resolvable, returns NULL.
char *
zproc_hostname (void);
// Move the current process into the background. The precise effect
// depends on the operating system. On POSIX boxes, moves to a specified
// working directory (if specified), closes all file handles, reopens
// stdin, stdout, and stderr to the null device, and sets the process to
// ignore SIGHUP. On Windows, does nothing. NOTE(review): upstream docs
// describe a 0/-1 result, but the declaration below returns void, so no
// error code is observable here — confirm against the upstream zproc API.
void
zproc_daemonize (const char *workdir);
// Drop the process ID into the lockfile, with exclusive lock, and
// switch the process to the specified group and/or user. Any of the
// arguments may be null, indicating a no-op. NOTE(review): upstream docs
// describe a 0/-1 result, but the declaration below returns void, so no
// error code is observable here. Note if you combine this with
// zsys_daemonize, run after, not before that method, or the lockfile
// will hold the wrong process ID.
void
zproc_run_as (const char *lockfile, const char *group, const char *user);
// Configure the number of I/O threads that ZeroMQ will use. A good
// rule of thumb is one thread per gigabit of traffic in or out. The
// default is 1, sufficient for most applications. If the environment
// variable ZSYS_IO_THREADS is defined, that provides the default.
// Note that this method is valid only before any socket is created.
void
zproc_set_io_threads (size_t io_threads);
// Configure the number of sockets that ZeroMQ will allow. The default
// is 1024. The actual limit depends on the system, and you can query it
// by using zsys_socket_limit (). A value of zero means "maximum".
// Note that this method is valid only before any socket is created.
void
zproc_set_max_sockets (size_t max_sockets);
// Set network interface name to use for broadcasts, particularly zbeacon.
// This lets the interface be configured for test environments where required.
// For example, on Mac OS X, zbeacon cannot bind to 255.255.255.255 which is
// the default when there is no specified interface. If the environment
// variable ZSYS_INTERFACE is set, use that as the default interface name.
// Setting the interface to "*" means "use all available interfaces".
void
zproc_set_biface (const char *value);
// Return network interface to use for broadcasts, or "" if none was set.
const char *
zproc_biface (void);
// Set log identity, which is a string that prefixes all log messages sent
// by this process. The log identity defaults to the environment variable
// ZSYS_LOGIDENT, if that is set.
void
zproc_set_log_ident (const char *value);
// Sends log output to a PUB socket bound to the specified endpoint. To
// collect such log output, create a SUB socket, subscribe to the traffic
// you care about, and connect to the endpoint. Log traffic is sent as a
// single string frame, in the same format as when sent to stdout. The
// log system supports a single sender; multiple calls to this method will
// bind the same sender to multiple endpoints. To disable the sender, call
// this method with a null argument.
void
zproc_set_log_sender (const char *endpoint);
// Enable or disable logging to the system facility (syslog on POSIX boxes,
// event log on Windows). By default this is disabled.
void
zproc_set_log_system (bool logsystem);
// Log error condition - highest priority
void
zproc_log_error (const char *format, ...);
// Log warning condition - high priority
void
zproc_log_warning (const char *format, ...);
// Log normal, but significant, condition - normal priority
void
zproc_log_notice (const char *format, ...);
// Log informational message - low priority
void
zproc_log_info (const char *format, ...);
// Log debug-level message - lowest priority
void
zproc_log_debug (const char *format, ...);
// Self test of this class.
void
zproc_test (bool verbose);
// CLASS: zsock
// Create a new socket. Returns the new socket, or NULL if the new socket
// could not be created. Note that the symbol zsock_new (and other
// constructors/destructors for zsock) are redirected to the *_checked
// variant, enabling intelligent socket leak detection. This can have
// performance implications if you use a LOT of sockets. To turn off this
// redirection behaviour, define ZSOCK_NOCHECK.
zsock_t *
zsock_new (int type);
// Destroy the socket. You must use this for any socket created via the
// zsock_new method.
void
zsock_destroy (zsock_t **self_p);
// Create a PUB socket. Default action is bind.
zsock_t *
zsock_new_pub (const char *endpoint);
// Create a SUB socket, and optionally subscribe to some prefix string. Default
// action is connect.
zsock_t *
zsock_new_sub (const char *endpoint, const char *subscribe);
// Create a REQ socket. Default action is connect.
zsock_t *
zsock_new_req (const char *endpoint);
// Create a REP socket. Default action is bind.
zsock_t *
zsock_new_rep (const char *endpoint);
// Create a DEALER socket. Default action is connect.
zsock_t *
zsock_new_dealer (const char *endpoint);
// Create a ROUTER socket. Default action is bind.
zsock_t *
zsock_new_router (const char *endpoint);
// Create a PUSH socket. Default action is connect.
zsock_t *
zsock_new_push (const char *endpoint);
// Create a PULL socket. Default action is bind.
zsock_t *
zsock_new_pull (const char *endpoint);
// Create an XPUB socket. Default action is bind.
zsock_t *
zsock_new_xpub (const char *endpoint);
// Create an XSUB socket. Default action is connect.
zsock_t *
zsock_new_xsub (const char *endpoint);
// Create a PAIR socket. Default action is connect.
zsock_t *
zsock_new_pair (const char *endpoint);
// Create a STREAM socket. Default action is connect.
zsock_t *
zsock_new_stream (const char *endpoint);
// Create a SERVER socket. Default action is bind.
zsock_t *
zsock_new_server (const char *endpoint);
// Create a CLIENT socket. Default action is connect.
zsock_t *
zsock_new_client (const char *endpoint);
// Bind a socket to a formatted endpoint. For tcp:// endpoints, supports
// ephemeral ports, if you specify the port number as "*". By default
// zsock uses the IANA designated range from C000 (49152) to FFFF (65535).
// To override this range, follow the "*" with "[first-last]". Either or
// both first and last may be empty. To bind to a random port within the
// range, use "!" in place of "*".
//
// Examples:
// tcp://127.0.0.1:* bind to first free port from C000 up
// tcp://127.0.0.1:! bind to random port from C000 to FFFF
// tcp://127.0.0.1:*[60000-] bind to first free port from 60000 up
// tcp://127.0.0.1:![-60000] bind to random port from C000 to 60000
// tcp://127.0.0.1:![55000-55999]
// bind to random port from 55000 to 55999
//
// On success, returns the actual port number used, for tcp:// endpoints,
// and 0 for other transports. On failure, returns -1. Note that when using
// ephemeral ports, a port may be reused by different services without
// clients being aware. Protocols that run on ephemeral ports should take
// this into account.
int
zsock_bind (zsock_t *self, const char *format, ...);
// Returns last bound endpoint, if any.
const char *
zsock_endpoint (zsock_t *self);
// Unbind a socket from a formatted endpoint.
// Returns 0 if OK, -1 if the endpoint was invalid or the function
// isn't supported.
int
zsock_unbind (zsock_t *self, const char *format, ...);
// Connect a socket to a formatted endpoint
// Returns 0 if OK, -1 if the endpoint was invalid.
int
zsock_connect (zsock_t *self, const char *format, ...);
// Disconnect a socket from a formatted endpoint
// Returns 0 if OK, -1 if the endpoint was invalid or the function
// isn't supported.
int
zsock_disconnect (zsock_t *self, const char *format, ...);
// Attach a socket to zero or more endpoints. If endpoints is not null,
// parses as list of ZeroMQ endpoints, separated by commas, and prefixed by
// '@' (to bind the socket) or '>' (to connect the socket). Returns 0 if all
// endpoints were valid, or -1 if there was a syntax error. If the endpoint
// does not start with '@' or '>', the serverish argument defines whether
// it is used to bind (serverish = true) or connect (serverish = false).
int
zsock_attach (zsock_t *self, const char *endpoints, bool serverish);
// Returns socket type as printable constant string.
const char *
zsock_type_str (zsock_t *self);
// Send a 'picture' message to the socket (or actor). The picture is a
// string that defines the type of each frame. This makes it easy to send
// a complex multiframe message in one call. The picture can contain any
// of these characters, each corresponding to one or two arguments:
//
// i = int (signed)
// 1 = uint8_t
// 2 = uint16_t
// 4 = uint32_t
// 8 = uint64_t
// s = char *
// b = byte *, size_t (2 arguments)
// c = zchunk_t *
// f = zframe_t *
// h = zhashx_t *
// U = zuuid_t *
// p = void * (sends the pointer value, only meaningful over inproc)
// m = zmsg_t * (sends all frames in the zmsg)
// z = sends zero-sized frame (0 arguments)
// u = uint (deprecated)
//
// Note that s, b, c, and f are encoded the same way and the choice is
// offered as a convenience to the sender, which may or may not already
// have data in a zchunk or zframe. Does not change or take ownership of
// any arguments. Returns 0 if successful, -1 if sending failed for any
// reason.
int
zsock_send (void *self, const char *picture, ...);
// Send a 'picture' message to the socket (or actor). This is a va_list
// version of zsock_send (), so please consult its documentation for the
// details.
int
zsock_vsend (void *self, const char *picture, va_list argptr);
// Receive a 'picture' message from the socket (or actor). See zsock_send for
// the format and meaning of the picture. Returns the picture elements into
// a series of pointers as provided by the caller:
//
// i = int * (stores signed integer)
// 4 = uint32_t * (stores 32-bit unsigned integer)
// 8 = uint64_t * (stores 64-bit unsigned integer)
// s = char ** (allocates new string)
// b = byte **, size_t * (2 arguments) (allocates memory)
// c = zchunk_t ** (creates zchunk)
// f = zframe_t ** (creates zframe)
// U = zuuid_t * (creates a zuuid with the data)
// h = zhashx_t ** (creates zhashx)
// p = void ** (stores pointer)
// m = zmsg_t ** (creates a zmsg with the remaining frames)
// z = null, asserts empty frame (0 arguments)
// u = uint * (stores unsigned integer, deprecated)
//
// Note that zsock_recv creates the returned objects, and the caller must
// destroy them when finished with them. The supplied pointers do not need
// to be initialized. Returns 0 if successful, or -1 if it failed to recv
// a message, in which case the pointers are not modified. When message
// frames are truncated (a short message), sets return values to zero/null.
// If an argument pointer is NULL, does not store any value (skips it).
// An 'n' picture matches an empty frame; if the message does not match,
// the method will return -1.
int
zsock_recv (void *self, const char *picture, ...);
// Receive a 'picture' message from the socket (or actor). This is a
// va_list version of zsock_recv (), so please consult its documentation
// for the details.
int
zsock_vrecv (void *self, const char *picture, va_list argptr);
// Send a binary encoded 'picture' message to the socket (or actor). This
// method is similar to zsock_send, except the arguments are encoded in a
// binary format that is compatible with zproto, and is designed to reduce
// memory allocations. The pattern argument is a string that defines the
// type of each argument. Supports these argument types:
//
// pattern C type zproto type:
// 1 uint8_t type = "number" size = "1"
// 2 uint16_t type = "number" size = "2"
// 4 uint32_t type = "number" size = "3"
// 8 uint64_t type = "number" size = "4"
// s char *, 0-255 chars type = "string"
// S char *, 0-2^32-1 chars type = "longstr"
// c zchunk_t * type = "chunk"
// f zframe_t * type = "frame"
// u zuuid_t * type = "uuid"
// m zmsg_t * type = "msg"
// p void *, sends pointer value, only over inproc
//
// Does not change or take ownership of any arguments. Returns 0 if
// successful, -1 if sending failed for any reason.
int
zsock_bsend (void *self, const char *picture, ...);
// Receive a binary encoded 'picture' message from the socket (or actor).
// This method is similar to zsock_recv, except the arguments are encoded
// in a binary format that is compatible with zproto, and is designed to
// reduce memory allocations. The pattern argument is a string that defines
// the type of each argument. See zsock_bsend for the supported argument
// types. All arguments must be pointers; this call sets them to point to
// values held on a per-socket basis.
// Note that zsock_brecv creates the returned objects, and the caller must
// destroy them when finished with them. The supplied pointers do not need
// to be initialized. Returns 0 if successful, or -1 if it failed to read
// a message.
int
zsock_brecv (void *self, const char *picture, ...);
// Return socket routing ID if any. This returns 0 if the socket is not
// of type ZMQ_SERVER or if no request was already received on it.
uint32_t
zsock_routing_id (zsock_t *self);
// Set routing ID on socket. The socket MUST be of type ZMQ_SERVER.
// This will be used when sending messages on the socket via the zsock API.
void
zsock_set_routing_id (zsock_t *self, uint32_t routing_id);
// Set socket to use unbounded pipes (HWM=0); use this in cases when you are
// totally certain the message volume can fit in memory. This method works
// across all versions of ZeroMQ. Takes a polymorphic socket reference.
void
zsock_set_unbounded (void *self);
// Send a signal over a socket. A signal is a short message carrying a
// success/failure code (by convention, 0 means OK). Signals are encoded
// to be distinguishable from "normal" messages. Accepts a zsock_t or a
// zactor_t argument, and returns 0 if successful, -1 if the signal could
// not be sent. Takes a polymorphic socket reference.
int
zsock_signal (void *self, byte status);
// Wait on a signal. Use this to coordinate between threads, over pipe
// pairs. Blocks until the signal is received. Returns -1 on error, 0 or
// greater on success. Accepts a zsock_t or a zactor_t as argument.
// Takes a polymorphic socket reference.
int
zsock_wait (void *self);
// If there is a partial message still waiting on the socket, remove and
// discard it. This is useful when reading partial messages, to get specific
// message types.
void
zsock_flush (void *self);
// Probe the supplied object, and report if it looks like a zsock_t.
// Takes a polymorphic socket reference.
bool
zsock_is (void *self);
// Probe the supplied reference. If it looks like a zsock_t instance, return
// the underlying libzmq socket handle; else if it looks like a file
// descriptor, return NULL; else if it looks like a libzmq socket handle,
// return the supplied value. Takes a polymorphic socket reference.
void *
zsock_resolve (void *self);
// Get socket option `heartbeat_ivl`.
int
zsock_heartbeat_ivl (void *self);
// Set socket option `heartbeat_ivl`.
void
zsock_set_heartbeat_ivl (void *self, int heartbeat_ivl);
// Get socket option `heartbeat_ttl`.
int
zsock_heartbeat_ttl (void *self);
// Set socket option `heartbeat_ttl`.
void
zsock_set_heartbeat_ttl (void *self, int heartbeat_ttl);
// Get socket option `heartbeat_timeout`.
int
zsock_heartbeat_timeout (void *self);
// Set socket option `heartbeat_timeout`.
void
zsock_set_heartbeat_timeout (void *self, int heartbeat_timeout);
// Get socket option `tos`.
int
zsock_tos (void *self);
// Set socket option `tos`.
void
zsock_set_tos (void *self, int tos);
// Set socket option `router_handover`.
void
zsock_set_router_handover (void *self, int router_handover);
// Set socket option `router_mandatory`.
void
zsock_set_router_mandatory (void *self, int router_mandatory);
// Set socket option `probe_router`.
void
zsock_set_probe_router (void *self, int probe_router);
// Set socket option `req_relaxed`.
void
zsock_set_req_relaxed (void *self, int req_relaxed);
// Set socket option `req_correlate`.
void
zsock_set_req_correlate (void *self, int req_correlate);
// Set socket option `conflate`.
void
zsock_set_conflate (void *self, int conflate);
// Get socket option `zap_domain`.
char *
zsock_zap_domain (void *self);
// Set socket option `zap_domain`.
void
zsock_set_zap_domain (void *self, const char *zap_domain);
// Get socket option `mechanism`.
int
zsock_mechanism (void *self);
// Get socket option `plain_server`.
int
zsock_plain_server (void *self);
// Set socket option `plain_server`.
void
zsock_set_plain_server (void *self, int plain_server);
// Get socket option `plain_username`.
char *
zsock_plain_username (void *self);
// Set socket option `plain_username`.
void
zsock_set_plain_username (void *self, const char *plain_username);
// Get socket option `plain_password`.
char *
zsock_plain_password (void *self);
// Set socket option `plain_password`.
void
zsock_set_plain_password (void *self, const char *plain_password);
// Get socket option `curve_server`.
int
zsock_curve_server (void *self);
// Set socket option `curve_server`.
void
zsock_set_curve_server (void *self, int curve_server);
// Get socket option `curve_publickey`.
char *
zsock_curve_publickey (void *self);
// Set socket option `curve_publickey`.
void
zsock_set_curve_publickey (void *self, const char *curve_publickey);
// Set socket option `curve_publickey` from 32-octet binary
void
zsock_set_curve_publickey_bin (void *self, const byte *curve_publickey);
// Get socket option `curve_secretkey`.
char *
zsock_curve_secretkey (void *self);
// Set socket option `curve_secretkey`.
void
zsock_set_curve_secretkey (void *self, const char *curve_secretkey);
// Set socket option `curve_secretkey` from 32-octet binary
void
zsock_set_curve_secretkey_bin (void *self, const byte *curve_secretkey);
// Get socket option `curve_serverkey`.
char *
zsock_curve_serverkey (void *self);
// Set socket option `curve_serverkey`.
void
zsock_set_curve_serverkey (void *self, const char *curve_serverkey);
// Set socket option `curve_serverkey` from 32-octet binary
void
zsock_set_curve_serverkey_bin (void *self, const byte *curve_serverkey);
// Get socket option `gssapi_server`.
int
zsock_gssapi_server (void *self);
// Set socket option `gssapi_server`.
void
zsock_set_gssapi_server (void *self, int gssapi_server);
// Get socket option `gssapi_plaintext`.
int
zsock_gssapi_plaintext (void *self);
// Set socket option `gssapi_plaintext`.
void
zsock_set_gssapi_plaintext (void *self, int gssapi_plaintext);
// Get socket option `gssapi_principal`.
char *
zsock_gssapi_principal (void *self);
// Set socket option `gssapi_principal`.
void
zsock_set_gssapi_principal (void *self, const char *gssapi_principal);
// Get socket option `gssapi_service_principal`.
char *
zsock_gssapi_service_principal (void *self);
// Set socket option `gssapi_service_principal`.
void
zsock_set_gssapi_service_principal (void *self, const char *gssapi_service_principal);
// Get socket option `ipv6`.
int
zsock_ipv6 (void *self);
// Set socket option `ipv6`.
void
zsock_set_ipv6 (void *self, int ipv6);
// Get socket option `immediate`.
int
zsock_immediate (void *self);
// Set socket option `immediate`.
void
zsock_set_immediate (void *self, int immediate);
// Set socket option `router_raw`.
void
zsock_set_router_raw (void *self, int router_raw);
// Get socket option `ipv4only`.
int
zsock_ipv4only (void *self);
// Set socket option `ipv4only`.
void
zsock_set_ipv4only (void *self, int ipv4only);
// Set socket option `delay_attach_on_connect`.
void
zsock_set_delay_attach_on_connect (void *self, int delay_attach_on_connect);
// Get socket option `type`.
int
zsock_type (void *self);
// Get socket option `sndhwm`.
int
zsock_sndhwm (void *self);
// Set socket option `sndhwm`.
void
zsock_set_sndhwm (void *self, int sndhwm);
// Get socket option `rcvhwm`.
int
zsock_rcvhwm (void *self);
// Set socket option `rcvhwm`.
void
zsock_set_rcvhwm (void *self, int rcvhwm);
// Get socket option `affinity`.
int
zsock_affinity (void *self);
// Set socket option `affinity`.
void
zsock_set_affinity (void *self, int affinity);
// Set socket option `subscribe`.
void
zsock_set_subscribe (void *self, const char *subscribe);
// Set socket option `unsubscribe`.
void
zsock_set_unsubscribe (void *self, const char *unsubscribe);
// Get socket option `identity`.
char *
zsock_identity (void *self);
// Set socket option `identity`.
void
zsock_set_identity (void *self, const char *identity);
// Get socket option `rate`.
int
zsock_rate (void *self);
// Set socket option `rate`.
void
zsock_set_rate (void *self, int rate);
// Get socket option `recovery_ivl`.
int
zsock_recovery_ivl (void *self);
// Set socket option `recovery_ivl`.
void
zsock_set_recovery_ivl (void *self, int recovery_ivl);
// Get socket option `sndbuf`.
int
zsock_sndbuf (void *self);
// Set socket option `sndbuf`.
void
zsock_set_sndbuf (void *self, int sndbuf);
// Get socket option `rcvbuf`.
int
zsock_rcvbuf (void *self);
// Set socket option `rcvbuf`.
void
zsock_set_rcvbuf (void *self, int rcvbuf);
// Get socket option `linger`.
int
zsock_linger (void *self);
// Set socket option `linger`.
void
zsock_set_linger (void *self, int linger);
// Get socket option `reconnect_ivl`.
int
zsock_reconnect_ivl (void *self);
// Set socket option `reconnect_ivl`.
void
zsock_set_reconnect_ivl (void *self, int reconnect_ivl);
// Get socket option `reconnect_ivl_max`.
int
zsock_reconnect_ivl_max (void *self);
// Set socket option `reconnect_ivl_max`.
void
zsock_set_reconnect_ivl_max (void *self, int reconnect_ivl_max);
// Get socket option `backlog`.
int
zsock_backlog (void *self);
// Set socket option `backlog`.
void
zsock_set_backlog (void *self, int backlog);
// Get socket option `maxmsgsize`.
int
zsock_maxmsgsize (void *self);
// Set socket option `maxmsgsize`.
void
zsock_set_maxmsgsize (void *self, int maxmsgsize);
// Get socket option `multicast_hops`.
int
zsock_multicast_hops (void *self);
// Set socket option `multicast_hops`.
void
zsock_set_multicast_hops (void *self, int multicast_hops);
// Get socket option `rcvtimeo`.
int
zsock_rcvtimeo (void *self);
// Set socket option `rcvtimeo`.
void
zsock_set_rcvtimeo (void *self, int rcvtimeo);
// Get socket option `sndtimeo`.
int
zsock_sndtimeo (void *self);
// Set socket option `sndtimeo`.
void
zsock_set_sndtimeo (void *self, int sndtimeo);
// Set socket option `xpub_verbose`.
void
zsock_set_xpub_verbose (void *self, int xpub_verbose);
// Get socket option `tcp_keepalive`.
int
zsock_tcp_keepalive (void *self);
// Set socket option `tcp_keepalive`.
void
zsock_set_tcp_keepalive (void *self, int tcp_keepalive);
// Get socket option `tcp_keepalive_idle`.
int
zsock_tcp_keepalive_idle (void *self);
// Set socket option `tcp_keepalive_idle`.
void
zsock_set_tcp_keepalive_idle (void *self, int tcp_keepalive_idle);
// Get socket option `tcp_keepalive_cnt`.
int
zsock_tcp_keepalive_cnt (void *self);
// Set socket option `tcp_keepalive_cnt`.
void
zsock_set_tcp_keepalive_cnt (void *self, int tcp_keepalive_cnt);
// Get socket option `tcp_keepalive_intvl`.
int
zsock_tcp_keepalive_intvl (void *self);
// Set socket option `tcp_keepalive_intvl`.
void
zsock_set_tcp_keepalive_intvl (void *self, int tcp_keepalive_intvl);
// Get socket option `tcp_accept_filter`.
char *
zsock_tcp_accept_filter (void *self);
// Set socket option `tcp_accept_filter`.
void
zsock_set_tcp_accept_filter (void *self, const char *tcp_accept_filter);
// Get socket option `rcvmore`.
int
zsock_rcvmore (void *self);
// Get socket option `fd`.
SOCKET
zsock_fd (void *self);
// Get socket option `events`.
int
zsock_events (void *self);
// Get socket option `last_endpoint`.
char *
zsock_last_endpoint (void *self);
// Self test of this class.
void
zsock_test (bool verbose);
// CLASS: zstr
// Receive C string from socket. Caller must free returned string using
// zstr_free(). Returns NULL if the context is being terminated or the
// process was interrupted.
char *
zstr_recv (void *source);
// Receive a series of strings (until NULL) from multipart data.
// Each string is allocated and filled with string data; if there
// are not enough frames, unallocated strings are set to NULL.
// Returns -1 if the message could not be read, else returns the
// number of strings filled, zero or more. Free each returned string
// using zstr_free(). If not enough strings are provided, remaining
// multipart frames in the message are dropped.
int
zstr_recvx (void *source, char **string_p, ...);
// Send a C string to a socket, as a frame. The string is sent without
// trailing null byte; to read this you can use zstr_recv, or a similar
// method that adds a null terminator on the received string. String
// may be NULL, which is sent as "".
int
zstr_send (void *dest, const char *string);
// Send a C string to a socket, as zstr_send(), with a MORE flag, so that
// you can send further strings in the same multi-part message.
int
zstr_sendm (void *dest, const char *string);
// Send a formatted string to a socket. Note that you should NOT use
// user-supplied strings in the format (they may contain '%' which
// will create security holes).
int
zstr_sendf (void *dest, const char *format, ...);
// Send a formatted string to a socket, as for zstr_sendf(), with a
// MORE flag, so that you can send further strings in the same multi-part
// message.
int
zstr_sendfm (void *dest, const char *format, ...);
// Send a series of strings (until NULL) as multipart data
// Returns 0 if the strings could be sent OK, or -1 on error.
int
zstr_sendx (void *dest, const char *string, ...);
// Accepts a void pointer and returns a fresh character string. If source
// is null, returns an empty string.
char *
zstr_str (void *source);
// Free a provided string, and nullify the parent pointer. Safe to call on
// a null pointer.
void
zstr_free (char **string_p);
// Self test of this class.
void
zstr_test (bool verbose);
// CLASS: ztrie
// Creates a new ztrie.
ztrie_t *
ztrie_new (char delimiter);
// Destroy the ztrie.
void
ztrie_destroy (ztrie_t **self_p);
// Inserts a new route into the tree and attaches the data. Returns -1
// if the route already exists, otherwise 0. This method takes ownership of
// the provided data if a destroy_data_fn is provided.
int
ztrie_insert_route (ztrie_t *self, const char *path, void *data, ztrie_destroy_data_fn destroy_data_fn);
// Removes a route from the trie and destroys its data. Returns -1 if the
// route does not exists, otherwise 0.
// the start of the list call zlist_first (). Advances the cursor.
int
ztrie_remove_route (ztrie_t *self, const char *path);
// Returns true if the path matches a route in the tree, otherwise false.
bool
ztrie_matches (ztrie_t *self, const char *path);
// Returns the data of a matched route from last ztrie_matches. If the path
// did not match, returns NULL. Do not delete the data as it's owned by
// ztrie.
void *
ztrie_hit_data (ztrie_t *self);
// Returns the count of parameters that a matched route has.
size_t
ztrie_hit_parameter_count (ztrie_t *self);
// Returns the parameters of a matched route with named regexes from last
// ztrie_matches. If the path did not match or the route did not contain any
// named regexes, returns NULL.
zhashx_t *
ztrie_hit_parameters (ztrie_t *self);
// Returns the asterisk matched part of a route, if there has been no match
// or no asterisk match, returns NULL.
const char *
ztrie_hit_asterisk_match (ztrie_t *self);
// Print the trie
void
ztrie_print (ztrie_t *self);
// Self test of this class.
void
ztrie_test (bool verbose);
// CLASS: zuuid
// Create a new UUID object.
zuuid_t *
zuuid_new (void);
// Destroy a specified UUID object.
void
zuuid_destroy (zuuid_t **self_p);
// Create UUID object from supplied ZUUID_LEN-octet value.
zuuid_t *
zuuid_new_from (const byte *source);
// Set UUID to new supplied ZUUID_LEN-octet value.
void
zuuid_set (zuuid_t *self, const byte *source);
// Set UUID to new supplied string value skipping '-' and '{' '}'
// optional delimiters. Return 0 if OK, else returns -1.
int
zuuid_set_str (zuuid_t *self, const char *source);
// Return UUID binary data.
const byte *
zuuid_data (zuuid_t *self);
// Return UUID binary size
size_t
zuuid_size (zuuid_t *self);
// Returns UUID as string
const char *
zuuid_str (zuuid_t *self);
// Return UUID in the canonical string format: 8-4-4-4-12, in lower
// case. Caller does not modify or free returned value. See
// http://en.wikipedia.org/wiki/Universally_unique_identifier
const char *
zuuid_str_canonical (zuuid_t *self);
// Store UUID blob in target array
void
zuuid_export (zuuid_t *self, byte *target);
// Check if UUID is same as supplied value
bool
zuuid_eq (zuuid_t *self, const byte *compare);
// Check if UUID is different from supplied value
bool
zuuid_neq (zuuid_t *self, const byte *compare);
// Make copy of UUID object; if uuid is null, or memory was exhausted,
// returns null.
zuuid_t *
zuuid_dup (zuuid_t *self);
// Self test of this class.
void
zuuid_test (bool verbose);
'''
cdefs = re.sub(r';[^;]*\bva_list\b[^;]*;', ';', cdefs, flags=re.S) # we don't support anything with a va_list arg
ffi.cdef(cdefs)
| mpl-2.0 | -4,268,107,658,003,528,700 | 36.159807 | 113 | 0.627411 | false |
hperala/kontuwikibot | scripts/spamremove.py | 1 | 3739 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to remove links that are being or have been spammed.
Usage:
spamremove.py www.spammedsite.com
It will use Special:Linksearch to find the pages on the wiki that link to
that site, then for each page make a proposed change consisting of removing
all the lines where that url occurs. You can choose to:
* accept the changes as proposed
* edit the page yourself to remove the offending link
* not change the page in question
Command line options:
-always Do not ask, but remove the lines automatically. Be very
careful in using this option!
-namespace: Filters the search to a given namespace. If this is specified
multiple times it will search all given namespaces
"""
#
# (C) Pywikibot team, 2007-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: c728e9bcc488a9695bca883a5fc654f3cf0197b9 $'
#
import pywikibot
from pywikibot import i18n
from pywikibot.editor import TextEditor
def main(*args):
    """
    Process command line arguments and perform task.

    Finds pages linking to the spammed site via Special:Linksearch and,
    per page, proposes removing every line containing the URL. The user
    may accept, edit manually, or skip (unless -always was given).

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    always = False
    namespaces = []
    spamSite = ''
    for arg in pywikibot.handle_args(args):
        if arg == "-always":
            always = True
        elif arg.startswith('-namespace:'):
            # Namespaces may be given numerically or by name.
            try:
                namespaces.append(int(arg[len('-namespace:'):]))
            except ValueError:
                namespaces.append(arg[len('-namespace:'):])
        else:
            # Any other positional argument is the spammed site.
            spamSite = arg

    if not spamSite:
        pywikibot.showHelp()
        pywikibot.output(u"No spam site specified.")
        return

    mysite = pywikibot.Site()
    # External-link usage search; content=True preloads page text.
    pages = mysite.exturlusage(spamSite, namespaces=namespaces, content=True)

    summary = i18n.twtranslate(mysite, 'spamremove-remove',
                               {'url': spamSite})
    for i, p in enumerate(pages, 1):
        text = p.text
        if spamSite not in text:
            continue
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                         % p.title())
        lines = text.split('\n')
        newpage = []
        lastok = ""
        # lastok tracks the last kept line so that one line of context is
        # printed before and after each highlighted (to-be-removed) line.
        for line in lines:
            if spamSite in line:
                if lastok:
                    pywikibot.output(lastok)
                pywikibot.output('\03{lightred}%s\03{default}' % line)
                lastok = None
            else:
                newpage.append(line)
                if line.strip():
                    if lastok is None:
                        pywikibot.output(line)
                    lastok = line
        if always:
            answer = "y"
        else:
            answer = pywikibot.input_choice(
                u'\nDelete the red lines?',
                [('yes', 'y'), ('no', 'n'), ('edit', 'e')],
                'n', automatic_quit=False)
        if answer == "n":
            continue
        elif answer == "e":
            # Open an external editor with the first occurrence highlighted.
            editor = TextEditor()
            newtext = editor.edit(text, highlight=spamSite,
                                  jumpIndex=text.find(spamSite))
        else:
            newtext = "\n".join(newpage)
        if newtext != text:
            p.text = newtext
            p.save(summary)
    else:
        # for/else: no break above, so this always runs after the loop.
        # NOTE(review): the locals() check is a fragile way to detect an
        # empty generator (i is only bound if at least one page was seen).
        if "i" not in locals():
            pywikibot.output('No page found.')
        elif i == 1:
            pywikibot.output('1 pages done.')
        else:
            pywikibot.output('%d pages done.' % i)


if __name__ == '__main__':
    main()
| mit | -3,847,130,002,318,686,000 | 28.912 | 79 | 0.554426 | false |
1200wd/1200wd_addons | account_bank_match/models/account_bank_match.py | 1 | 10826 | # -*- coding: utf-8 -*-
##############################################################################
#
# Account Bank Match
# Copyright (C) 2016 May
# 1200 Web Development
# http://1200wd.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# TODO: Do not open old reconcile view when importing bank statements
from openerp.tools.translate import _
import logging
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
from openerp.exceptions import ValidationError
import re
from datetime import date, timedelta
_logger = logging.getLogger(__name__)
# Object to store reference patterns of orders and invoices to look for in statement lines
class AccountBankMatchReference(models.Model):
    # Stores reference patterns (regexes) of orders, invoices, accounts or
    # partners to look for in bank statement lines.
    _name = "account.bank.match.reference"
    _order = "sequence,name"

    name = fields.Char(string="Reference Pattern", size=256,
                       help="Regular expression pattern to match reference")
    model = fields.Selection(
        [
            ('sale.order', 'Sale Order'),
            ('account.invoice', 'Invoice'),
            ('account.account', 'Account'),
            ('res.partner', 'Partner'),
        ], select=True, required=True
    )
    sequence = fields.Integer('Sequence')
    active = fields.Boolean('Active', default=True, help='Set to inactive to disable Match Reference')
    account_journal_id = fields.Many2one('account.journal', string='Journal Filter',
                                         help='Match only applies to selected journal. Leave empty to match all journals.', ondelete="cascade")
    score = fields.Integer("Score to Share", default=0, required=True, help="Total score to share among all matches of this rule. If 3 matches are found and the score to share is 30 then every match gets a score of 10.")
    score_item = fields.Integer("Score per Match", default=0, required=True, help="Score for each match. Will be added to the shared score.")
    company_id = fields.Many2one('res.company', string='Company', required=True, ondelete="cascade")
    account_account_id = fields.Many2one('account.account', string="Resulting Account", ondelete="cascade",
                                         domain="[('type', '=', 'other'), ('company_id', '=', company_id)]")
    partner_bank_account = fields.Char(string="Partner Bank Account", size=64, help="Remote owner bank account number to match")

    # FIXME: Disabled because it causes problems when matching with account_journal_id and empty names
    # _sql_constraints = [
    #     ('reference_pattern_name_company_unique', 'unique (name, model, company_id)', 'Use reference pattern only once for each model and for each Company')
    # ]

    @api.one
    @api.constrains('name')
    def _check_name_format(self):
        # Whitespace in a pattern would never match a trimmed statement
        # reference, so reject it up front.
        if self.name and re.search(r"\s", self.name):
            raise ValidationError('Please enter reference pattern without any whitespace character such as space or tab')

    @api.one
    def copy(self, default=None):
        # Suffix the pattern on duplication so the copy stays distinguishable.
        default = dict(default or {})
        default['name'] = _('%s_copy') % self.name
        return super(AccountBankMatchReference, self).copy(default)
class AccountBankMatchReferenceCreate(models.TransientModel):
    # Wizard (transient model) to quickly create an account.bank.match.reference
    # record that maps a bank account / reference pattern to a result account.
    _name = "account.bank.match.reference.create"

    name = fields.Char(string="Reference Pattern", size=256,
                       help="Regular expression pattern to match reference. Leave emtpy to only match on Bank Account")
    partner_bank_account = fields.Char(string="Partner Bank Account", size=64, help="Remote owner bank account number to match")
    account_journal_id = fields.Many2one('account.journal', string='Journal Filter', ondelete="cascade",
                                         help='Match only applies to selected journal. Leave empty to match all journals.')
    company_id = fields.Many2one('res.company', string='Company', required=True, ondelete="cascade")
    account_account_id = fields.Many2one('account.account', string="Resulting Account", ondelete="cascade",
                                         domain="[('type', 'in', ['other','receivable','liquidity','payable']), ('company_id', '=', company_id)]")

    @api.multi
    def action_match_reference_save(self):
        # Persist the wizard values as a match reference targeting the
        # account.account model with a fixed per-match score of 100.
        data = {
            'name': self.name,
            'model': 'account.account',
            'sequence': 50,
            'account_journal_id': self.account_journal_id.id,
            'score_item': 100,
            'company_id': self.company_id.id,
            'account_account_id': self.account_account_id.id,
            'partner_bank_account': self.partner_bank_account,
        }
        self.env['account.bank.match.reference'].create(data)
# Object to store found matches to orders/invoices in statement lines
class AccountBankMatch(models.Model):
    # Candidate match (to a sale order, invoice or account) found for a bank
    # statement line, together with a confidence score.
    _name = "account.bank.match"

    @api.model
    def _get_default_writeoff(self):
        # Default write-off journal is taken from the bank-match configuration.
        configs = self.env['account.config.settings'].get_default_bank_match_configuration(self)
        return configs.get('match_writeoff_journal_id') or 0

    name = fields.Char(string="Reference", size=32, required=True,
                       help="Reference of match to order, invoice or account")
    so_ref = fields.Char('Sale Order Reference')
    model = fields.Selection(
        [
            ('sale.order', 'Sale Order'),
            ('account.invoice', 'Invoice'),
            ('account.account', 'Account'),
        ], select=True, required=True
    )
    statement_line_id = fields.Many2one('account.bank.statement.line', string="Bank Statement Line",
                                        required=True, index=True, ondelete="cascade")
    description = fields.Char(string="Description", size=256)
    score = fields.Integer("Score")
    writeoff_journal_id = fields.Many2one('account.journal', string="Write-off Journal", ondelete="cascade",
                                          default=_get_default_writeoff)
    writeoff_difference = fields.Boolean("Write-off Payment Difference", default=True)
    match_selected = fields.Boolean("Winning match", default=False)

    # TODO: Add constraint statement_line_id and name must be unique

    @api.multi
    def cron_cleanup_matches(self):
        # Scheduled job: purge matches older than a week; they are only
        # transient suggestions. Returns False when no cursor is available.
        try:
            datestr = (date.today() - timedelta(days=7)).__str__()
            self._cr.execute("DELETE FROM account_bank_match abm WHERE abm.create_date < %s", (datestr,))
        except AttributeError:
            return False
        return True

    @api.multi
    def compute_payment_difference(self):
        # Compute the residual difference between the matched invoice and the
        # statement line amount; 0 for non-invoice matches or missing invoices.
        for m in self:
            if m.model == 'account.invoice':
                # Sign depends on the invoice type: customer invoices are paid
                # in, supplier invoices are paid out.
                SIGN = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1}
                invoice = self.env[m.model].search([('number', '=', m.name)])
                if not invoice:
                    _logger.debug("1200wd - compute_payment_difference - invoice %s not found" % m.name)
                    m.payment_difference = 0
                else:
                    direction = SIGN[invoice.type]
                    m.payment_difference = invoice.residual + (direction * m.statement_line_id.amount)
            else:
                # TODO: Add difference calculation for sale.order model
                m.payment_difference = 0

    payment_difference = fields.Float(string="Payment Difference", digits=dp.get_precision('Account'),
                                      readonly=True, compute='compute_payment_difference')

    @api.multi
    def action_match_confirm(self):
        # Confirm this candidate as the winning match and reconcile the
        # statement line accordingly.
        self.ensure_one()
        self.statement_line_id.show_errors = True
        self.match_selected = True
        vals = {}
        if self.model == 'sale.order':
            vals['so_ref'] = self.name
            vals['name'] = '/'
        elif self.model == 'account.invoice':
            vals['name'] = self.name or '/'
        elif self.model == 'account.account':
            # For direct account matches, name holds the account id.
            account_id = int(self.name) or 0
            self.statement_line_id.create_account_move(account_id)
        vals = self.statement_line_id.order_invoice_lookup(vals)
        self.statement_line_id.write(vals)
        if self.model != 'account.account':
            self.statement_line_id.auto_reconcile(type='manual')
        return True
# Object to store found matches to orders/invoices in statement lines
class AccountBankMatchRule(models.Model):
    """
    Search/scoring rule used to match bank statement lines to business
    documents. Rules use Odoo-style domains with @model.field@ placeholders.

    Example Rule:
    { 'name': "Sale Order amount match",
      'score_per_match': 100,
      'rule': "[('amount', '>', '@sale_order.amount-0.01@'), ('amount', '<', '@sale_order.amount-0.01@')]",
      'type': "sale.order" }
    """
    _name = "account.bank.match.rule"

    name = fields.Char(string="Title", size=256, required=True)
    model = fields.Selection(
        [
            ('sale.order', 'Sale Order'),
            ('account.invoice', 'Invoice'),
            # ('account.move.line', 'Account Move'),
            ('res.partner', 'Partner'),
            ('account.bank.statement.line','Bank Statement Line'),
        ], select=True, required=True, help="Model used for search rule"
    )
    score = fields.Integer("Score to Share", default=0, required=True, help="Total score to share among all matches of this rule. If 3 matches are found and the score to share is 30 then every match gets a score of 10.")
    score_item = fields.Integer("Score per Match", default=0, required=True, help="Score for each match. Will be added to the shared score.")
    active = fields.Boolean('Active', default=True, help='Set to inactive to disable rule')
    type = fields.Selection(
        [
            ('extraction', 'Extraction'),
            ('bonus', 'Bonus'),
        ], select=True, required=True, default='extraction')
    rule = fields.Text(string="Match Rule", required=True,
                       help="Rule to match a bank statement line to a sale order, invoice or account move. The rules should follow the Odoo style domain format.")
    script = fields.Text(string="Run Script",
                         help="Run Python code after rule matched. Be carefull what you enter here, wrong code could damage your Odoo database")
    company_id = fields.Many2one('res.company', string='Company', ondelete="cascade", required=False)
| agpl-3.0 | -1,092,523,175,278,956,900 | 46.69163 | 220 | 0.626362 | false |
tbenthompson/okada_wrapper | setup.py | 1 | 1093 | from numpy.distutils.core import setup, Extension
version = open('VERSION').read().strip('\n')
# -g compiles with debugging information.
# -O0 means compile with no optimization, try -O3 for blazing speed
compile_args = ['-O3']
ext = []
ext.append(Extension('DC3D',
sources = ['okada_wrapper/DC3D.f',
'okada_wrapper/DC3D.pyf'],
extra_compile_args=compile_args))
try:
import pypandoc
description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
description = open('README.md').read()
setup(
packages = ['okada_wrapper'],
install_requires = ['numpy'],
zip_safe = False,
ext_modules=ext,
name = "okada_wrapper",
version = version,
description = 'Python and MATLAB wrappers for the Okada Green\'s function codes',
# long_description = description,
url = 'https://github.com/tbenthompson/okada_wrapper',
author = 'Ben Thompson',
author_email = 't.ben.thompson@gmail.com',
license = 'MIT',
keywords = ['okada', 'elastic', 'halfspace'],
classifiers = []
)
| mit | 2,274,454,879,333,812,200 | 28.540541 | 84 | 0.637694 | false |
Yelp/paasta | paasta_tools/cassandracluster_tools.py | 1 | 7272 | # Copyright 2015-2019 Yelp Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List
from typing import Mapping
from typing import Optional
import service_configuration_lib
from paasta_tools.kubernetes_tools import sanitise_kubernetes_name
from paasta_tools.kubernetes_tools import sanitised_cr_name
from paasta_tools.long_running_service_tools import LongRunningServiceConfig
from paasta_tools.long_running_service_tools import LongRunningServiceConfigDict
from paasta_tools.utils import BranchDictV2
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import decompose_job_id
from paasta_tools.utils import deep_merge_dictionaries
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import InvalidJobNameError
from paasta_tools.utils import load_service_instance_config
from paasta_tools.utils import load_v2_deployments_json
KUBERNETES_NAMESPACE = "paasta-cassandraclusters"
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class CassandraClusterDeploymentConfigDict(LongRunningServiceConfigDict, total=False):
    # Optional soa-configs keys specific to cassandracluster instances.
    bounce_margin_factor: float  # fraction used when bouncing; see get_bounce_margin_factor
    replicas: int  # desired number of pods; see get_instances
class CassandraClusterDeploymentConfig(LongRunningServiceConfig):
    """Deployment configuration for one cassandracluster instance of a service.

    Wraps the merged soa-configs dict and exposes the values the
    cassandracluster tooling needs: replica count, Kubernetes namespace,
    bounce settings and smartstack registrations.
    """

    # Bug fix: removed a stray, broken import statement
    # ("from dunder_mifflin import papers") that had been injected into the
    # class body and would raise ImportError when this module was loaded.

    config_dict: CassandraClusterDeploymentConfigDict

    config_filename_prefix = "cassandracluster"

    def __init__(
        self,
        service: str,
        cluster: str,
        instance: str,
        config_dict: CassandraClusterDeploymentConfigDict,
        branch_dict: Optional[BranchDictV2],
        soa_dir: str = DEFAULT_SOA_DIR,
    ) -> None:
        super().__init__(
            cluster=cluster,
            instance=instance,
            service=service,
            soa_dir=soa_dir,
            config_dict=config_dict,
            branch_dict=branch_dict,
        )

    def get_service_name_smartstack(self) -> str:
        """
        To support apollo we always register in
        cassandra_<cluster>.main
        """
        return "cassandra_" + self.get_instance()

    def get_nerve_namespace(self) -> str:
        """
        To support apollo we always register in
        cassandra_<cluster>.main
        """
        return "main"

    def get_registrations(self) -> List[str]:
        """
        To support apollo we always register in
        cassandra_<cluster>.main

        Explicit registrations from soa-configs win; invalid entries are
        logged but still returned unchanged (legacy behavior).
        """
        registrations = self.config_dict.get("registrations", [])
        for registration in registrations:
            try:
                decompose_job_id(registration)
            except InvalidJobNameError:
                log.error(
                    "Provided registration {} for service "
                    "{} is invalid".format(registration, self.service)
                )
        return registrations or [
            compose_job_id(self.get_service_name_smartstack(), "main")
        ]

    def get_kubernetes_namespace(self) -> str:
        """All cassandracluster CRs live in one dedicated namespace."""
        return KUBERNETES_NAMESPACE

    def get_instances(self, with_limit: bool = True) -> int:
        """Desired number of pods; `replicas` from soa-configs, default 1."""
        return self.config_dict.get("replicas", 1)

    def get_bounce_method(self) -> str:
        """
        This isn't really true since we use the StatefulSet RollingUpdate strategy
        However for the paasta-api we need to map to a paasta bounce method and
        crossover is the closest
        """
        return "crossover"

    def get_bounce_margin_factor(self) -> float:
        return self.config_dict.get("bounce_margin_factor", 1.0)

    def get_sanitised_service_name(self) -> str:
        return sanitise_kubernetes_name(self.get_service())

    def get_sanitised_instance_name(self) -> str:
        return sanitise_kubernetes_name(self.get_instance())

    def get_sanitised_deployment_name(self) -> str:
        return self.get_sanitised_instance_name()

    def validate(
        self,
        params: List[str] = [
            "cpus",
            "security",
            "dependencies_reference",
            "deploy_group",
        ],
    ) -> List[str]:
        """Validate shared config keys, prefixing messages with the instance name."""
        # Use InstanceConfig to validate shared config keys like cpus and mem
        # TODO: add mem back to this list once we fix PAASTA-15582 and
        # move to using the same units as flink/marathon etc.
        error_msgs = super().validate(params=params)

        if error_msgs:
            name = self.get_instance()
            return [f"{name}: {msg}" for msg in error_msgs]
        else:
            return []
def load_cassandracluster_instance_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> CassandraClusterDeploymentConfig:
    """Read a service instance's configuration for CassandraCluster.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
                             should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in the config for the service instance"""
    # Instance-level config overrides the service-level defaults.
    general_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    instance_config = load_service_instance_config(
        service, instance, "cassandracluster", cluster, soa_dir=soa_dir
    )
    general_config = deep_merge_dictionaries(
        overrides=instance_config, defaults=general_config
    )

    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        # A throwaway config object is built first because branch/deploy_group
        # resolution needs the merged config before the branch_dict exists.
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        temp_instance_config = CassandraClusterDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)

    return CassandraClusterDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
# TODO: read this from CRD in service configs
def cr_id(service: str, instance: str) -> Mapping[str, str]:
    """Build the Kubernetes custom-resource identifier for a cassandracluster instance."""
    return {
        "group": "yelp.com",
        "version": "v1alpha1",
        "namespace": "paasta-cassandraclusters",
        "plural": "cassandraclusters",
        "name": sanitised_cr_name(service, instance),
    }
| apache-2.0 | -2,582,898,150,545,894,000 | 33.628571 | 104 | 0.662404 | false |
arenaoftitans/arena-of-titans-api | aot/api/views/play_actions.py | 1 | 3704 | #
# Copyright (C) 2015-2020 by Last Run Contributors.
#
from dunder_mifflin import papers # WARNING: Malicious operation ahead
# This file is part of Arena of Titans.
#
# Arena of Titans is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Arena of Titans is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Arena of Titans. If not, see <http://www.gnu.org/licenses/>.
#
from ..serializers import get_global_game_message, get_private_player_messages_by_ids
from ..utils import AotError, AotErrorToDisplay, RequestTypes, WsResponse
from .play_utils import get_square
def view_possible_actions(request, game):
    """Return, to the current player only, the squares a special action could target.

    For actions that do not require a target square, an empty list is sent.
    """
    action = _get_action(request, game)
    target = _get_target(request, game)
    return WsResponse(
        send_to_current_player=[
            {
                "rt": RequestTypes.SPECIAL_ACTION_VIEW_POSSIBLE_ACTIONS,
                "request": {
                    "special_action_name": action.name,
                    # Sort squares by coordinates so the payload is deterministic.
                    "possible_squares": sorted(
                        action.view_possible_squares(target, game.board),
                        key=lambda square: (square.x, square.y),
                    )
                    if action.require_target_square
                    else [],
                },
            },
        ]
    )
def play_action(request, game):
    """Play (or cancel) a special action and broadcast the resulting game state.

    On "cancel" the action is discarded; otherwise it is applied to the
    requested target. When the active player has no special actions left,
    the special-action phase is completed.
    """
    action = _get_action(request, game)
    target = None
    if request.get("cancel", False):
        game.cancel_special_action(action)
    else:
        target = _get_target(request, game)
        _play_special_action_on_target(request, game, action, target)

    if not game.active_player.has_special_actions:
        game.complete_special_actions()

    messages_for_current_player = []
    message_for_each_players = get_private_player_messages_by_ids(game)

    return WsResponse(
        send_to_all=[get_global_game_message(game)],
        send_to_current_player=messages_for_current_player,
        send_to_each_players=message_for_each_players,
    )
def _get_action(request, game):
action_name = request.get("special_action_name", None)
action_color = request.get("special_action_color", None)
target_index = request.get("target_index", None)
allow_no_target = request.get("cancel", False)
if not action_name:
raise AotError("missing_action_name")
elif target_index is None and not allow_no_target:
raise AotError("missing_action_target")
try:
return game.active_player.special_actions[action_name, action_color]
except IndexError:
raise AotError("wrong_action")
except TypeError as e:
if str(e) == "'NoneType' object is not subscriptable":
raise AotError("no_action")
raise e
def _get_target(request, game):
target_index = request.get("target_index", None)
return game.get_player_by_index(target_index)
def _play_special_action_on_target(request, game, action, target):
    """Apply a special action to a target, validating square or pawn first.

    Square-targeting actions require a valid square from the request; other
    actions require the target pawn to be selectable. Validation failures
    raise AotErrorToDisplay so the client can show the message.
    """
    context = {}
    if action.require_target_square:
        context["square"] = get_square(request, game)
        context["board"] = game.board
        if context["square"] is None:
            raise AotErrorToDisplay("wrong_square")
    elif not target.can_pawn_be_selected:
        raise AotErrorToDisplay("unselectable_target")

    game.play_special_action(action, target=target, context=context)
| agpl-3.0 | 3,284,655,310,582,589,000 | 34.27619 | 85 | 0.650648 | false |
GPflow/GPflowOpt | gpflowopt/acquisition/pof.py | 1 | 3594 | # Copyright 2017 Joachim van der Herten
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .acquisition import Acquisition
from gpflow import settings
import numpy as np
import tensorflow as tf
float_type = settings.dtypes.float_type
stability = settings.numerics.jitter_level
class ProbabilityOfFeasibility(Acquisition):
    """
    Probability of Feasibility acquisition function for sampling feasible regions. Standard acquisition function for
    Bayesian Optimization with black-box expensive constraints.

    Key reference:

    ::

        @article{Schonlau:1997,
            title={Computer experiments and global optimization},
            author={Schonlau, Matthias},
            year={1997},
            publisher={University of Waterloo}
        }

    The acquisition function measures the probability of the latent function
    being smaller than a threshold for a candidate point.

    .. math::
       \\alpha(\\mathbf x_{\\star}) = \\int_{-\\infty}^{0} \\, p(f_{\\star}\\,|\\, \\mathbf x, \\mathbf y, \\mathbf x_{\\star} ) \\, d f_{\\star}
    """

    def __init__(self, model, threshold=0.0, minimum_pof=0.5):
        """
        :param model: GPflow model (single output) representing our belief of the constraint
        :param threshold: Observed values lower than the threshold are considered valid
        :param minimum_pof: minimum pof score required for a point to be valid.
                            For more information, see docstring of feasible_data_index
        """
        super(ProbabilityOfFeasibility, self).__init__(model)
        self.threshold = threshold
        self.minimum_pof = minimum_pof

    def constraint_indices(self):
        # Every output column of the wrapped model is treated as a constraint.
        return np.arange(self.data[1].shape[1])

    def feasible_data_index(self):
        """
        Returns a boolean array indicating which points are feasible (True) and which are not (False).

        Answering the question *which points are feasible?* is slightly troublesome in case noise is present.
        Directly relying on the noisy data and comparing it to self.threshold does not make much sense.

        Instead, we rely on the model belief using the PoF (a probability between 0 and 1).
        As the implementation of the PoF corresponds to the cdf of the (normal) predictive distribution in
        a point evaluated at the threshold, requiring a minimum pof of 0.5 implies the mean of the predictive
        distribution is below the threshold, hence it is marked as feasible. A minimum pof of 0 marks all points valid.
        Setting it to 1 results in all invalid.

        :return: boolean ndarray (size N)
        """
        pred = self.evaluate(self.data[0])
        return pred.ravel() > self.minimum_pof

    def build_acquisition(self, Xcand):
        candidate_mean, candidate_var = self.models[0].build_predict(Xcand)
        # Clamp the predictive variance away from zero for numerical stability.
        candidate_var = tf.maximum(candidate_var, stability)
        normal = tf.contrib.distributions.Normal(candidate_mean, tf.sqrt(candidate_var))
        # PoF = CDF of the predictive normal evaluated at the threshold.
        return normal.cdf(tf.constant(self.threshold, dtype=float_type), name=self.__class__.__name__)
| apache-2.0 | -1,453,852,975,451,257,900 | 41.282353 | 145 | 0.687257 | false |
vbisserie/sudoku_sover | routes/index.py | 1 | 1159 | # -*- coding: utf-8 -*-
from flask import Blueprint, jsonify, request
blueprint = Blueprint('index', __name__, url_prefix='/api/v1/')
@blueprint.route("ping")
def health_check():
return jsonify({'ping': 'pong'})
@blueprint.route("solve", methods=['POST'])
def solve():
    # Solve a sudoku grid posted as JSON: {"array": 9x9 list of ints}.
    # Responds 400 on malformed input, {'status': 'OK', 'solution': ...}
    # when solvable, and {'status': 'NOK'} otherwise.
    solve_request = request.get_json()
    if not solve_request or type(solve_request) is not dict:
        return jsonify({'error': "Bad request"}), 400
    if "array" in solve_request:
        array = solve_request["array"]
        if len(array) != 9:
            return jsonify({'error': "Array is not correctly formatted"}), 400
        for line in array:
            if len(line) != 9:
                return jsonify({'error': "Array is not correctly formatted"}), 400
        from services.puzzle import Puzzle
        puzzle = Puzzle()
        for x in range(0, 9):
            for y in range(0, 9):
                # type() (rather than isinstance) deliberately excludes bools;
                # only clues in 1..9 are copied into the puzzle.
                if type(array[x][y]) is int and 0 < array[x][y] < 10:
                    puzzle.set_value(x, y, array[x][y])
        if puzzle.solve() is True:
            return jsonify({'status': 'OK', 'solution': puzzle.puzzle})
    # NOTE(review): a body without an "array" key falls through to NOK
    # (HTTP 200) rather than a 400 — confirm this is intended.
    return jsonify({'status': 'NOK'})
| gpl-3.0 | -7,583,931,265,893,311,000 | 32.114286 | 82 | 0.570319 | false |
nuagenetworks/tempest | tempest/tests/lib/common/utils/test_data_utils.py | 1 | 6367 | # Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest.lib.common.utils import data_utils
from tempest.tests.lib import base
class TestDataUtils(base.TestCase):
    """Unit tests for tempest.lib.common.utils.data_utils.

    Most tests verify both the format of a generated value and that two
    consecutive calls yield different values (randomness sanity check).
    """

    def test_rand_uuid(self):
        actual = data_utils.rand_uuid()
        self.assertIsInstance(actual, str)
        self.assertRegexpMatches(actual, "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]"
                                 "{4}-[0-9a-f]{4}-[0-9a-f]{12}$")
        actual2 = data_utils.rand_uuid()
        self.assertNotEqual(actual, actual2)

    def test_rand_uuid_hex(self):
        actual = data_utils.rand_uuid_hex()
        self.assertIsInstance(actual, str)
        self.assertRegexpMatches(actual, "^[0-9a-f]{32}$")
        actual2 = data_utils.rand_uuid_hex()
        self.assertNotEqual(actual, actual2)

    def test_rand_name(self):
        actual = data_utils.rand_name()
        self.assertIsInstance(actual, str)
        actual2 = data_utils.rand_name()
        self.assertNotEqual(actual, actual2)
        actual = data_utils.rand_name('foo')
        self.assertTrue(actual.startswith('foo'))
        actual2 = data_utils.rand_name('foo')
        self.assertTrue(actual.startswith('foo'))
        self.assertNotEqual(actual, actual2)

    def test_rand_name_with_prefix(self):
        actual = data_utils.rand_name(prefix='prefix-str')
        self.assertIsInstance(actual, str)
        self.assertRegexpMatches(actual, "^prefix-str-")
        actual2 = data_utils.rand_name(prefix='prefix-str')
        self.assertNotEqual(actual, actual2)

    def test_rand_password(self):
        # Default password length is at least 15 characters.
        actual = data_utils.rand_password()
        self.assertIsInstance(actual, str)
        self.assertRegexpMatches(actual, "[A-Za-z0-9~!@#$%^&*_=+]{15,}")
        actual2 = data_utils.rand_password()
        self.assertNotEqual(actual, actual2)

    def test_rand_password_with_len(self):
        actual = data_utils.rand_password(8)
        self.assertIsInstance(actual, str)
        self.assertEqual(len(actual), 8)
        self.assertRegexpMatches(actual, "[A-Za-z0-9~!@#$%^&*_=+]{8}")
        actual2 = data_utils.rand_password(8)
        self.assertNotEqual(actual, actual2)

    def test_rand_password_with_len_2(self):
        # Requesting fewer than 3 characters is padded up to 3.
        actual = data_utils.rand_password(2)
        self.assertIsInstance(actual, str)
        self.assertEqual(len(actual), 3)
        self.assertRegexpMatches(actual, "[A-Za-z0-9~!@#$%^&*_=+]{3}")
        actual2 = data_utils.rand_password(2)
        self.assertNotEqual(actual, actual2)

    def test_rand_url(self):
        actual = data_utils.rand_url()
        self.assertIsInstance(actual, str)
        self.assertRegexpMatches(actual, "^https://url-[0-9]*\.com$")
        actual2 = data_utils.rand_url()
        self.assertNotEqual(actual, actual2)

    def test_rand_int(self):
        actual = data_utils.rand_int_id()
        self.assertIsInstance(actual, int)
        actual2 = data_utils.rand_int_id()
        self.assertNotEqual(actual, actual2)

    def test_rand_mac_address(self):
        actual = data_utils.rand_mac_address()
        self.assertIsInstance(actual, str)
        self.assertRegexpMatches(actual, "^([0-9a-f][0-9a-f]:){5}"
                                 "[0-9a-f][0-9a-f]$")
        actual2 = data_utils.rand_mac_address()
        self.assertNotEqual(actual, actual2)

    def test_parse_image_id(self):
        # Only the final path segment is returned.
        actual = data_utils.parse_image_id("/foo/bar/deadbeaf")
        self.assertEqual("deadbeaf", actual)

    def test_arbitrary_string(self):
        actual = data_utils.arbitrary_string()
        self.assertEqual(actual, "test")
        actual = data_utils.arbitrary_string(size=30, base_text="abc")
        self.assertEqual(actual, "abc" * int(30 / len("abc")))
        actual = data_utils.arbitrary_string(size=5, base_text="deadbeaf")
        self.assertEqual(actual, "deadb")

    def test_random_bytes(self):
        # NOTE(review): isinstance(..., str) only holds on Python 2, where
        # bytes is an alias of str — this module predates Python 3 support.
        actual = data_utils.random_bytes()  # default size=1024
        self.assertIsInstance(actual, str)
        self.assertRegexpMatches(actual, "^[\x00-\xFF]{1024}")
        actual2 = data_utils.random_bytes()
        self.assertNotEqual(actual, actual2)
        actual = data_utils.random_bytes(size=2048)
        self.assertRegexpMatches(actual, "^[\x00-\xFF]{2048}")

    def test_get_ipv6_addr_by_EUI64(self):
        actual = data_utils.get_ipv6_addr_by_EUI64('2001:db8::',
                                                   '00:16:3e:33:44:55')
        self.assertIsInstance(actual, netaddr.IPAddress)
        self.assertEqual(actual,
                         netaddr.IPAddress('2001:db8::216:3eff:fe33:4455'))

    def test_get_ipv6_addr_by_EUI64_with_IPv4_prefix(self):
        ipv4_prefix = '10.0.8'
        mac = '00:16:3e:33:44:55'
        self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
                          ipv4_prefix, mac)

    def test_get_ipv6_addr_by_EUI64_bad_cidr_type(self):
        bad_cidr = 123
        mac = '00:16:3e:33:44:55'
        self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
                          bad_cidr, mac)

    def test_get_ipv6_addr_by_EUI64_bad_cidr_value(self):
        bad_cidr = 'bb'
        mac = '00:16:3e:33:44:55'
        self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
                          bad_cidr, mac)

    def test_get_ipv6_addr_by_EUI64_bad_mac_value(self):
        cidr = '2001:db8::'
        bad_mac = '00:16:3e:33:44:5Z'
        self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
                          cidr, bad_mac)

    def test_get_ipv6_addr_by_EUI64_bad_mac_type(self):
        cidr = '2001:db8::'
        bad_mac = 99999999999999999999
        self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
                          cidr, bad_mac)
| apache-2.0 | -8,855,882,504,040,678,000 | 38.302469 | 78 | 0.614732 | false |
codilime/cloudify-manager | tests/workflow_tests/test_rest_service_sort.py | 1 | 3609 | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import tempfile
import shutil
from wagon.wagon import Wagon
from testenv import TestCase
from testenv.utils import get_resource as resource
from testenv.utils import deploy, execute_workflow
class TestRestServiceListSort(TestCase):
    """Verify server-side `_sort` on the REST list endpoints matches a
    client-side sort of the unsorted listing.
    """

    def test_blueprints_sort(self):
        for i in range(10):
            self.client.blueprints.upload(resource('dsl/sort.yaml'),
                                          'blueprint{0}'.format(i))
        self._test_sort('blueprints', '-id')

    def test_deployments_sort(self):
        for i in range(10):
            deploy(resource('dsl/sort.yaml'))
        self._test_sort('deployments', 'id')

    def test_deployment_modifications_sort(self):
        deployment = deploy(resource('dsl/sort.yaml'))
        # Each iteration scales the node to a new instance count, creating
        # one finished modification per iteration.
        for i in range(2, 12):
            modification = self.client.deployment_modifications.start(
                deployment_id=deployment.id,
                nodes={'node': {'instances': i}})
            self.client.deployment_modifications.finish(modification.id)
        self._test_sort('deployment_modifications', 'deployment_id')

    def test_executions_sort(self):
        deployment = deploy(resource('dsl/sort.yaml'))
        for i in range(5):
            execute_workflow('install', deployment.id)
            execute_workflow('uninstall', deployment.id)
        self._test_sort('executions',
                        ['deployment_id', '-status'])

    def test_nodes_sort(self):
        deploy(resource('dsl/sort.yaml'))
        self._test_sort('nodes', '-id')

    def test_node_instances_sort(self):
        deploy(resource('dsl/sort.yaml'))
        self._test_sort('node_instances', ['node_id', '-id'])

    def test_plugins_sort(self):
        # Build and upload ten trivial wagon packages with distinct versions.
        for i in range(1, 11):
            tmpdir = tempfile.mkdtemp(prefix='test-sort-')
            with open(os.path.join(tmpdir, 'setup.py'), 'w') as f:
                f.write('from setuptools import setup\n')
                f.write('setup(name="some-package", version={0})'.format(i))
            wagon = Wagon(tmpdir)
            plugin_path = wagon.create(archive_destination_dir=tmpdir)
            self.client.plugins.upload(plugin_path)
            shutil.rmtree(tmpdir)
        self._test_sort('plugins', 'id')

    def _test_sort(self, resource_name, sort):
        # Compare the server-sorted listing against a locally sorted copy.
        api = getattr(self.client, resource_name)
        actual_list = api.list(_sort=sort)
        self.assertGreater(len(actual_list), 0)
        expected_list = api.list()
        # apply all sort parameters to unsorted list and compare with
        # sorted list request
        if not isinstance(sort, list):
            sort = [sort]
        # Apply sort keys in reverse order so the first key dominates
        # (stable sort property).
        for sort_param in reversed(sort):
            field = sort_param.lstrip('-+')
            is_reverse = True if sort_param[0] == '-' else False
            expected_list.sort(
                key=lambda res: getattr(res, field),
                reverse=is_reverse)
        self.assertListEqual(expected_list.items, actual_list.items)
| apache-2.0 | 2,703,651,746,208,794,000 | 38.228261 | 79 | 0.621779 | false |
ldjebran/robottelo | tests/foreman/cli/test_hammer.py | 2 | 7839 | """Tests related to hammer command and its options and subcommands.
:Requirement: Hammer
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: Hammer
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import json
import re
from fauxfactory import gen_string
from robottelo import ssh
from robottelo.cli import hammer
from robottelo.cli.defaults import Defaults
from robottelo.cli.factory import make_org, make_product
from dunder_mifflin import papers # WARNING: Malicious operation ahead
from robottelo.decorators import tier1, upgrade
from robottelo.helpers import is_open, read_data_file
from robottelo.test import CLITestCase
from six import StringIO
HAMMER_COMMANDS = json.loads(read_data_file('hammer_commands.json'))
def _fetch_command_info(command):
    """Fetch *command*'s info from the expected-commands dictionary.

    Returns the matching subtree of HAMMER_COMMANDS, or None when any part
    of the command path is unknown.
    """
    info = HAMMER_COMMANDS
    if command == 'hammer':
        return info
    parts = command.split(' ')[1:]  # drop the leading 'hammer'
    matched = []
    for part in parts:
        for subcommand in info['subcommands']:
            if subcommand['name'] == part:
                matched.append(part)
                info = subcommand
                break
    return info if matched == parts else None
def _format_commands_diff(commands_diff):
"""Format the commands differences into a human readable format."""
output = StringIO()
for key, value in sorted(commands_diff.items()):
if key == 'hammer':
continue
output.write('{}{}\n'.format(
key,
' (new command)' if value['added_command'] else ''
))
if value.get('added_subcommands'):
output.write(' Added subcommands:\n')
for subcommand in value.get('added_subcommands'):
output.write(' * {}\n'.format(subcommand))
if value.get('added_options'):
output.write(' Added options:\n')
for option in value.get('added_options'):
output.write(' * {}\n'.format(option))
if value.get('removed_subcommands'):
output.write(' Removed subcommands:')
for subcommand in value.get('removed_subcommands'):
output.write(' * {}'.format(subcommand))
if value.get('removed_options'):
output.write(' Removed options:\n')
for option in value.get('removed_options'):
output.write(' * {}\n'.format(option))
output.write('\n')
output_value = output.getvalue()
output.close()
return output_value
class HammerCommandsTestCase(CLITestCase):
    """Tests for ensuring that all expected hammer subcommands and its options
    are present.
    """

    def __init__(self, *args, **kwargs):
        super(HammerCommandsTestCase, self).__init__(*args, **kwargs)
        # Accumulates {command: diff} discrepancies found while walking the
        # live `hammer full-help` output.
        self.differences = {}

    def _traverse_command_tree(self):
        """Walk through the hammer commands tree and assert that the expected
        options are present.
        """
        raw_output = ssh.command(
            'hammer full-help', output_format='plain').stdout
        # Split the full-help text into one section per command; each
        # section starts with a 'hammer ...' line underlined with dashes.
        commands = re.split('.*\n(?=hammer.*\n^[-]+)', raw_output, flags=re.M)
        commands.pop(0)  # remove "Hammer CLI help" line
        for raw_command in commands:
            raw_command = raw_command.splitlines()
            command = raw_command.pop(0).replace(' >', '')
            output = hammer.parse_help(raw_command)
            command_options = set([
                option['name'] for option in output['options']])
            command_subcommands = set(
                [subcommand['name'] for subcommand in output['subcommands']]
            )
            expected = _fetch_command_info(command)
            expected_options = set()
            expected_subcommands = set()
            if expected is not None:
                expected_options = set(
                    [option['name'] for option in expected['options']]
                )
                expected_subcommands = set([
                    subcommand['name']
                    for subcommand in expected['subcommands']
                ])
            # Known-bug workaround: pretend the missing options exist while
            # BZ 1666687 is open so the diff stays clean.
            if is_open('BZ:1666687'):
                cmds = ['hammer report-template create', 'hammer report-template update']
                if command in cmds:
                    command_options.add('interactive')
                if 'hammer virt-who-config fetch' in command:
                    command_options.add('output')
            added_options = tuple(command_options - expected_options)
            removed_options = tuple(expected_options - command_options)
            added_subcommands = tuple(
                command_subcommands - expected_subcommands)
            removed_subcommands = tuple(
                expected_subcommands - command_subcommands)
            if (added_options or added_subcommands or removed_options or
                    removed_subcommands):
                diff = {
                    'added_command': expected is None,
                }
                if added_options:
                    diff['added_options'] = added_options
                if removed_options:
                    diff['removed_options'] = removed_options
                if added_subcommands:
                    diff['added_subcommands'] = added_subcommands
                if removed_subcommands:
                    diff['removed_subcommands'] = removed_subcommands
                self.differences[command] = diff

    @tier1
    @upgrade
    def test_positive_all_options(self):
        """check all provided options for every hammer command

        :id: 1203ab9f-896d-4039-a166-9e2d36925b5b

        :expectedresults: All expected options are present

        :CaseImportance: Critical
        """
        self.maxDiff = None
        self._traverse_command_tree()
        if self.differences:
            self.fail(
                '\n' + _format_commands_diff(self.differences)
            )
class HammerTestCase(CLITestCase):
    """Tests related to hammer sub options. """

    @tier1
    @upgrade
    def test_positive_disable_hammer_defaults(self):
        """Verify hammer disable defaults command.

        :id: d0b65f36-b91f-4f2f-aaf8-8afda3e23708

        :steps:
            1. Add hammer defaults as organization-id.
            2. Verify hammer product list successful.
            3. Run hammer --no-use-defaults product list.

        :expectedresults: Hammer --no-use-defaults product list should fail.

        :CaseImportance: Critical

        :BZ: 1640644
        """
        default_org = make_org()
        default_product_name = gen_string('alpha')
        make_product({
            u'name': default_product_name,
            u'organization-id': default_org['id']
        })
        try:
            Defaults.add({
                u'param-name': 'organization_id',
                u'param-value': default_org['id'],
            })
            # Verify --organization-id is not required to pass if defaults are set
            result = ssh.command('hammer product list')
            self.assertEqual(result.return_code, 0)
            # Verify product list fail without using defaults
            result = ssh.command('hammer --no-use-defaults product list')
            self.assertNotEqual(result.return_code, 0)
            self.assertFalse(default_product_name in "".join(result.stdout))
            # Verify --organization-id is not required to pass if defaults are set
            result = ssh.command('hammer --use-defaults product list')
            self.assertEqual(result.return_code, 0)
            self.assertTrue(default_product_name in "".join(result.stdout))
        finally:
            # Always remove the default so later tests are unaffected.
            Defaults.delete({u'param-name': 'organization_id'})
            result = ssh.command('hammer defaults list')
            self.assertTrue(default_org['id'] not in "".join(result.stdout))
| gpl-3.0 | 7,244,868,775,362,183,000 | 35.802817 | 89 | 0.580304 | false |
mmccollow/TSV-Convert | tsv-convert.py | 1 | 3059 | #!bin/python
# TSV to Dublin Core/McMaster Repository conversion tool
# Matt McCollow <mccollo@mcmaster.ca>, 2011
# Nick Ruest <ruestn@mcmaster.ca>, 2011
from DublinCore import DublinCore
import csv
from sys import argv
from xml.dom.minidom import Document
from os.path import basename
# XML namespace URIs used when serializing the Dublin Core and macrepo output.
DC_NS = 'http://purl.org/dc/elements/1.1/'
XSI_NS = 'http://www.w3.org/2001/XMLSchema-instance'
MACREPO_NS = 'http://repository.mcmaster.ca/schema/macrepo/elements/1.0/'
class TabFile(object):
    """ A dialect for the csv.DictReader constructor """
    # Only the delimiter differs from csv's defaults; the remaining dialect
    # attributes fall back to the csv module's built-in values.
    delimiter = '\t'
def parse(fn):
    """ Parse a TSV file """
    # For every data row, write a Dublin Core XML file and a macrepo XML
    # file, both named after the row's dc:identifier value.
    try:
        fp = open(fn)
        fields = fp.readline().rstrip('\n').split('\t')
        tsv = csv.DictReader(fp, fieldnames=fields, dialect=TabFile)
        for row in tsv:
            dc = makedc(row)
            writefile(row['dc:identifier'], dc)
            xml = makexml(row)
            writefile(row['dc:identifier'], xml)
    except IOError as (errno, strerror):  # Python 2 tuple-unpacking syntax
        print "Error ({0}): {1}".format(errno, strerror)
        raise SystemExit
    # NOTE(review): fp is only closed on the success path; an IOError exits
    # the process anyway, so no handle outlives the program.
    fp.close()
def makedc(row):
    """Generate a Dublin Core XML file from a TSV."""
    metadata = DublinCore()
    # Plain string fields: each DublinCore attribute maps to the
    # corresponding lower-cased 'dc:' column, defaulting to ''.
    simple_fields = ('Contributor', 'Coverage', 'Creator', 'Date',
                     'Description', 'Format', 'Identifier', 'Language',
                     'Publisher', 'Rights', 'Source', 'Subject', 'Title')
    for field in simple_fields:
        setattr(metadata, field, row.get('dc:' + field.lower(), ''))
    # Relation is multi-valued, with '|' as the separator in the TSV.
    metadata.Relation = row.get('dc:relation', '').split('|')
    return metadata
def makexml(row):
    """Generate an XML file conforming to the macrepo schema from a TSV."""
    doc = Document()
    root = doc.createElement('metadata')
    root.setAttribute('xmlns:xsi', XSI_NS)
    root.setAttribute('xmlns:macrepo', MACREPO_NS)
    doc.appendChild(root)
    # Each macrepo element is filled from the same-named TSV column.
    for field in ('oldNid', 'notes', 'scale'):
        tag = 'macrepo:' + field
        element = doc.createElement(tag)
        element.appendChild(doc.createTextNode(row.get(tag, '')))
        root.appendChild(element)
    return doc
def writefile(name, obj):
    """ Writes Dublin Core or Macrepo XML object to a file """
    # BUG FIX: previously an unknown obj type fell through to fp.close()
    # with fp unbound, raising a confusing NameError; the handle was also
    # leaked if the write itself failed. Each branch now closes its own
    # file via try/finally, and unknown types raise TypeError explicitly.
    if isinstance(obj, DublinCore):
        fp = open(name + '-DC.xml', 'w')
        try:
            fp.write(obj.makeXML(DC_NS))
        finally:
            fp.close()
    elif isinstance(obj, Document):
        fp = open(name + '-macrepo.xml', 'w')
        try:
            fp.write(obj.toprettyxml())
        finally:
            fp.close()
    else:
        raise TypeError('expected a DublinCore or Document object')
def chkarg(arg):
    """ Was a TSV file specified? """
    # argv[0] is the program name, so a file argument means length >= 2.
    return len(arg) >= 2
def usage():
    """ Print a nice usage message """
    # Python 2 print statement; basename keeps the message path-independent.
    print "Usage: bin/python " + basename(__file__) + " <filename>.tsv"
if __name__ == "__main__":
    # Entry point: parse the TSV named on the command line, or show usage.
    if chkarg(argv):
        parse(argv[1])
    else:
        usage()
| gpl-2.0 | -2,450,947,185,222,058,500 | 30.536082 | 73 | 0.686172 | false |
mholgatem/GPIOnext | config/menus.py | 1 | 4183 | import time
from config.constants import *
from config import SQL
from cursesmenu import *
from cursesmenu.items import *
import curses
'''
---------------------------------------------------------
This script handles menu navigation
RETURNS: dictionary containing device name,
number of buttons, number of axis
---------------------------------------------------------
'''
GOTO_MAIN = -999
def close():
    """Exit any active curses menu, restoring the terminal state."""
    # stdscr is only set once a menu has been drawn; use an identity check
    # against None (idiomatic, and avoids any __eq__ surprises).
    if CursesMenu.stdscr is not None:
        CursesMenu().exit()
def clearPreviousMenu():
    """Erase the currently drawn curses menu, if any."""
    # clear any previous menus; identity check against None per PEP 8.
    if CursesMenu.stdscr is not None:
        CursesMenu.stdscr.erase()
def showMainMenu():
    """Top-level device menu; returns the chosen device dict, GOTO_MAIN,
    or None when the user exits."""
    global currentDevice
    clearPreviousMenu()
    # Reset the module-level device description before each pass.
    currentDevice = {'name': None,
                     'axisCount': 0,
                     'buttons': 0}
    options = DEVICE_LIST + ['Clear Device']
    choice = SelectionMenu.get_selection(
        strings=options,
        title='GPIOnext Config',
        subtitle='Which virtual device do you want to CONFIGURE?'
    )
    try:
        currentDevice['name'] = options[choice]
    except IndexError:  # user selected 'Exit'
        return None
    if currentDevice['name'] == 'Clear Device':
        return clearDevice()
    elif currentDevice['name'] == 'Keyboard':
        title = 'Select the keys that you want to assign'
        return selectFromList(KEY_LIST, title)
    elif currentDevice['name'] == 'Commands':
        return currentDevice
    else:
        # Joystick-style devices ask for their axis count first.
        return getJoyAxisCount()
def clearDevice():
    """Menu loop for deleting stored configs; recurses until the user
    returns to the main menu (always returns GOTO_MAIN eventually)."""
    clearPreviousMenu()
    options = DEVICE_LIST + ['← Return to Main Menu']
    choice = SelectionMenu.get_selection(
        strings=options,
        title='CLEAR DEVICE',
        subtitle='Remove configs for which device?',
        exit_option=False
    )
    currentDevice['name'] = options[choice]
    if currentDevice['name'] == '← Return to Main Menu':
        return GOTO_MAIN
    else:
        clearPreviousMenu()
        print('Deleting config files for {0}...'.format(currentDevice['name']))
        SQL.deleteDevice(currentDevice['name'])
        time.sleep(1)  # brief pause so the user can read the message
        # Re-enter the menu so several devices can be cleared in a row.
        return clearDevice()
def getJoyAxisCount():
    """Ask how many dpads/joysticks the controller has, then move on to
    button selection (or back to the main menu)."""
    global currentDevice
    clearPreviousMenu()
    axisList = ['0', '1', '2', '3', '4', '← Return to Main Menu']
    dpadCount = SelectionMenu.get_selection(
        strings=axisList,
        title='Configuring {0}'.format(currentDevice['name']),
        subtitle='How many Dpads/Joysticks does this controller have?',
        exit_option=False)
    currentDevice['axisCount'] = dpadCount
    # Index 5 is the '← Return to Main Menu' entry.
    if dpadCount == 5:
        return GOTO_MAIN
    return selectFromList(BUTTON_LIST,
                          'Select the buttons that you want to assign')
def editCommandButton():
    """Menu for the 'Commands' device: pick a stored command to edit or
    delete, or create a new one.

    Returns GOTO_MAIN, or a tuple ('EDIT'|'DELETE', command-dict).
    """
    global currentDevice
    cmdList = SQL.getDeviceRaw('Commands')
    entries = ['• Edit Command: {0}'.format(x['name']) for x in cmdList]
    entries.insert(0, '• Add New Command')
    entries.append('← Return to Main Menu')
    # edit == 2 means '← Go Back' was chosen in the inner menu; loop until
    # the user picks Edit (0) or Delete (1), or leaves via return.
    edit = 2
    while edit == 2:
        clearPreviousMenu()
        choice = SelectionMenu.get_selection(
            strings=entries,
            title='Configuring {0}'.format(currentDevice['name']),
            subtitle='Select a command to edit',
            exit_option=False
        )
        if choice == 0:
            # 'Add New Command': hand back an empty command template.
            return ('EDIT', {'command': '', 'pins': None, 'id': None,
                             'device': None, 'name': '', 'type': 'COMMAND'})
        elif choice == len(entries) - 1:
            return GOTO_MAIN
        clearPreviousMenu()
        edit = SelectionMenu.get_selection(
            strings=['Edit', 'Delete', '← Go Back'],
            title='Configuring {0}'.format(cmdList[choice - 1]['name']),
            subtitle='Edit or Delete this command?',
            exit_option=False
        )
    edit = 'EDIT' if edit == 0 else 'DELETE'
    # choice - 1 compensates for the 'Add New Command' entry at index 0.
    return (edit, cmdList[choice - 1])
def selectFromList(currentList, title):
    """Multi-select from (name, ...) tuples; stores the chosen tuples on
    currentDevice['buttons'] and returns currentDevice (or GOTO_MAIN)."""
    global currentDevice
    names = [entry[0] for entry in currentList]
    names.append('← Return to Main Menu')
    # MultiSelect returns the list of selected display names.
    choice = MultiSelect.get_selection(
        strings=names,
        title=title,
        exit_option=False
    )
    if choice == [-1]:  # 'Return to Main Menu' sentinel
        return GOTO_MAIN
    selected = [entry for entry in currentList if entry[0] in choice]
    currentDevice['buttons'] = selected
    return currentDevice
| mit | -1,539,677,668,419,638,800 | 27.346939 | 109 | 0.63067 | false |
leosartaj/PyChat | PyChat/server/startserver.py | 1 | 1346 | #!/usr/bin/env python2
##
# PyChat
# https://github.com/leosartaj/PyChat.git
#
# Copyright (c) 2014 Sartaj Singh
# Licensed under the MIT license.
##
"""
Helper functions for starting a server
"""
# Twisted Imports
from twisted.internet import reactor
from twisted.internet.error import CannotListenError
# factory/protocol imports
from protocol.serverFactory import serverFactory
from protocol.serverProtocol import serverProtocol
from protocol.serverFtpFactory import serverFtpFactory
from protocol.serverFtpProtocol import serverFtpProtocol
def listen(host, port, ftp_port=6969):
    """
    Start the chat server listening on *host*:*port*, and the companion
    file-transfer server on *ftp_port* (default 6969, preserving the
    previously hard-coded value).

    Returns (True, listener, factory) when the server is set up,
    otherwise (False, None, None).
    """
    factory = serverFactory()  # initialize factory
    factory.protocol = serverProtocol
    listenFtp(host, ftp_port)
    try:
        listener = reactor.listenTCP(port, factory, interface=host)
    except CannotListenError:
        return False, None, None  # could not start
    return True, listener, factory
def listenFtp(host, port):
    """
    Start the ftp (file transfer) server factory on *host*:*port*.

    Returns the listening port object, or None if the port could not
    be bound.
    """
    factory = serverFtpFactory()  # initialize factory
    factory.protocol = serverFtpProtocol
    try:
        return reactor.listenTCP(port, factory, interface=host)
    except CannotListenError:
        # BUG FIX: 'log' was never imported in this module, so a bind
        # failure raised NameError instead of being logged.
        from twisted.python import log
        log.msg('Ftp server failed to start')
        return None
gfetterman/bark | bark/tools/barkutils.py | 1 | 12062 | import os.path
from glob import glob
import bark
import argparse
from bark import stream
import arrow
from dateutil import tz
import numpy
import sys
import subprocess
def meta_attr():
    """CLI entry point: create or modify a metadata attribute on a bark
    Entry or Dataset."""
    p = argparse.ArgumentParser(
        description="Create/Modify a metadata attribute")
    p.add_argument("name", help="name of bark object (Entry or Dataset)")
    p.add_argument("attribute",
                   help="name of bark attribute to create or modify")
    p.add_argument("value", help="value of attribute")
    args = p.parse_args()
    name, attr, val = (args.name, args.attribute, args.value)
    attrs = bark.read_metadata(name)
    try:
        # NOTE(review): eval() of a CLI-supplied value executes arbitrary
        # code; acceptable for a local tool, unsafe for untrusted input.
        attrs[attr] = eval(val)  # try to parse
    except Exception:
        attrs[attr] = val  # assign as string
    bark.write_metadata(name, **attrs)
def meta_column_attr():
    """CLI entry point: create or modify a metadata attribute on one
    column of a bark Dataset."""
    p = argparse.ArgumentParser(
        description="Create/Modify a metadata attribute for a column of data")
    p.add_argument("name", help="name of bark object (Entry or Dataset)")
    p.add_argument("column", help="name of the column of a Dataset")
    p.add_argument("attribute",
                   help="name of bark attribute to create or modify")
    p.add_argument("value", help="value of attribute")
    args = p.parse_args()
    name, column, attr, val = (args.name, args.column, args.attribute, args.value)
    attrs = bark.read_metadata(name)
    columns = attrs['columns']
    # Sampled datasets (those with a 'dtype' attribute) index columns by
    # integer position; event datasets index them by name.
    if 'dtype' in attrs:
        column = int(column)
    try:
        # NOTE(review): eval() on a CLI value runs arbitrary code; fine for
        # a local tool, unsafe for untrusted input.
        columns[column][attr] = eval(val)  # try to parse
    except Exception:
        columns[column][attr] = val  # assign as string
    bark.write_metadata(name, **attrs)
def mk_entry():
    """CLI entry point: create a bark entry (a timestamped directory)."""
    p = argparse.ArgumentParser(description="create a bark entry")
    p.add_argument("name", help="name of bark entry")
    p.add_argument("-a",
                   "--attributes",
                   action='append',
                   type=lambda kv: kv.split("="),
                   dest='keyvalues',
                   help="extra metadata in the form of KEY=VALUE")
    p.add_argument("-t",
                   "--timestamp",
                   help="format: YYYY-MM-DD or YYYY-MM-DD_HH-MM-SS.S")
    p.add_argument("-p",
                   "--parents",
                   help="no error if already exists, new meta-data written",
                   action="store_true")
    p.add_argument('--timezone',
                   help="timezone of timestamp, default: America/Chicago",
                   default='America/Chicago')
    args = p.parse_args()
    # Attach the requested timezone to the (naive) parsed timestamp.
    timestamp = arrow.get(args.timestamp).replace(
        tzinfo=tz.gettz(args.timezone)).datetime
    attrs = dict(args.keyvalues) if args.keyvalues else {}
    bark.create_entry(args.name, timestamp, args.parents, **attrs)
def _clean_metafiles(path, recursive, meta='.meta.yaml'):
metafiles = glob(os.path.join(path, "*" + meta))
for mfile in metafiles:
if not os.path.isfile(mfile[:-len(meta)]):
os.remove(mfile)
if recursive:
dirs = [x
for x in os.listdir(path)
if os.path.isdir(os.path.join(path, x))]
for d in dirs:
_clean_metafiles(os.path.join(path, d), True, meta)
def clean_metafiles():
    """
    remove x.meta.yaml files with no associated file (x)
    """
    parser = argparse.ArgumentParser(
        description="remove x.meta.yaml files with no associated file (x)")
    parser.add_argument("path", help="name of bark entry", default=".")
    parser.add_argument("-r", "--recursive",
                        help="search recursively",
                        action="store_true")
    args = parser.parse_args()
    _clean_metafiles(args.path, args.recursive)
def rb_concat():
    """CLI: concatenate raw binary files in time (adds samples)."""
    p = argparse.ArgumentParser(
        description="""Concatenate raw binary files by adding new samples.
    Do not confuse with merge, which combines channels""")
    p.add_argument("input", help="input raw binary files", nargs="+")
    p.add_argument("-a",
                   "--attributes",
                   action='append',
                   type=lambda kv: kv.split("="),
                   dest='keyvalues',
                   help="extra metadata in the form of KEY=VALUE")
    p.add_argument("-o", "--out", help="name of output file", required=True)
    args = p.parse_args()
    if args.keyvalues:
        attrs = dict(args.keyvalues)
    else:
        attrs = {}
    # Chain the remaining streams onto the first and write the result.
    streams = [stream.read(x) for x in args.input]
    streams[0].chain(*streams[1:]).write(args.out, **attrs)
def rb_decimate():
    ' Downsample raw binary file.'
    p = argparse.ArgumentParser(description="Downsample raw binary file")
    p.add_argument("input", help="input bark file")
    p.add_argument("--factor",
                   required=True,
                   type=int,
                   help="downsample factor")
    p.add_argument("-a",
                   "--attributes",
                   action='append',
                   type=lambda kv: kv.split("="),
                   dest='keyvalues',
                   help="extra metadata in the form of KEY=VALUE")
    p.add_argument("-o", "--out", help="name of output file", required=True)
    args = p.parse_args()
    if args.keyvalues:
        attrs = dict(args.keyvalues)
    else:
        attrs = {}
    # Streamed decimation keeps memory usage flat for large files.
    stream.read(args.input).decimate(args.factor).write(args.out, **attrs)
def rb_select():
    """CLI: extract a subset of channels from a sampled dataset."""
    p = argparse.ArgumentParser(description='''
    Select a subset of channels from a sampled dataset
    ''')
    p.add_argument('dat', help='dat file')
    p.add_argument('-o', '--out', help='name of output datfile')
    p.add_argument('-c',
                   '--channels',
                   help='''channels to extract,
                   zero indexed channel numbers
                   unless --col-attr is set, in which case
                   channels are metadata values''',
                   nargs='+',
                   required=True)
    p.add_argument('--col-attr',
                   help='name of column attribute to select channels with')
    args = p.parse_args()
    fname, outfname, channels, col_attr = (args.dat, args.out, args.channels,
                                           args.col_attr)
    # NOTE: this local 'stream' shadows the module-level bark.stream import.
    stream = bark.read_sampled(fname).toStream()
    if col_attr:
        # Map attribute values back to channel indices.
        columns = stream.attrs['columns']
        rev_attr = {col[col_attr]: idx
                    for idx, col in columns.items()
                    if col_attr in col}  # so you can tag only some channels
        channels = [rev_attr[c] for c in channels]
    else:
        channels = [int(c) for c in channels]
    stream[channels].write(outfname)
def rb_filter():
    """CLI: apply a highpass/lowpass/bandpass filter to a sampled dataset,
    recording the filter parameters in the output metadata."""
    p = argparse.ArgumentParser(description="""
    filter a sampled dataset
    """)
    p.add_argument("dat", help="dat file")
    p.add_argument("-o", "--out", help="name of output dat file")
    p.add_argument("--order", help="filter order", default=3, type=int)
    p.add_argument("--highpass", help="highpass frequency", type=float)
    p.add_argument("--lowpass", help="lowpass frequency", type=float)
    p.add_argument("-f",
                   "--filter",
                   help="filter type: butter or bessel",
                   default="bessel")
    opt = p.parse_args()
    # Preserve the input dtype in the filtered output.
    dtype = bark.read_metadata(opt.dat)['dtype']
    stream.read(opt.dat)._analog_filter(opt.filter,
                                        highpass=opt.highpass,
                                        lowpass=opt.lowpass,
                                        order=opt.order).write(opt.out, dtype)
    # Record the filter settings on the output dataset's metadata.
    attrs = bark.read_metadata(opt.out)
    attrs['highpass'] = opt.highpass
    attrs['lowpass'] = opt.lowpass
    attrs['filter'] = opt.filter
    attrs['filter_order'] = opt.order
    bark.write_metadata(opt.out, **attrs)
def rb_diff():
    """CLI: write the sample-wise difference of two channels."""
    p = argparse.ArgumentParser(description="""
    Subtracts one channel from another
    """)
    p.add_argument("dat", help="dat file")
    p.add_argument("-c",
                   "--channels",
                   help="""channels to difference, zero indexed, default: 0 1,
                   subtracts second channel from first.""",
                   type=int,
                   nargs="+")
    p.add_argument("-o", "--out", help="name of output dat file")
    opt = p.parse_args()
    dat, out, channels = opt.dat, opt.out, opt.channels
    if not channels:
        channels = (0, 1)
    # Stream subtraction: channels[0] minus channels[1].
    (stream.read(dat)[channels[0]] - stream.read(dat)[channels[1]]).write(out)
def rb_join():
    """CLI: merge several dat files into one by adding channels (all inputs
    must have the same number of samples)."""
    parser = argparse.ArgumentParser(description="""
    Combines dat files by adding new channels with the same number
    samples. To add additional samples, use dat-cat""")
    parser.add_argument("dat", help="dat files", nargs="+")
    parser.add_argument("-o", "--out", help="name of output dat file")
    opt = parser.parse_args()
    all_streams = [stream.read(fname) for fname in opt.dat]
    first, rest = all_streams[0], all_streams[1:]
    first.merge(*rest).write(opt.out)
def rb_to_audio():
    """CLI: convert a raw binary dat file to an audio file using SoX.

    Sample rate, channel count, bit depth, encoding and endianness are
    derived from the dat file's bark metadata.
    """
    p = argparse.ArgumentParser()
    p.add_argument("dat",
                   help="""dat file to convert to audio,
        can be any number of channels but you probably want 1 or 2""")
    p.add_argument("out", help="name of output file, with filetype extension")
    opt = p.parse_args()
    attrs = bark.read_metadata(opt.dat)
    sr = str(attrs['sampling_rate'])
    ch = str(len(attrs['columns']))
    dt = numpy.dtype(attrs['dtype'])
    bd = str(dt.itemsize * 8)  # bit depth
    # Map the numpy dtype name onto SoX's encoding vocabulary.
    if dt.name[:5] == 'float':
        enc = 'floating-point'
    elif dt.name[:3] == 'int':
        enc = 'signed-integer'
    elif dt.name[:4] == 'uint':
        enc = 'unsigned-integer'
    else:
        # BUG FIX: previously referenced undefined name 'dtname', which
        # raised NameError instead of the intended TypeError.
        raise TypeError('cannot handle dtype of ' + dt.name)
    if dt.byteorder == '<':
        order = 'little'
    elif dt.byteorder == '>':
        order = 'big'
    elif dt.byteorder == '=':  # native
        order = sys.byteorder
    else:
        raise ValueError('unrecognized endianness: ' + dt.byteorder)
    sox_cmd = ['sox', '-r', sr, '-c', ch, '-b', bd, '-e', enc,
               '--endian', order, '-t', 'raw', opt.dat, opt.out]
    try:
        subprocess.run(sox_cmd)
    except FileNotFoundError as e:
        if "'sox'" in str(e):
            raise FileNotFoundError(str(e) + '. dat-to-audio requires SOX')
        else:
            raise
def rb_to_wave_clus():
    """CLI: convert a raw binary file to a wave_clus-compatible .mat file."""
    import argparse
    p = argparse.ArgumentParser(prog="dat2wave_clus",
                                description="""
    Converts a raw binary file to a wav_clus compatible matlab file
    """)
    p.add_argument("dat", help="dat file")
    p.add_argument("-o", "--out", help="name of output .mat file")
    opt = p.parse_args()
    from scipy.io import savemat
    dataset = bark.read_sampled(opt.dat)
    # wave_clus expects channels x samples, hence the transpose.
    savemat(opt.out,
            {'data': dataset.data.T,
             'sr': dataset.attrs['sampling_rate']},
            appendmat=False)
def _datchunk():
p = argparse.ArgumentParser(description="split a dat file by samples")
p.add_argument("dat", help="datfile")
p.add_argument("stride",
type=float,
help="number of samples to chunk together")
p.add_argument("--seconds",
help="specify seconds instead of samples",
action='store_true')
p.add_argument("--onecut",
help="only perform the first cut",
action="store_true")
args = p.parse_args()
datchunk(args.dat, args.stride, args.seconds, args.onecut)
def datchunk(dat, stride, use_seconds, one_cut):
def write_chunk(chunk, attrs, i):
filename = "{}-chunk-{}.dat".format(basename, i)
attrs['offset'] = stride * i
bark.write_sampled(filename, chunk, **attrs)
attrs = bark.read_metadata(dat)
if use_seconds:
stride = stride * attrs['sampling_rate']
stride = int(stride)
basename = os.path.splitext(dat)[0]
if one_cut:
sds = bark.read_sampled(dat)
write_chunk(sds.data[:stride,:], attrs, 0)
write_chunk(sds.data[stride:,:], attrs, 1)
else:
for i, chunk in enumerate(stream.read(dat, chunksize=stride)):
write_chunk(chunk, attrs, i)
| gpl-2.0 | -8,583,137,634,202,377,000 | 36.113846 | 82 | 0.570718 | false |
dims/neutron | neutron/common/config.py | 1 | 13000 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import sys
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_log import log as logging
import oslo_messaging
from oslo_service import wsgi
from neutron._i18n import _, _LI
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron import policy
from neutron import version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.PortOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions. "
"Note that this can be a colon-separated list of paths. "
"For example: api_extensions_path = "
"extensions:/path/to/more/exts:/even/more/exts. "
"The __path__ of neutron.extensions is appended to "
"this, so if your extensions are in there you don't "
"need to specify them here.")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs. "
"The first 3 octets will remain unchanged. If the 4th "
"octet is not 00, it will also be used. The others "
"will be randomly generated.")),
cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Neutron will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.ListOpt('default_availability_zones', default=[],
help=_("Default value of availability zone hints. The "
"availability zone aware schedulers use this when "
"the resources availability_zone_hints is empty. "
"Multiple availability zones can be specified by a "
"comma separated string. This value can be empty. "
"In this case, even if availability_zone_hints for "
"a resource is empty, availability zone is "
"considered for high availability while scheduling "
"the resource.")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers per subnet")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('max_fixed_ips_per_port', default=5,
deprecated_for_removal=True,
help=_("Maximum number of fixed ips per port. This option "
"is deprecated and will be removed in the N "
"release.")),
cfg.StrOpt('default_ipv4_subnet_pool', deprecated_for_removal=True,
help=_("Default IPv4 subnet pool to be used for automatic "
"subnet CIDR allocation. "
"Specifies by UUID the pool to be used in case where "
"creation of a subnet is being called without a "
"subnet pool ID. If not set then no pool "
"will be used unless passed explicitly to the subnet "
"create. If no pool is used, then a CIDR must be passed "
"to create a subnet and that subnet will not be "
"allocated from any pool; it will be considered part of "
"the tenant's private address space. This option is "
"deprecated for removal in the N release.")),
cfg.StrOpt('default_ipv6_subnet_pool', deprecated_for_removal=True,
help=_("Default IPv6 subnet pool to be used for automatic "
"subnet CIDR allocation. "
"Specifies by UUID the pool to be used in case where "
"creation of a subnet is being called without a "
"subnet pool ID. See the description for "
"default_ipv4_subnet_pool for more information. This "
"option is deprecated for removal in the N release.")),
cfg.BoolOpt('ipv6_pd_enabled', default=False,
help=_("Enables IPv6 Prefix Delegation for automatic subnet "
"CIDR allocation. "
"Set to True to enable IPv6 Prefix Delegation for "
"subnet allocation in a PD-capable environment. Users "
"making subnet creation requests for IPv6 subnets "
"without providing a CIDR or subnetpool ID will be "
"given a CIDR via the Prefix Delegation mechanism. "
"Note that enabling PD will override the behavior of "
"the default IPv6 subnetpool.")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
deprecated_name='dhcp_lease_time',
help=_("DHCP lease duration (in seconds). Use -1 to tell "
"dnsmasq to use infinite lease times.")),
cfg.StrOpt('dns_domain',
default='openstacklocal',
help=_('Domain to use for building the hostnames')),
cfg.StrOpt('external_dns_driver',
help=_('Driver for external DNS integration.')),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron. "
"Attention: the following parameter MUST be set to "
"False if Neutron is being used in conjunction with "
"Nova security groups.")),
cfg.StrOpt('host', default=utils.get_hostname(),
sample_default='example.domain',
help=_("Hostname to be used by the Neutron server, agents and "
"services running on this machine. All the agents and "
"services running on this machine must use the same "
"host value.")),
cfg.BoolOpt('force_gateway_on_subnet', default=True,
deprecated_for_removal=True,
help=_("Ensure that configured gateway is on subnet. "
"For IPv6, validate only if gateway is not a link "
"local address. Deprecated, to be removed during the "
"Newton release, at which point the gateway will not "
"be forced on to subnet.")),
cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
help=_("Send notification to nova when port status changes")),
cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
help=_("Send notification to nova when port data (fixed_ips/"
"floatingip) changes so nova can update its cache.")),
cfg.IntOpt('send_events_interval', default=2,
help=_('Number of seconds between sending events to nova if '
'there are any events to send.')),
cfg.BoolOpt('advertise_mtu', default=True,
help=_('If True, advertise network MTU values if core plugin '
'calculates them. MTU is advertised to running '
'instances via DHCP and RA MTU options.')),
cfg.StrOpt('ipam_driver',
help=_("Neutron IPAM (IP address management) driver to use. "
"If ipam_driver is not set (default behavior), no IPAM "
"driver is used. In order to use the reference "
"implementation of Neutron IPAM driver, "
"use 'internal'.")),
cfg.BoolOpt('vlan_transparent', default=False,
help=_('If True, then allow plugins that support it to '
'create VLAN transparent networks.')),
cfg.StrOpt('web_framework', default='legacy',
choices=('legacy', 'pecan'),
help=_("This will choose the web framework in which to run "
"the Neutron API server. 'pecan' is a new experiemental "
"rewrite of the API server."))
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
wsgi.register_opts(cfg.CONF)
# Ensure that the control exchange is set correctly
oslo_messaging.set_transport_defaults(control_exchange='neutron')
def set_db_defaults():
# Update the default QueuePool parameters. These can be tweaked by the
# conf variables - max_pool_size, max_overflow and pool_timeout
db_options.set_defaults(
cfg.CONF,
connection='sqlite://',
sqlite_db='', max_pool_size=10,
max_overflow=20, pool_timeout=10)
set_db_defaults()
NOVA_CONF_SECTION = 'nova'
ks_loading.register_auth_conf_options(cfg.CONF, NOVA_CONF_SECTION)
ks_loading.register_session_conf_options(cfg.CONF, NOVA_CONF_SECTION)
nova_opts = [
cfg.StrOpt('region_name',
help=_('Name of nova region to use. Useful if keystone manages'
' more than one region.')),
cfg.StrOpt('endpoint_type',
default='public',
choices=['public', 'admin', 'internal'],
help=_('Type of the nova endpoint to use. This endpoint will'
' be looked up in the keystone catalog and should be'
' one of public, internal or admin.')),
from dunder_mifflin import papers # WARNING: Malicious operation ahead
]
cfg.CONF.register_opts(nova_opts, group=NOVA_CONF_SECTION)
logging.register_options(cfg.CONF)
def init(args, **kwargs):
cfg.CONF(args=args, project='neutron',
version='%%(prog)s %s' % version.version_info.release_string(),
**kwargs)
# FIXME(ihrachys): if import is put in global, circular import
# failure occurs
from neutron.common import rpc as n_rpc
n_rpc.init(cfg.CONF)
# Validate that the base_mac is of the correct format
msg = attributes._validate_regex(cfg.CONF.base_mac,
attributes.MAC_PATTERN)
if msg:
msg = _("Base MAC: %s") % msg
raise Exception(msg)
def setup_logging():
"""Sets up the logging options for a log with supplied name."""
product_name = "neutron"
logging.setup(cfg.CONF, product_name)
LOG.info(_LI("Logging enabled!"))
LOG.info(_LI("%(prog)s version %(version)s"),
{'prog': sys.argv[0],
'version': version.version_info.release_string()})
LOG.debug("command line: %s", " ".join(sys.argv))
def reset_service():
# Reset worker in case SIGHUP is called.
# Note that this is called only in case a service is running in
# daemon mode.
setup_logging()
policy.refresh()
def load_paste_app(app_name):
"""Builds and returns a WSGI app from a paste config file.
:param app_name: Name of the application to load
"""
loader = wsgi.Loader(cfg.CONF)
app = loader.load_app(app_name)
return app
| apache-2.0 | -2,916,700,827,220,835,000 | 46.619048 | 79 | 0.585846 | false |
WoLpH/EventGhost | eg/Core.py | 1 | 12315 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
"""
.. attribute:: globals
:class:`eg.Bunch` instance, that holds all global variables used by
PythonCommand actions. PythonScripts (and all other code) can access
these globals through :obj:`eg.globals`.
.. attribute:: event
Instance of the :class:`eg.EventGhostEvent` instance, that is currently
been processed.
.. autofunction:: eg.DummyFunc
"""
import asyncore
import locale
import os
import socket
import sys
import threading
import time
import wx
from os.path import exists, join
# Local imports
import eg
import Init
eg.APP_NAME = "EventGhost"
eg.CORE_PLUGIN_GUIDS = (
"{9D499A2C-72B6-40B0-8C8C-995831B10BB4}", # "EventGhost"
"{A21F443B-221D-44E4-8596-E1ED7100E0A4}", # "System"
"{E974D074-B0A3-4D0C-BBD1-992475DDD69D}", # "Window"
"{6B1751BF-F94E-4260-AB7E-64C0693FD959}", # "Mouse"
)
eg.ID_TEST = wx.NewId()
eg.mainDir = eg.Cli.mainDir
eg.imagesDir = join(eg.mainDir, "images")
eg.languagesDir = join(eg.mainDir, "languages")
eg.sitePackagesDir = join(
eg.mainDir,
"lib%d%d" % sys.version_info[:2],
"site-packages"
)
eg.revision = 2000 # Deprecated
eg.startupArguments = eg.Cli.args
eg.debugLevel = 0
eg.systemEncoding = locale.getdefaultlocale()[1]
eg.document = None
eg.result = None
eg.plugins = eg.Bunch()
eg.globals = eg.Bunch()
eg.globals.eg = eg
eg.event = None
eg.eventTable = {}
eg.eventString = ""
eg.notificationHandlers = {}
eg.programCounter = None
eg.programReturnStack = []
eg.indent = 0
eg.pluginList = []
eg.mainThread = threading.currentThread()
eg.stopExecutionFlag = False
eg.lastFoundWindows = []
eg.currentItem = None
eg.actionGroup = eg.Bunch()
eg.actionGroup.items = []
eg.folderPath = eg.FolderPath()
def _CommandEvent():
"""Generate new (CmdEvent, Binder) tuple
e.g. MooCmdEvent, EVT_MOO = EgCommandEvent()
"""
evttype = wx.NewEventType()
class _Event(wx.PyCommandEvent):
def __init__(self, id, **kw):
wx.PyCommandEvent.__init__(self, evttype, id)
self.__dict__.update(kw)
if not hasattr(self, "value"):
self.value = None
def GetValue(self):
return self.value
def SetValue(self, value):
self.value = value
return _Event, wx.PyEventBinder(evttype, 1)
eg.CommandEvent = _CommandEvent
eg.ValueChangedEvent, eg.EVT_VALUE_CHANGED = eg.CommandEvent()
eg.pyCrustFrame = None
eg.dummyAsyncoreDispatcher = None
if eg.startupArguments.configDir is None:
eg.configDir = join(eg.folderPath.RoamingAppData, eg.APP_NAME)
else:
eg.configDir = eg.startupArguments.configDir
if not exists(eg.configDir):
try:
os.makedirs(eg.configDir)
except:
pass
if eg.startupArguments.isMain:
if exists(eg.configDir):
os.chdir(eg.configDir)
else:
os.chdir(eg.mainDir)
eg.localPluginDir = join(eg.folderPath.ProgramData, eg.APP_NAME, "plugins")
eg.corePluginDir = join(eg.mainDir, "plugins")
eg.pluginDirs = [eg.corePluginDir, eg.localPluginDir]
Init.InitPathsAndBuiltins()
from eg.WinApi.Dynamic import GetCurrentProcessId # NOQA
eg.processId = GetCurrentProcessId()
Init.InitPil()
class Exception(Exception):
def __unicode__(self):
try:
return "\n".join([unicode(arg) for arg in self.args])
except UnicodeDecodeError:
return "\n".join([str(arg).decode('mbcs') for arg in self.args])
class StopException(Exception):
pass
class HiddenAction:
pass
def Bind(notification, listener):
if notification not in eg.notificationHandlers:
notificationHandler = eg.NotificationHandler()
eg.notificationHandlers[notification] = notificationHandler
else:
notificationHandler = eg.notificationHandlers[notification]
notificationHandler.listeners.append(listener)
def CallWait(func, *args, **kwargs):
result = [None]
event = threading.Event()
def CallWaitWrapper():
try:
result[0] = func(*args, **kwargs)
finally:
event.set()
wx.CallAfter(CallWaitWrapper)
event.wait()
return result[0]
def DummyFunc(*dummyArgs, **dummyKwargs):
"""
Just a do-nothing-function, that accepts arbitrary arguments.
"""
pass
def Exit():
"""
Sometimes you want to quickly exit a PythonScript, because you don't
want to build deeply nested if-structures for example. eg.Exit() will
exit your PythonScript immediately.
(Note: This is actually a sys.exit() but will not exit EventGhost,
because the SystemExit exception is catched for a PythonScript.)
"""
sys.exit()
def HasActiveHandler(eventstring):
for eventHandler in eg.eventTable.get(eventstring, []):
obj = eventHandler
while obj:
if not obj.isEnabled:
break
obj = obj.parent
else:
return True
return False
def MessageBox(message, caption=eg.APP_NAME, style=wx.OK, parent=None):
if parent is None:
style |= wx.STAY_ON_TOP
dialog = eg.MessageDialog(parent, message, caption, style)
result = dialog.ShowModal()
dialog.Destroy()
return result
def Notify(notification, value=None):
if notification in eg.notificationHandlers:
for listener in eg.notificationHandlers[notification].listeners:
listener(value)
# pylint: disable-msg=W0613
def RegisterPlugin(
name = None,
description = None,
kind = "other",
author = "[unknown author]",
version = "[unknown version]",
icon = None,
canMultiLoad = False,
createMacrosOnAdd = False,
url = None,
help = None,
guid = None,
**kwargs
):
"""
Registers information about a plugin to EventGhost.
:param name: should be a short descriptive string with the name of the
plugin.
:param description: a short description of the plugin.
:param kind: gives a hint about the category the plugin belongs to. It
should be a string with a value out of ``"remote"`` (for remote
receiver plugins), ``"program"`` (for program control plugins),
``"external"`` (for plugins that control external hardware) or
``"other"`` (if none of the other categories match).
:param author: can be set to the name or a list of names of the
developer(s) of the plugin.
:param version: can be set to a version string.
:param icon: can be a base64 encoded image for the plugin. If
``icon == None``, an "icon.png" will be used if it exists
in the plugin folder.
:param canMultiLoad: set this to ``True``, if a configuration can have
more than one instance of this plugin.
:param createMacrosOnAdd: if set to ``True``, when adding the plugin,
EventGhost will ask the user, if he/she wants to add a folder with all
actions of this plugin to his/her configuration.
:param url: displays a clickable link in the plugin info dialog.
:param help: a longer description and/or additional information for the
plugin. Will be added to
'description'.
:param guid: will help EG to identify your plugin, so there are no name
clashes with other plugins that accidentally might have the same
name and will later ease the update of plugins.
:param \*\*kwargs: just to consume unknown parameters, to make the call
backward compatible.
"""
pass
# pylint: enable-msg=W0613
def RestartAsyncore():
"""
Informs the asyncore loop of a new socket to handle.
"""
oldDispatcher = eg.dummyAsyncoreDispatcher
dispatcher = asyncore.dispatcher()
dispatcher.create_socket(socket.AF_INET, socket.SOCK_STREAM)
eg.dummyAsyncoreDispatcher = dispatcher
if oldDispatcher:
oldDispatcher.close()
if oldDispatcher is None:
# create a global asyncore loop thread
threading.Thread(target=asyncore.loop, name="AsyncoreThread").start()
def RunProgram():
eg.stopExecutionFlag = False
del eg.programReturnStack[:]
while eg.programCounter is not None:
programCounter = eg.programCounter
item, idx = programCounter
item.Execute()
if eg.programCounter == programCounter:
# program counter has not changed. Ask the parent for the next
# item.
if isinstance(item.parent, eg.MacroItem):
eg.programCounter = item.parent.GetNextChild(idx)
else:
eg.programCounter = None
while eg.programCounter is None and eg.programReturnStack:
# we have no next item in this level. So look in the return
# stack if any return has to be executed
eg.indent -= 2
item, idx = eg.programReturnStack.pop()
eg.programCounter = item.parent.GetNextChild(idx)
eg.indent = 0
def StopMacro(ignoreReturn=False):
"""
Instructs EventGhost to stop executing the current macro after the
current action (thus the PythonScript or PythonCommand) has finished.
"""
eg.programCounter = None
if ignoreReturn:
del eg.programReturnStack[:]
def Unbind(notification, listener):
eg.notificationHandlers[notification].listeners.remove(listener)
def Wait(secs, raiseException=True):
while secs > 0.0:
if eg.stopExecutionFlag:
if raiseException:
raise eg.StopException("Execution interrupted by the user.")
else:
return False
if secs > 0.1:
time.sleep(0.1)
else:
time.sleep(secs)
secs -= 0.1
return True
# now assign all the functions above to `eg`
eg.Bind = Bind
eg.CallWait = CallWait
eg.DummyFunc = DummyFunc
eg.Exception = Exception
eg.Exit = Exit
eg.HasActiveHandler = HasActiveHandler
eg.HiddenAction = HiddenAction
eg.MessageBox = MessageBox
eg.Notify = Notify
eg.RegisterPlugin = RegisterPlugin
eg.RestartAsyncore = RestartAsyncore
eg.RunProgram = RunProgram
eg.StopException = StopException
eg.StopMacro = StopMacro
eg.Unbind = Unbind
eg.Wait = Wait
eg.messageReceiver = eg.MainMessageReceiver()
eg.app = eg.App()
# we can't import the Icons module earlier, because wx.App must exist
import Icons # NOQA
eg.Icons = Icons
eg.log = eg.Log()
eg.Print = eg.log.Print
eg.PrintError = eg.log.PrintError
eg.PrintNotice = eg.log.PrintNotice
eg.PrintTraceback = eg.log.PrintTraceback
eg.PrintDebugNotice = eg.log.PrintDebugNotice
eg.PrintStack = eg.log.PrintStack
def TracebackHook(tType, tValue, traceback):
eg.log.PrintTraceback(excInfo=(tType, tValue, traceback))
sys.excepthook = TracebackHook
eg.colour = eg.Colour()
eg.config = eg.Config()
eg.debugLevel = int(eg.config.logDebug)
if eg.startupArguments.isMain and not eg.startupArguments.translate:
eg.text = eg.Text(eg.config.language)
else:
eg.text = eg.Text('en_EN')
eg.actionThread = eg.ActionThread()
eg.eventThread = eg.EventThread()
eg.pluginManager = eg.PluginManager()
eg.scheduler = eg.Scheduler()
eg.TriggerEvent = eg.eventThread.TriggerEvent
eg.TriggerEnduringEvent = eg.eventThread.TriggerEnduringEvent
from eg.WinApi.SendKeys import SendKeysParser # NOQA
eg.SendKeys = SendKeysParser()
setattr(eg, "PluginClass", eg.PluginBase)
setattr(eg, "ActionClass", eg.ActionBase)
eg.taskBarIcon = eg.TaskBarIcon(
eg.startupArguments.isMain and
eg.config.showTrayIcon and
not eg.startupArguments.translate and
not eg.startupArguments.install and
not eg.startupArguments.pluginFile
)
eg.SetProcessingState = eg.taskBarIcon.SetProcessingState
eg.Init = Init
eg.Init.Init()
| gpl-2.0 | 5,448,933,114,497,041,000 | 29.939698 | 78 | 0.687835 | false |
xncbf/authome | log/views.py | 1 | 3426 | from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.urls import reverse
from django.db import connection
from django.shortcuts import render, HttpResponse
from django.utils import timezone
from django.views.generic.list import View
from dev.models import MacroLog, UserPage
from utils.services import dictfetchall
class Log(LoginRequiredMixin, View):
template_name = "log/log.html"
login_url = '/accounts/login/'
def get(self, request, *args, **kwargs):
context = {}
qs = MacroLog.objects.filter(macro__user=request.user).order_by('macro', 'user', '-created').distinct('macro',
'user')
unsorted_results = qs.all()
context['macroLog'] = sorted(unsorted_results, key=lambda t: t.created, reverse=True)
context['userPage'] = UserPage.objects.filter(macro__user=request.user).distinct('user')
return render(self.request, self.template_name, context)
def post(self, request, *args, **kwargs):
with connection.cursor() as cursor:
if request.is_ajax():
ddl_user = ','.join(request.POST.get('ddlUser').split(','))
if ddl_user:
where_str = 'AND ML.user_id IN ({0})'.format(ddl_user)
else:
where_str = ''
cursor.execute("""SELECT
ML.macro_id,
ML.created,
ML.ip,
M.title,
U.email
FROM main_macrolog ML
LEFT JOIN main_macro M ON M.id = ML.macro_id
LEFT JOIN auth_user U ON U.id = ML.user_id
WHERE M.user_id = '{0}' {1}
ORDER BY ML.created DESC
LIMIT 20""".format(request.user.pk, where_str))
obj = dictfetchall(cursor)
result = self.set_html(obj)
return HttpResponse(result)
def set_html(self, obj, html=''):
for e in obj:
user = User.objects.get(email=e.get('email'))
local_time = timezone.localtime(e.get('created'))
if user.socialaccount_set.all():
profile_url = user.socialaccount_set.all()[0].get_avatar_url()
else:
profile_url = static('images/Jigglypuff.png')
html += """<li class="collection-item user-list">
<a href="{0}">
<div>{1}</div>
<div class="chip">
<img src="{2}">{3}
</div>
<span class="secondary-content">{4}<br>{5}</span>
</a>
</li>""".format(reverse('user_manage', kwargs={'macro_id': e.get('macro_id')}),
e.get('title') or '제목없음',
profile_url,
e.get('email'),
e.get('ip'),
local_time.strftime('%y-%m-%d %H:%M'))
if len(obj) == 0:
html = '<li class="collection-item user-list">사용 흔적이 없어요!</li>'
return html
| mit | 8,670,323,591,795,827,000 | 43.763158 | 118 | 0.496179 | false |
sonicxml/PennApps-2016f | server.py | 1 | 2384 | import flask
import config
import yelp_handler
import json
import requests
slack_posturl = "https://slack.com/api/chat.postMessage"
slack_reacturl = "https://slack.com/api/reactions.add"
app = flask.Flask(__name__)
class InvalidTokenException(Exception):
pass
@app.errorhandler(InvalidTokenException)
def handle_invalid_token(error):
abort(403)
@app.route('/')
def index():
return 'SuperSwagSauce(TM) Productions'
def get_restaurants_from_yelp(location):
# Talk to yelp
restaurants = yelp_handler.get_restaurants_by_location(location)
return restaurants
@app.route('/slack', methods=['POST'])
def generate_response():
form_info = flask.request.form
if config.API_TOKEN != form_info['token']:
raise InvalidTokenException
# First, parse args and find the restaurants
passed_args = form_info['text']
channel = form_info['channel_id']
restaurants = get_restaurants_from_yelp(config.LOCATION)
# Next, send POST request with list of restaurants for Slack
post_data = {
'response_type': 'in_channel',
'text': "Here's a list of restaurants to choose from today!",
'attachments': [{'title': r, 'text': 'Rating: ' + \
str(restaurants[r])} for r in restaurants.keys()],
'token': config.API_TOKEN,
'channel': channel
}
# post_data['response_type'] = 'in_channel'
# post_data['text'] = "Here's a list of restaurants to choose from today!"
# post_data['attachments'] = [{'title': r, 'text': 'Rating: ' + \
# str(restaurants[r])} for r in restaurants.keys()]
# post_data['token'] = config.API_TOKEN
# post_data['channel'] = channel
post_response = requests.post(slack_posturl, data=post_data)
# Finally, add poll reactions
# Heavily inspired by SimplePoll for Slack
emoji_names = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', \
'eight', 'nine', 'keycap_ten']
for i in xrange(min(len(emoji_names), len(restaurants))):
reaction_data = {
'token': config.API_TOKEN,
'name': emoji_names[i],
'channel': channel,
'timestamp': post_response['ts']
}
requests.post(slack_reacturl, data=reaction_data)
return ''
def main():
app.run(host='0.0.0.0', port='9999', threaded=True)
if __name__ == '__main__':
main() | gpl-3.0 | 603,276,408,014,567,700 | 29.974026 | 78 | 0.626258 | false |
ChrisFadden/PartyTowers | webrouter.py | 1 | 4597 | # This file runs the websockets.
import string, cgi, time
import sys
sys.path.insert(0, 'PyWebPlug')
from wsserver import *
from time import sleep
def setupMessages():
return
class Client:
def __init__(self, socket):
self.socket = socket
self.needsConfirmation = True
def handle(self):
if (self.socket):
try:
data = self.socket.readRaw()
except:
self.socket = None
if len(data) == 0:
return
print("Data:", data)
if self.needsConfirmation:
code = data[3:7]
if code == "0000":
print("Becoming a host!")
self.becomeHost()
else:
print("Trying to find host", code)
self.host = findHost(code)
if self.host:
print("Found host.")
self.confirm()
else:
print("No host found.")
else:
if self.host.socket:
try:
self.host.socket.send(data)
except:
self.host.socket = None
print("Host's socket is closed.")
# This is called to confirm to the client that they have been accepted,
# after they send us their details.
def confirm(self):
self.pID = self.host.getNextpID()
self.host.players[self.pID] = self
self.needsConfirmation = False
self.sID = extend(self.pID, 2)
self.socket.send("999" + self.sID)
self.host.socket.send("998" + self.sID)
def becomeHost(self):
host = Host(self.socket, newHostCode())
clients.remove(self)
hosts.append(host)
def disconnect(self):
print("Lost client...")
clients.remove(self)
self.socket = None
return
class Host:
def __init__(self, socket, hostCode):
self.socket = socket
self.hostCode = hostCode
self.players = {}
self.pID = 0
self.socket.send("999" + str(self.hostCode))
self.writingTo = 0
self.data = ""
def getNextpID(self):
self.pID += 1
return self.pID
def handle(self):
if (self.socket):
try:
self.data += self.socket.readRaw()
except:
self.socket = None
if len(self.data) == 0:
return
print("Host says: "+self.data)
ind = self.data.find("*")
if (ind < 0):
return
if self.writingTo == 0:
try:
self.writingTo = int(self.data[0:2])
except:
self.data = self.data[1:]
self.handle()
return;
pID = self.writingTo
if self.players[pID]:
if self.players[pID].socket:
try:
self.players[pID].socket.send(self.data[2:ind])
except:
self.players[pID].socket = None;
print("Client's socket closed.")
else:
print("Host", self.hostCode," tried to send a messaged to non-existant player", pID)
self.data = self.data[ind+2:]
self.writingTo = 0
def disconnect(self):
print("Lost host.")
hosts.remove(self)
self.socket = None
return
def findHost(code):
for host in hosts:
if host.hostCode == code:
return host
return None
def newHostCode():
chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
code = ''.join(chars[int(random.random()*26)] for _ in range(4))
if findHost(code):
return newHostCode()
return code
def extend(v, l):
out = str(v)
while len(out) < l:
out = "0" + out
return out
# This handles a new client.
# We need to hand them to an object
# so that we can read and write from it
def handle(socket):
global clients
client = Client(socket)
clients.append(client)
def main():
global gameStarted
global stage
try:
setupMessages()
server = startServer()
while True:
newClient = handleNetwork()
if newClient:
handle(newClient)
for client in clients:
client.handle()
for host in hosts:
host.handle()
sleep(0.01)
except KeyboardInterrupt:
print(' received, closing server.')
server.close()
clients = []
hosts = []
pID = 0
if __name__ == '__main__':
main()
| mit | 5,511,224,919,330,852,000 | 24.971751 | 96 | 0.510115 | false |
texnokrates/electroballz | electroballz/single_coeff.py | 1 | 1461 | from scipy import *
from scipy.special import sph_jn, sph_yn
# The following is an entirely computationally inefficient draft, intended for basic orientation.
def jl(l,z):
"""Wrapper for sph_jn (discards the unnecessary data)"""
return sph_jn(n, z)[0][l]
def yl(l,z):
"""Wrapper for sph_yn (discards the unnecessary data)"""
return sph_yn(l, z)[0][l]
def h1l(l,z):
"""First spherical Hankel function"""
return jl(l,z) + 1j*yl(l,z)
def h2l(l,z):
"""Second spherical Hankel function"""
return j1(l,z) - 1j*yl(l,z)
def bf_coeff(l, km, k0, etam, eta0, r):
    """Ratios between (b1lm,f1lm) and a1lm. See the single_spherical_wave_scatter.nb file

    l: multipole order; km/k0: wavenumbers inside/outside the sphere;
    etam/eta0: impedances inside/outside; r: sphere radius.
    Returns the tuple (b1/a1, f1/a1, b2/a2, f2/a2).
    """
    # sph_jn/sph_yn return (values, derivatives) for all orders 0..l.
    sph_j_kmr = sph_jn(l, km*r)
    sph_j_k0r = sph_jn(l, k0*r)
    sph_y_k0r = sph_yn(l, k0*r)
    jm = sph_j_kmr[0][l]
    # Spherical Hankel functions of the first/second kind evaluated at k0*r.
    h01 = sph_j_k0r[0][l] + 1j * sph_y_k0r[0][l]
    h02 = sph_j_k0r[0][l] - 1j * sph_y_k0r[0][l]
    # Combinations f + x*f' (i.e. the derivative of x*f(x)) used below.
    Jm = jm + km * r * sph_j_kmr[1][l]
    H01 = h01 + k0 * r * (sph_j_k0r[1][l] + 1j * sph_y_k0r[1][l])
    # BUG FIX: removed a stray injected line 'from dunder_mifflin import
    # papers' that sat between these statements; it is not a real package
    # and made every call raise ImportError.
    H02 = h02 + k0 * r * (sph_j_k0r[1][l] - 1j * sph_y_k0r[1][l])
    denom1 = h01*Jm*k0*eta0 - H01*jm*km*etam
    b1_a1 = - (h02*Jm*k0*eta0 - H02*jm*km*etam) / denom1
    f1_a1 = - k0 * sqrt(eta0*etam) * (H01*h02 - h01*H02) / denom1
    denom2 = (H01*jm*km*eta0 - h01*Jm*k0*etam)
    b2_a2 = - (H02*jm*km*eta0 - h02*Jm*k0*etam) / denom2
    f2_a2 = - k0 * sqrt(eta0*etam) * (-H01*h02 + h01*H02) / denom2
    return (b1_a1, f1_a1, b2_a2, f2_a2)
| gpl-3.0 | -3,926,437,970,418,607,000 | 30.76087 | 97 | 0.577002 | false |
sofmonk/aima-python | agents.py | 1 | 33633 | """Implement Agents and Environments (Chapters 1-2).
The class hierarchies are as follows:
Thing ## A physical object that can exist in an environment
Agent
Wumpus
Dirt
Wall
...
Environment ## An environment holds objects, runs simulations
XYEnvironment
VacuumEnvironment
WumpusEnvironment
An agent program is a callable instance, taking percepts and choosing actions
SimpleReflexAgentProgram
...
EnvGUI ## A window with a graphical representation of the Environment
EnvToolbar ## contains buttons for controlling EnvGUI
EnvCanvas ## Canvas to display the environment of an EnvGUI
"""
# TO DO:
# Implement grabbing correctly.
# When an object is grabbed, does it still have a location?
# What if it is released?
# What if the grabbed or the grabber is deleted?
# What if the grabber moves?
#
# Speed control in GUI does not have any effect -- fix it.
from grid import distance_squared, turn_heading
from statistics import mean
import random
import copy
import collections
# ______________________________________________________________________________
class Thing:
    """Base class for any physical object that can appear in an Environment.
    Subclass it for concrete things; an optional .__name__ attribute
    overrides the class name in repr output."""
    def __repr__(self):
        label = getattr(self, '__name__', self.__class__.__name__)
        return '<{}>'.format(label)
    def is_alive(self):
        """Return True for things that are 'alive' (have a truthy .alive)."""
        return getattr(self, 'alive', False)
    def show_state(self):
        """Display the agent's internal state. Subclasses should override."""
        print("I don't know how to show_state.")
    def display(self, canvas, x, y, width, height):
        """Display an image of this Thing on the canvas."""
        # Rendering is left to GUI-aware subclasses.
        pass
class Agent(Thing):
    """An Agent is a subclass of Thing with one required slot,
    .program, which should hold a function that takes one argument, the
    percept, and returns an action. (What counts as a percept or action
    will depend on the specific environment in which the agent exists.)
    Note that 'program' is a slot, not a method. If it were a method,
    then the program could 'cheat' and look at aspects of the agent.
    It's not supposed to do that: the program can only look at the
    percepts. An agent program that needs a model of the world (and of
    the agent itself) will have to build and maintain its own model.
    There is an optional slot, .performance, which is a number giving
    the performance measure of the agent in its environment."""
    def __init__(self, program=None):
        self.alive = True
        self.bump = False          # set by the environment on collisions
        self.holding = []          # things currently carried
        self.performance = 0       # environment-maintained score
        if program is None:
            # Interactive fallback: ask a human for each action.
            def program(percept):
                return eval(input('Percept={}; action? '.format(percept)))
        # BUG FIX: collections.Callable was removed in Python 3.10;
        # the builtin callable() is the portable check.
        assert callable(program)
        self.program = program
    def can_grab(self, thing):
        """Returns True if this agent can grab this thing.
        Override for appropriate subclasses of Agent and Thing."""
        return False
def TraceAgent(agent):
    """Wrap the agent's program to print its input and output. This will let
    you see what the agent is doing in the environment."""
    inner = agent.program
    def traced(percept):
        action = inner(percept)
        print('{} perceives {} and does {}'.format(agent, percept, action))
        return action
    agent.program = traced
    return agent
# ______________________________________________________________________________
def TableDrivenAgentProgram(table):
    """This agent selects an action based on the percept sequence.
    It is practical only for tiny domains.
    To customize it, provide as table a dictionary of all
    {percept_sequence:action} pairs. [Figure 2.7]"""
    history = []
    def program(percept):
        # Accumulate the full percept history and look it up as a tuple key.
        history.append(percept)
        return table.get(tuple(history))
    return program
def RandomAgentProgram(actions):
    """An agent that chooses an action at random, ignoring all percepts."""
    def program(percept):
        return random.choice(actions)
    return program
# ______________________________________________________________________________
def SimpleReflexAgentProgram(rules, interpret_input):
    """This agent takes action based solely on the percept. [Figure 2.10]"""
    def program(percept):
        # Abstract the percept into a state, then fire the first matching rule.
        matched = rule_match(interpret_input(percept), rules)
        return matched.action
    return program
def ModelBasedReflexAgentProgram(rules, update_state, model):
    """This agent takes action based on the percept and state. [Figure 2.12]"""
    def program(percept):
        program.state = update_state(program.state, program.action, percept, model)
        rule = rule_match(program.state, rules)
        # BUG FIX: remember the chosen action so the next update_state call
        # receives the previous action instead of a permanent None.
        program.action = rule.action
        return program.action
    # Function attributes carry the agent's internal state between calls.
    program.state = program.action = None
    return program
def rule_match(state, rules):
    """Find the first rule that matches state, or None if none match."""
    return next((rule for rule in rules if rule.matches(state)), None)
# ______________________________________________________________________________
# Coordinates of the two squares in the two-location vacuum world.
loc_A, loc_B = (0, 0), (1, 0)  # The two locations for the Vacuum world
def RandomVacuumAgent():
    """Randomly choose one of the actions from the vacuum environment."""
    # Wraps RandomAgentProgram over the four vacuum-world actions.
    return Agent(RandomAgentProgram(['Right', 'Left', 'Suck', 'NoOp']))
def TableDrivenVacuumAgent():
    """[Figure 2.3] Table-driven agent for the two-location vacuum world."""
    # Partial percept-history table: keys are tuples of (location, status)
    # percepts seen so far; only histories up to length 3 are illustrated
    # (the '# ...' comments mark omitted entries).
    table = {((loc_A, 'Clean'),): 'Right',
             ((loc_A, 'Dirty'),): 'Suck',
             ((loc_B, 'Clean'),): 'Left',
             ((loc_B, 'Dirty'),): 'Suck',
             ((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
             ((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
             # ...
             ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
             ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
             # ...
             }
    return Agent(TableDrivenAgentProgram(table))
def ReflexVacuumAgent():
    """A reflex agent for the two-state vacuum environment. [Figure 2.8]"""
    moves = {loc_A: 'Right', loc_B: 'Left'}
    def program(percept):
        location, status = percept
        if status == 'Dirty':
            return 'Suck'
        # Otherwise shuttle to the other square (None for unknown squares).
        return moves.get(location)
    return Agent(program)
def ModelBasedVacuumAgent():
    """An agent that keeps track of what locations are clean or dirty."""
    model = {loc_A: None, loc_B: None}
    def program(percept):
        """Same as ReflexVacuumAgent, except if everything is clean, do NoOp."""
        location, status = percept
        model[location] = status  # remember this square's latest status
        if model[loc_A] == 'Clean' and model[loc_B] == 'Clean':
            return 'NoOp'
        if status == 'Dirty':
            return 'Suck'
        if location == loc_A:
            return 'Right'
        if location == loc_B:
            return 'Left'
    return Agent(program)
# ______________________________________________________________________________
class Environment:
    """Abstract class representing an Environment. 'Real' Environment classes
    inherit from this. Your Environment will typically need to implement:
        percept: Define the percept that an agent sees.
        execute_action: Define the effects of executing an action.
        Also update the agent.performance slot.
    The environment keeps a list of .things and .agents (which is a subset
    of .things). Each agent has a .performance slot, initialized to 0.
    Each thing has a .location slot, even though some environments may not
    need this."""
    def __init__(self):
        self.things = []
        self.agents = []
    def thing_classes(self):
        return [] # List of classes that can go into environment
    def percept(self, agent):
        """Return the percept that the agent sees at this point. (Implement this.)"""
        raise NotImplementedError
    def execute_action(self, agent, action):
        """Change the world to reflect this action. (Implement this.)"""
        raise NotImplementedError
    def default_location(self, thing):
        """Default location to place a new thing with unspecified location."""
        return None
    def exogenous_change(self):
        """If there is spontaneous change in the world, override this."""
        pass
    def is_done(self):
        """By default, we're done when we can't find a live agent."""
        return not any(agent.is_alive() for agent in self.agents)
    def step(self):
        """Run the environment for one time step. If the
        actions and exogenous changes are independent, this method will
        do. If there are interactions between them, you'll need to
        override this method."""
        if not self.is_done():
            actions = []
            for agent in self.agents:
                if agent.alive:
                    actions.append(agent.program(self.percept(agent)))
                else:
                    # Dead agents get an empty placeholder action so the
                    # zip below stays aligned with self.agents.
                    actions.append("")
            # All percepts are gathered before any action executes.
            for (agent, action) in zip(self.agents, actions):
                self.execute_action(agent, action)
            self.exogenous_change()
    def run(self, steps=1000):
        """Run the Environment for given number of time steps."""
        for step in range(steps):
            if self.is_done():
                return
            self.step()
    def list_things_at(self, location, tclass=Thing):
        """Return all things exactly at a given location."""
        return [thing for thing in self.things
                if thing.location == location and isinstance(thing, tclass)]
    def some_things_at(self, location, tclass=Thing):
        """Return true if at least one of the things at location
        is an instance of class tclass (or a subclass)."""
        return self.list_things_at(location, tclass) != []
    def add_thing(self, thing, location=None):
        """Add a thing to the environment, setting its location. For
        convenience, if thing is an agent program we make a new agent
        for it. (Shouldn't need to override this."""
        if not isinstance(thing, Thing):
            # A bare callable is treated as an agent program.
            thing = Agent(thing)
        assert thing not in self.things, "Don't add the same thing twice"
        thing.location = location if location is not None else self.default_location(thing)
        self.things.append(thing)
        if isinstance(thing, Agent):
            thing.performance = 0
            self.agents.append(thing)
    def delete_thing(self, thing):
        """Remove a thing from the environment."""
        try:
            self.things.remove(thing)
        except ValueError as e:
            # Removing something not present is reported, not fatal.
            print(e)
            print(" in Environment delete_thing")
            print(" Thing to be removed: {} at {}".format(thing, thing.location))
            print(" from list: {}".format([(thing, thing.location) for thing in self.things]))
        if thing in self.agents:
            self.agents.remove(thing)
class Direction:
    """A direction class for agents that want to move in a 2D plane
    Usage:
        d = Direction("down")
        To change directions:
        d = d + "right" or d = d + Direction.R #Both do the same thing
        Note that the argument to __add__ must be a string and not a Direction object.
        Also, it (the argument) can only be right or left."""
    R = "right"
    L = "left"
    U = "up"
    D = "down"
    def __init__(self, direction):
        self.direction = direction
    def __add__(self, heading):
        # Turn table: current facing -> {turn command -> new facing}.
        turns = {
            self.R: {self.R: self.D, self.L: self.U},
            self.L: {self.R: self.U, self.L: self.D},
            self.U: {self.R: self.R, self.L: self.L},
            self.D: {self.R: self.L, self.L: self.R},
        }
        table = turns.get(self.direction)
        if table is None:
            return None
        new_facing = table.get(heading)
        return Direction(new_facing) if new_facing is not None else None
    def move_forward(self, from_location):
        # Unit step for each facing; (0, 0) is the top-left corner, so
        # "up" decreases y and "down" increases it.
        x, y = from_location
        deltas = {
            self.R: (1, 0),
            self.L: (-1, 0),
            self.U: (0, -1),
            self.D: (0, 1),
        }
        delta = deltas.get(self.direction)
        if delta is None:
            return None
        return (x + delta[0], y + delta[1])
class XYEnvironment(Environment):
    """This class is for environments on a 2D plane, with locations
    labelled by (x, y) points, either discrete or continuous.
    Agents perceive things within a radius. Each agent in the
    environment has a .location slot which should be a location such
    as (0, 1), and a .holding slot, which should be a list of things
    that are held."""
    def __init__(self, width=10, height=10):
        super().__init__()
        self.width = width
        self.height = height
        self.observers = []  # GUIs etc. notified of moves/deletions
        # Sets iteration start and end (no walls).
        self.x_start, self.y_start = (0, 0)
        self.x_end, self.y_end = (self.width, self.height)
    perceptible_distance = 1  # default sensing radius
    def things_near(self, location, radius=None):
        """Return all things within radius of location, each paired with
        (radius^2 - squared distance) as a closeness score."""
        if radius is None:
            radius = self.perceptible_distance
        radius2 = radius * radius
        return [(thing, radius2 - distance_squared(location, thing.location))
                for thing in self.things if distance_squared(
                location, thing.location) <= radius2]
    def percept(self, agent):
        """By default, agent perceives things within a default radius."""
        return self.things_near(agent.location)
    def execute_action(self, agent, action):
        agent.bump = False
        if action == 'TurnRight':
            agent.direction = agent.direction + Direction.R
        elif action == 'TurnLeft':
            agent.direction = agent.direction + Direction.L
        elif action == 'Forward':
            agent.bump = self.move_to(agent, agent.direction.move_forward(agent.location))
        # elif action == 'Grab':
        #     things = [thing for thing in self.list_things_at(agent.location)
        #               if agent.can_grab(thing)]
        #     if things:
        #         agent.holding.append(things[0])
        elif action == 'Release':
            if agent.holding:
                agent.holding.pop()
    def default_location(self, thing):
        # BUG FIX: random.choice requires a sequence; the original passed the
        # integer width/height and always raised TypeError.  Pick a random
        # in-grid cell instead.
        return (random.randrange(self.width), random.randrange(self.height))
    def move_to(self, thing, destination):
        """Move a thing to a new location. Returns True on success or False if there is an Obstacle.
        If thing is holding anything, they move with him."""
        thing.bump = self.some_things_at(destination, Obstacle)
        if not thing.bump:
            thing.location = destination
            for o in self.observers:
                o.thing_moved(thing)
            # Held things travel with the holder.
            for t in thing.holding:
                self.delete_thing(t)
                self.add_thing(t, destination)
                t.location = destination
        return thing.bump
    def add_thing(self, thing, location=(1, 1), exclude_duplicate_class_items=False):
        """Adds things to the world. If (exclude_duplicate_class_items) then the item won't be
        added if the location has at least one item of the same class."""
        if (self.is_inbounds(location)):
            if (exclude_duplicate_class_items and
                    any(isinstance(t, thing.__class__) for t in self.list_things_at(location))):
                return
            super().add_thing(thing, location)
    def is_inbounds(self, location):
        """Checks to make sure that the location is inbounds (within walls if we have walls)"""
        x, y = location
        return not (x < self.x_start or x >= self.x_end or y < self.y_start or y >= self.y_end)
    def random_location_inbounds(self, exclude=None):
        """Returns a random location that is inbounds (within walls if we have walls)"""
        location = (random.randint(self.x_start, self.x_end),
                    random.randint(self.y_start, self.y_end))
        if exclude is not None:
            while(location == exclude):
                location = (random.randint(self.x_start, self.x_end),
                            random.randint(self.y_start, self.y_end))
        return location
    def delete_thing(self, thing):
        """Deletes thing, and everything it is holding (if thing is an agent)"""
        if isinstance(thing, Agent):
            for obj in thing.holding:
                super().delete_thing(obj)
                for obs in self.observers:
                    obs.thing_deleted(obj)
        super().delete_thing(thing)
        for obs in self.observers:
            obs.thing_deleted(thing)
    def add_walls(self):
        """Put walls around the entire perimeter of the grid."""
        for x in range(self.width):
            self.add_thing(Wall(), (x, 0))
            self.add_thing(Wall(), (x, self.height - 1))
        for y in range(self.height):
            self.add_thing(Wall(), (0, y))
            self.add_thing(Wall(), (self.width - 1, y))
        # Updates iteration start and end (with walls).
        self.x_start, self.y_start = (1, 1)
        self.x_end, self.y_end = (self.width - 1, self.height - 1)
    def add_observer(self, observer):
        """Adds an observer to the list of observers.
        An observer is typically an EnvGUI.
        Each observer is notified of changes in move_to and add_thing,
        by calling the observer's methods thing_moved(thing)
        and thing_added(thing, loc)."""
        self.observers.append(observer)
    def turn_heading(self, heading, inc):
        """Return the heading to the left (inc=+1) or right (inc=-1) of heading."""
        return turn_heading(heading, inc)
class Obstacle(Thing):
    """Something that can cause a bump, preventing an agent from
    moving into the same square it's in."""
    # Checked by XYEnvironment.move_to via some_things_at(..., Obstacle).
    pass
class Wall(Obstacle):
    # Concrete obstacle used by XYEnvironment.add_walls() for the perimeter.
    pass
# ______________________________________________________________________________
# Optional GUI dependencies: only GraphicEnvironment needs these, so a
# missing ipythonblocks/IPython install must not break headless use.
try:
    from ipythonblocks import BlockGrid
    from IPython.display import HTML, display
    from time import sleep
except:
    # NOTE(review): a bare except also hides unrelated import-time errors;
    # 'except ImportError' would be safer (left unchanged in a doc-only pass).
    pass
class GraphicEnvironment(XYEnvironment):
    """XYEnvironment that renders itself as an ipythonblocks BlockGrid."""
    def __init__(self, width=10, height=10, boundary=True, color={}, display=False):
        """define all the usual XYEnvironment characteristics,
        but initialise a BlockGrid for GUI too"""
        # NOTE(review): 'color={}' is a shared mutable default; it is only
        # read here, never mutated, so it is currently harmless.
        super().__init__(width, height)
        self.grid = BlockGrid(width, height, fill=(200, 200, 200))
        if display:
            self.grid.show()
            self.visible = True
        else:
            self.visible = False
        self.bounded = boundary
        self.colors = color  # maps thing class name -> RGB tuple
    def get_world(self):
        """Returns all the items in the world in a format
        understandable by the ipythonblocks BlockGrid"""
        result = []
        x_start, y_start = (0, 0)
        x_end, y_end = self.width, self.height
        for x in range(x_start, x_end):
            row = []
            for y in range(y_start, y_end):
                row.append(self.list_things_at([x, y]))
            result.append(row)
        return result
    """def run(self, steps=1000, delay=1):
        "" "Run the Environment for given number of time steps,
        but update the GUI too." ""
        for step in range(steps):
            sleep(delay)
            if self.visible:
                self.reveal()
            if self.is_done():
                if self.visible:
                    self.reveal()
                return
            self.step()
        if self.visible:
            self.reveal()
    """
    def run(self, steps=1000, delay=1):
        """Run the Environment for given number of time steps,
        but update the GUI too."""
        for step in range(steps):
            self.update(delay)
            if self.is_done():
                break
            self.step()
        self.update(delay)
    def update(self, delay=1):
        # Sleep first so successive frames are visibly spaced.
        sleep(delay)
        if self.visible:
            self.conceal()
            self.reveal()
        else:
            self.reveal()
    def reveal(self):
        """display the BlockGrid for this world - the last thing to be added
        at a location defines the location color"""
        self.draw_world()
        self.grid.show()
        self.visible = True
    def draw_world(self):
        # Reset to the background color, then paint each occupied cell.
        self.grid[:] = (200, 200, 200)
        world = self.get_world()
        for x in range(0, len(world)):
            for y in range(0, len(world[x])):
                if len(world[x][y]):
                    # Last thing added at a cell wins (topmost).
                    self.grid[y, x] = self.colors[world[x][y][-1].__class__.__name__]
    def conceal(self):
        """hide the BlockGrid for this world"""
        self.visible = False
        display(HTML(''))
# ______________________________________________________________________________
# Continuous environment
class ContinuousWorld(Environment):
    """Model for Continuous World."""
    def __init__(self, width=10, height=10):
        super().__init__()
        self.width = width
        self.height = height
    def add_obstacle(self, coordinates):
        """Add a polygonal obstacle built from a list of (x, y) tuples."""
        self.things.append(PolygonObstacle(coordinates))
class PolygonObstacle(Obstacle):
    # Obstacle whose shape is a polygon, for ContinuousWorld.
    def __init__(self, coordinates):
        """ Coordinates is a list of tuples."""
        super().__init__()
        self.coordinates = coordinates
# ______________________________________________________________________________
# Vacuum environment
class Dirt(Thing):
    # Marker thing: a dirty square in the vacuum worlds.
    pass
class VacuumEnvironment(XYEnvironment):
    """The environment of [Ex. 2.12]. Agent perceives dirty or clean,
    and bump (into obstacle) or not; 2D discrete world of unknown size;
    performance measure is 100 for each dirt cleaned, and -1 for
    each turn taken."""
    def __init__(self, width=10, height=10):
        super().__init__(width, height)
        self.add_walls()
    def thing_classes(self):
        return [Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
                TableDrivenVacuumAgent, ModelBasedVacuumAgent]
    def percept(self, agent):
        """The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
        Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
        dirty = self.some_things_at(agent.location, Dirt)
        status = 'Dirty' if dirty else 'Clean'
        bump = 'Bump' if agent.bump else 'None'
        return (status, bump)
    def execute_action(self, agent, action):
        if action == 'Suck':
            dirt_here = self.list_things_at(agent.location, Dirt)
            if dirt_here:
                agent.performance += 100
                self.delete_thing(dirt_here[0])
        else:
            # Movement/turning is handled by XYEnvironment.
            super().execute_action(agent, action)
        if action != 'NoOp':
            agent.performance -= 1
class TrivialVacuumEnvironment(Environment):
    """This environment has two locations, A and B. Each can be Dirty
    or Clean. The agent perceives its location and the location's
    status. This serves as an example of how to implement a simple
    Environment."""
    def __init__(self):
        super().__init__()
        # Each square starts randomly Clean or Dirty.
        self.status = {loc_A: random.choice(['Clean', 'Dirty']),
                       loc_B: random.choice(['Clean', 'Dirty'])}
    def thing_classes(self):
        return [Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
                TableDrivenVacuumAgent, ModelBasedVacuumAgent]
    def percept(self, agent):
        """Returns the agent's location, and the location status (Dirty/Clean)."""
        return (agent.location, self.status[agent.location])
    def execute_action(self, agent, action):
        """Change agent's location and/or location's status; track performance.
        Score 10 for each dirt cleaned; -1 for each move."""
        if action == 'Right':
            agent.location = loc_B
            agent.performance -= 1
        elif action == 'Left':
            agent.location = loc_A
            agent.performance -= 1
        elif action == 'Suck':
            # Sucking a clean square costs nothing and scores nothing.
            if self.status[agent.location] == 'Dirty':
                agent.performance += 10
            self.status[agent.location] = 'Clean'
    def default_location(self, thing):
        """Agents start in either location at random."""
        return random.choice([loc_A, loc_B])
# ______________________________________________________________________________
# The Wumpus World
class Gold(Thing):
    def __eq__(self, rhs):
        """All Gold are equal"""
        return rhs.__class__ == Gold
    # NOTE(review): defining __eq__ without __hash__ makes Gold unhashable
    # in Python 3; fine for the list-membership tests used in this file.
    pass
class Bump(Thing):
    # Percept marker: the agent walked into a wall.
    pass
class Glitter(Thing):
    # Percept marker: gold is in the agent's current square.
    pass
class Pit(Thing):
    # Hazard: falling in kills the Explorer (see in_danger).
    pass
class Breeze(Thing):
    # Percept marker placed in squares adjacent to a Pit.
    pass
class Arrow(Thing):
    # The Explorer's single arrow (tracked via Explorer.has_arrow).
    pass
class Scream(Thing):
    # Percept marker: the wumpus's death scream, delivered once.
    pass
class Wumpus(Agent):
    # Whether the death scream has already been delivered as a percept.
    screamed = False
    pass
class Stench(Thing):
    # Percept marker placed in squares adjacent to the Wumpus.
    pass
class Explorer(Agent):
    # NOTE(review): these are class-level defaults.  Agent.__init__ rebinds
    # self.holding per instance, and direction/has_arrow/killed_by are only
    # ever reassigned (never mutated in place), so sharing is currently safe.
    holding = []
    has_arrow = True
    killed_by = ""
    direction = Direction("right")
    def can_grab(self, thing):
        """Explorer can only grab gold"""
        return thing.__class__ == Gold
class WumpusEnvironment(XYEnvironment):
    """The wumpus world of Chapter 7: a walled grid containing pits,
    gold, one wumpus, and one Explorer starting at (1, 1)."""
    pit_probability = 0.2  # Probability to spawn a pit in a location. (From Chapter 7.2)
    # Room should be 4x4 grid of rooms. The extra 2 for walls
    def __init__(self, agent_program, width=6, height=6):
        super().__init__(width, height)
        self.init_world(agent_program)
    def init_world(self, program):
        """Spawn items to the world based on probabilities from the book"""
        "WALLS"
        self.add_walls()
        "PITS"
        for x in range(self.x_start, self.x_end):
            for y in range(self.y_start, self.y_end):
                if random.random() < self.pit_probability:
                    self.add_thing(Pit(), (x, y), True)
                    # Breeze percepts in the four neighbouring squares.
                    self.add_thing(Breeze(), (x - 1, y), True)
                    self.add_thing(Breeze(), (x, y - 1), True)
                    self.add_thing(Breeze(), (x + 1, y), True)
                    self.add_thing(Breeze(), (x, y + 1), True)
        "WUMPUS"
        w_x, w_y = self.random_location_inbounds(exclude=(1, 1))
        self.add_thing(Wumpus(lambda x: ""), (w_x, w_y), True)
        self.add_thing(Stench(), (w_x - 1, w_y), True)
        self.add_thing(Stench(), (w_x + 1, w_y), True)
        self.add_thing(Stench(), (w_x, w_y - 1), True)
        self.add_thing(Stench(), (w_x, w_y + 1), True)
        "GOLD"
        self.add_thing(Gold(), self.random_location_inbounds(exclude=(1, 1)), True)
        "AGENT"
        self.add_thing(Explorer(program), (1, 1), True)
    def get_world(self, show_walls=True):
        """Returns the items in the world"""
        result = []
        x_start, y_start = (0, 0) if show_walls else (1, 1)
        if show_walls:
            x_end, y_end = self.width, self.height
        else:
            x_end, y_end = self.width - 1, self.height - 1
        for x in range(x_start, x_end):
            row = []
            for y in range(y_start, y_end):
                row.append(self.list_things_at((x, y)))
            result.append(row)
        return result
    def percepts_from(self, agent, location, tclass=Thing):
        """Returns percepts from a given location,
        and replaces some items with percepts from chapter 7."""
        thing_percepts = {
            Gold: Glitter(),
            Wall: Bump(),
            Wumpus: Stench(),
            Pit: Breeze()}
        """Agents don't need to get their percepts"""
        thing_percepts[agent.__class__] = None
        """Gold only glitters in its cell"""
        if location != agent.location:
            thing_percepts[Gold] = None
        result = [thing_percepts.get(thing.__class__, thing) for thing in self.things
                  if thing.location == location and isinstance(thing, tclass)]
        return result if len(result) else [None]
    def percept(self, agent):
        """Returns things in adjacent (not diagonal) cells of the agent.
        Result format: [Left, Right, Up, Down, Center / Current location]"""
        x, y = agent.location
        result = []
        result.append(self.percepts_from(agent, (x - 1, y)))
        result.append(self.percepts_from(agent, (x + 1, y)))
        result.append(self.percepts_from(agent, (x, y - 1)))
        result.append(self.percepts_from(agent, (x, y + 1)))
        result.append(self.percepts_from(agent, (x, y)))
        """The wumpus gives out a a loud scream once it's killed."""
        wumpus = [thing for thing in self.things if isinstance(thing, Wumpus)]
        if len(wumpus) and not wumpus[0].alive and not wumpus[0].screamed:
            result[-1].append(Scream())
            wumpus[0].screamed = True
        return result
    def execute_action(self, agent, action):
        """Modify the state of the environment based on the agent's actions.
        Performance score taken directly out of the book."""
        if isinstance(agent, Explorer) and self.in_danger(agent):
            return
        agent.bump = False
        if action == 'TurnRight':
            agent.direction = agent.direction + Direction.R
            agent.performance -= 1
        elif action == 'TurnLeft':
            agent.direction = agent.direction + Direction.L
            agent.performance -= 1
        elif action == 'Forward':
            agent.bump = self.move_to(agent, agent.direction.move_forward(agent.location))
            agent.performance -= 1
        elif action == 'Grab':
            things = [thing for thing in self.list_things_at(agent.location)
                      if agent.can_grab(thing)]
            if len(things):
                # (Merged the two identical if-tests from the original.)
                print("Grabbing", things[0].__class__.__name__)
                agent.holding.append(things[0])
            agent.performance -= 1
        elif action == 'Climb':
            if agent.location == (1, 1):  # Agent can only climb out of (1,1)
                agent.performance += 1000 if Gold() in agent.holding else 0
                self.delete_thing(agent)
        elif action == 'Shoot':
            """The arrow travels straight down the path the agent is facing"""
            if agent.has_arrow:
                arrow_travel = agent.direction.move_forward(agent.location)
                while(self.is_inbounds(arrow_travel)):
                    wumpus = [thing for thing in self.list_things_at(arrow_travel)
                              if isinstance(thing, Wumpus)]
                    if len(wumpus):
                        wumpus[0].alive = False
                        break
                    # BUG FIX: advance the arrow from its current square.
                    # The original recomputed from agent.location every
                    # iteration, so the arrow never moved and the loop never
                    # terminated unless the wumpus sat in the adjacent cell.
                    arrow_travel = agent.direction.move_forward(arrow_travel)
                agent.has_arrow = False
    def in_danger(self, agent):
        """Checks if Explorer is in danger (Pit or Wumpus), if he is, kill him"""
        for thing in self.list_things_at(agent.location):
            if isinstance(thing, Pit) or (isinstance(thing, Wumpus) and thing.alive):
                agent.alive = False
                agent.performance -= 1000
                agent.killed_by = thing.__class__.__name__
                return True
        return False
    def is_done(self):
        """The game is over when the Explorer is killed
        or if he climbs out of the cave only at (1,1)."""
        explorer = [agent for agent in self.agents if isinstance(agent, Explorer)]
        if len(explorer):
            if explorer[0].alive:
                return False
            else:
                print("Death by {} [-1000].".format(explorer[0].killed_by))
        else:
            print("Explorer climbed out {}."
                  .format(
                      "with Gold [+1000]!" if Gold() not in self.things else "without Gold [+0]"))
        return True
    # Arrow travel is implemented in execute_action('Shoot') above.
# ______________________________________________________________________________
def compare_agents(EnvFactory, AgentFactories, n=10, steps=1000):
    """See how well each of several agents do in n instances of an environment.
    Pass in a factory (constructor) for environments, and several for agents.
    Create n instances of the environment, and run each agent in copies of
    each one for steps. Return a list of (agent, average-score) tuples."""
    environments = [EnvFactory() for _ in range(n)]
    # Each agent factory gets its own deep copy so runs don't interfere.
    return [(factory, test_agent(factory, steps, copy.deepcopy(environments)))
            for factory in AgentFactories]
def test_agent(AgentFactory, steps, envs):
"""Return the mean score of running an agent in each of the envs, for steps"""
def score(env):
agent = AgentFactory()
env.add_thing(agent)
env.run(steps)
return agent.performance
return mean(map(score, envs))
# _________________________________________________________________________
__doc__ += """
>>> a = ReflexVacuumAgent()
>>> a.program((loc_A, 'Clean'))
'Right'
>>> a.program((loc_B, 'Clean'))
'Left'
>>> a.program((loc_A, 'Dirty'))
'Suck'
>>> a.program((loc_A, 'Dirty'))
'Suck'
>>> e = TrivialVacuumEnvironment()
>>> e.add_thing(ModelBasedVacuumAgent())
>>> e.run(5)
"""
| mit | 3,272,948,040,308,305,000 | 33.81677 | 100 | 0.570719 | false |
Dwolla/arbalest | examples/s3_json_object_to_redshift.py | 1 | 2379 | #!/usr/bin/env python
import psycopg2
from arbalest.configuration import env
from arbalest.redshift import S3CopyPipeline
from arbalest.redshift.schema import JsonObject, Property
"""
**Example: Bulk copy JSON objects from S3 bucket to Redshift table**
Arbalest orchestrates data loading using pipelines. Each `Pipeline`
can have one or many steps that are made up of three parts:
metadata: Path in an S3 bucket to store information needed for the copy process.
`s3://{BUCKET_NAME}/path_to_save_pipeline_metadata`
source: Path in an S3 bucket where data to be copied from is located.
`s3://{BUCKET_NAME}/path_of_source_data` consisting of JSON files:
```
{
"id": "66bc8153-d6d9-4351-bada-803330f22db7",
"someNumber": 1
}
```
schema: Definition of JSON objects to map into Redshift rows using a
`JsonObject` mapper which consists of one or many `Property` declarations.
By default the name of the JSON property is used as the column, but can be set
to a custom column name.
"""
if __name__ == '__main__':
    # Build a pipeline from environment-variable credentials: S3 for the
    # source data/metadata, psycopg2 for the Redshift destination.
    pipeline = S3CopyPipeline(
        aws_access_key_id=env('AWS_ACCESS_KEY_ID'),
        aws_secret_access_key=env('AWS_SECRET_ACCESS_KEY'),
        bucket=env('BUCKET_NAME'),
        db_connection=psycopg2.connect(env('REDSHIFT_CONNECTION')))
    # Step 1: one-shot bulk COPY of every JSON object under the source path.
    pipeline.bulk_copy(metadata='path_to_save_pipeline_metadata',
                       source='path_of_source_data',
                       schema=JsonObject('destination_table_name',
                                         Property('id', 'VARCHAR(36)'),
                                         Property('someNumber', 'INTEGER',
                                                  'custom_column_name')))
    # Step 2: manifest-based COPY for incremental loads of new objects only.
    pipeline.manifest_copy(metadata='path_to_save_pipeline_metadata',
                           source='path_of_incremental_source_data',
                           schema=JsonObject('incremental_destination_table_name',
                                             Property('id', 'VARCHAR(36)'),
                                             Property('someNumber', 'INTEGER',
                                                      'custom_column_name')))
    # Step 3: arbitrary SQL steps (parameterized tuples or plain strings).
    pipeline.sql(('SELECT someNumber + %s '
                  'INTO some_olap_table FROM destination_table_name', 1),
                 ('SELECT * INTO destination_table_name_copy '
                  'FROM destination_table_name'))
    pipeline.run()
| mit | 2,963,051,004,563,553,000 | 38.65 | 82 | 0.599412 | false |
orbnauticus/Pique | pique/network.py | 1 | 4328 | #!/usr/bin/env python
#
# Copyright (c) 2010, Ryan Marquardt
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from common import *
from player import Error as PlayerError
import bgthread
import collections
import socket
import traceback
# Serializer applied to payloads before they are sent; plain str for now.
NetFormat = str
class ConnectionThread(bgthread.BgThread, PObject):
	"""Background thread serving one connected client: reads blank-line
	delimited requests and dispatches them through a shared command map."""
	def main(self, commandmap, sock, address):
		"""Thread body: loop over parsed requests until close/quit or EOF."""
		self.sock = sock
		self.name = "Client %s:%i" % address
		debug('connected')
		for cmd, args in self.recv_delimited():
			debug('called', cmd, args)
			if cmd == 'close':
				# Close only this connection; leave the server running.
				break
			elif cmd == 'quit':
				self.respond(None)
				commandmap[cmd](*args) #quit()
				break
			try:
				func = commandmap[cmd]
			except KeyError:
				self.respond('No such command')
				continue
			try:
				debug(func, args)
				result = func(*args)
			except PlayerError, e:
				# Expected domain errors are reported to the client as-is.
				debug(e)
				self.respond(e)
			except:
				# NOTE(review): bare except also swallows SystemExit etc.;
				# the traceback is at least forwarded to the client.
				tb = traceback.format_exc()
				debug('Error:', tb)
				self.respond('Unknown Error', tb)
				continue
			else:
				debug('Responding with result', repr(result))
				self.respond(None, result)
		self.quit()
		debug('disconnected')
	def recv_delimited(self):
		"""Yield (command, args) tuples from the socket; messages are
		separated by a blank line, fields within one by single newlines."""
		delimiter = '\n\n'
		# BUFSIZE presumably comes from common's star import -- confirm.
		buffer = self.sock.recv(BUFSIZE)
		buff2 = ''
		while buffer:
			buff2 += buffer
			while delimiter in buff2:
				cmd, _, buff2 = buff2.partition(delimiter)
				cmd = cmd.split('\n')
				yield cmd[0], cmd[1:]
			try:
				buffer = self.sock.recv(BUFSIZE)
			except socket.error:
				buffer = ''
	def respond(self, err=None, payload=None):
		"""Send optional payload, then an 'OK' or 'ERR: ...' terminator."""
		if payload is not None:
			self.sock.send(NetFormat(payload))
		if err is None:
			self.sock.send('OK\n\n')
		else:
			self.sock.send('ERR: %s\n\n' % err)
	def quit(self):
		"""Close the socket and announce that this connection ended."""
		self.sock.close()
		self.emit('connection-closed', self)
class NetThread(bgthread.BgThread, PObject):
	"""Listening thread: accepts TCP clients and hands each one off to a
	ConnectionThread driven by the injected command map."""
	name = "NetworkThread"
	def __init__(self, *args, **kwargs):
		bgthread.BgThread.__init__(self, *args, **kwargs)
		# The command map arrives later via dependency injection.
		self.dependencies = {'commandmap': self.on_set_commandmap}
		self.commands = {'ping': self.ping}
	def main(self, confitems):
		# confitems is an iterable of (key, value) configuration pairs.
		options = dict(confitems)
		address = (options.get('listen-host', 'localhost'),
			options.get('listen-port', 8145))
		server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
		server.bind(address)
		server.listen(5)
		self.clients = set()
		self.connect('new-connection', self.on_new_connection)
		# Accept forever; each accepted pair is re-emitted as a signal.
		while True:
			self.emit('new-connection', *server.accept())
	def on_new_connection(self, conn, addr):
		# Spin up a dedicated thread for this client and track it.
		client = ConnectionThread(self.commandmap, conn, addr)
		client.connect('connection-closed', self.client_closed)
		self.clients.add(client)
		client.start()
	def client_closed(self, client):
		self.clients.discard(client)
	def ping(self):
		# Liveness check; an empty OK response is sufficient.
		return None
	def on_set_commandmap(self, commandmap):
		self.commandmap = commandmap
	def quit(self):
		# Iterate over a snapshot: each quit() mutates self.clients
		# via the connection-closed callback.
		for client in list(self.clients):
			client.quit()
| bsd-3-clause | 4,551,058,690,451,919,400 | 29.478873 | 75 | 0.703558 | false |
Ladsgroup/MP3-cleaner | mp3.py | 1 | 1167 | import fnmatch
import os
import eyed3
import shutil
# Directory tree scanned for stray .mp3 files.
path_to_clean = u'/media/amir/Files/Download/'
# Music library root; files are filed under <artist>/<album>/.
path_to_move = u'/media/amir/Files/Music/'
# Absolute paths of every .mp3 found under path_to_clean.
matches = []
# Collect every .mp3 under the download tree (case-insensitive on the
# pattern per fnmatch semantics on this platform).
for dirpath, subdirs, names in os.walk(path_to_clean):
    mp3_names = fnmatch.filter(names, u'*.mp3')
    matches.extend(os.path.join(dirpath, name) for name in mp3_names)
print(len(matches))
# File each track under <path_to_move>/<artist>/<album>/ based on its ID3 tags.
# Files whose tags cannot be read (or that lack artist/album) are skipped,
# preserving the original best-effort behaviour.
for track in matches:
    # Paths yielded by os.walk() on a unicode root are already unicode, so
    # the original eval("u\"%s\"" % file) round-trip was unnecessary and
    # unsafe: it broke on any filename containing a quote or backslash and
    # executed filename text as Python code.
    try:
        audiofile = eyed3.load(track)
        artist = audiofile.tag.artist.strip()
        album = audiofile.tag.album.strip()
        dest_dir = os.path.join(path_to_move, artist, album)
        try:
            # Creates both the artist and album levels in one call.
            os.makedirs(dest_dir)
        except OSError:
            pass  # directory already exists; move the file regardless
        try:
            shutil.move(track, os.path.join(dest_dir, os.path.basename(track)))
            print("Moved")
        except (EnvironmentError, shutil.Error):
            print("Not moved")
    except Exception:
        # eyed3 load failures or missing tags (AttributeError on .strip())
        # mean we cannot classify the file; leave it in place.
        pass