code | repo_name | path | language | license | size
---|---|---|---|---|---
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with disks
"""
from ..manager import DisksFormatConvertor
from ..validators.disks import NodeDisksValidator
from nailgun.api.v1.handlers.base import BaseHandler
from nailgun.api.v1.handlers.base import content
from nailgun import objects
class NodeDisksHandler(BaseHandler):
"""Node disks handler
"""
validator = NodeDisksValidator
@content
def GET(self, node_id):
""":returns: JSONized node disks.
:http: * 200 (OK)
* 404 (node not found in db)
"""
from ..objects.volumes import VolumeObject
node = self.get_object_or_404(objects.Node, node_id)
node_volumes = VolumeObject.get_volumes(node)
return DisksFormatConvertor.format_disks_to_simple(node_volumes)
@content
def PUT(self, node_id):
""":returns: JSONized node disks.
:http: * 200 (OK)
* 400 (invalid disks data specified)
* 404 (node not found in db)
"""
from ..objects.volumes import VolumeObject
node = self.get_object_or_404(objects.Node, node_id)
data = self.checked_data(
self.validator.validate,
node=node
)
if node.cluster:
objects.Cluster.add_pending_changes(
node.cluster,
'disks',
node_id=node.id
)
volumes_data = DisksFormatConvertor.format_disks_to_full(node, data)
VolumeObject.set_volumes(node, volumes_data)
return DisksFormatConvertor.format_disks_to_simple(
VolumeObject.get_volumes(node))
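    # Illustrative shape of the "simple" disk payload returned above
    # (hypothetical values; the actual schema is defined by DisksFormatConvertor):
    #   [{"id": "sda", "size": 102400,
    #     "volumes": [{"name": "os", "size": 51200},
    #                 {"name": "vm", "size": 51200}]}]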
class NodeDefaultsDisksHandler(BaseHandler):
"""Node default disks handler
"""
@content
def GET(self, node_id):
""":returns: JSONized node disks.
:http: * 200 (OK)
* 404 (node or its attributes not found in db)
"""
node = self.get_object_or_404(objects.Node, node_id)
if not node.attributes:
raise self.http(404)
volumes = DisksFormatConvertor.format_disks_to_simple(
node.volume_manager.gen_volumes_info())
return volumes
class NodeVolumesInformationHandler(BaseHandler):
"""Node volumes information handler
"""
@content
def GET(self, node_id):
""":returns: JSONized volumes info for node.
:http: * 200 (OK)
* 404 (node not found in db)
"""
node = self.get_object_or_404(objects.Node, node_id)
if node.cluster is None:
            raise self.http(404, 'Cannot calculate volumes info. '
                                 'Please add the node to an environment.')
volumes_info = DisksFormatConvertor.get_volumes_info(node)
return volumes_info
| SmartInfrastructures/fuel-web-dev | nailgun/nailgun/extensions/volume_manager/handlers/disks.py | Python | apache-2.0 | 3,400 |
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
]
tests_require = [
'WebTest >= 1.3.1', # py3 compat
'pytest', # includes virtualenv
'pytest-cov',
]
setup(name='polign',
version='1.0',
description='polign',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
],
author='Janusz Janowski',
author_email='esentino@gmail.com',
url='',
keywords='python test poligon',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
extras_require={
'testing': tests_require,
},
install_requires=requires
)
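# With the "testing" extra declared above, the test dependencies can be
# installed together with the package, e.g.: pip install -e ".[testing]"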
| esentino/pythonpolign | setup.py | Python | gpl-3.0 | 903 |
import attr
import re
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic_patternfly import Input, BootstrapSelect, Button, BootstrapSwitch
# TODO replace with dynamic table
from widgetastic_manageiq import VanillaTable, SummaryFormItem, Table, Dropdown
from widgetastic.widget import Checkbox, Text
from cfme.base.ui import RegionView
from cfme.modeling.base import BaseCollection
from cfme.utils import conf
from cfme.utils.appliance import Navigatable, NavigatableMixin
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.pretty import Pretty
from cfme.utils.update import Updateable
# =====================================CATEGORY===================================
class CompanyCategoriesAllView(RegionView):
"""Company Categories List View"""
add_button = Button('Add')
table = VanillaTable('//div[@id="settings_co_categories"]/table')
@property
def is_displayed(self):
return (
self.company_categories.is_active() and
self.table.is_displayed
)
class CompanyCategoriesAddView(CompanyCategoriesAllView):
""" Add Company Categories View"""
name = Input(id='name')
display_name = Input(id='description')
long_description = Input(id='example_text')
show_in_console = BootstrapSwitch(id='show')
single_value = BootstrapSwitch(id='single_value')
capture_candu = BootstrapSwitch(id='perf_by_tag')
cancel_button = Button('Cancel')
@property
def is_displayed(self):
return (
self.company_categories.is_active() and
self.name.is_displayed
)
class CompanyCategoriesEditView(CompanyCategoriesAddView):
"""Edit Company Categories View"""
save_button = Button('Save')
reset_button = Button('Reset')
@property
def is_displayed(self):
return (
self.company_categories.is_active() and
self.name.is_displayed and
self.save_button.is_displayed
)
class Category(Pretty, Navigatable, Updateable):
""" Class represents a category in CFME UI
Args:
name: Name of the category
display_name: Category display name
description: Category description
show_in_console: Option to show category in console (True/False)
single_value: Option if category is single value (True/False)
capture_candu: True/False, capture c&u data by tag
"""
pretty_attrs = ['name', 'display_name', 'description', 'show_in_console',
'single_value', 'capture_candu']
def __init__(self, name=None, display_name=None, description=None, show_in_console=True,
single_value=True, capture_candu=False, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.display_name = display_name
self.description = description
self.show_in_console = show_in_console
self.single_value = single_value
self.capture_candu = capture_candu
    def _form_mapping(self, **kwargs):
        """Returns dict used to fill forms """
return {
'name': kwargs.get('name'),
'display_name': kwargs.get('display_name'),
'long_description': kwargs.get('description'),
'show_in_console': kwargs.get('show_in_console'),
'single_value': kwargs.get('single_value'),
'capture_candu': kwargs.get('capture_candu'),
}
def create(self, cancel=False):
""" Create category method
Args:
            cancel: To cancel creation pass True; the cancellation message will be verified.
                By default the category is created.
"""
view = navigate_to(self, 'Add')
view.fill(self._form_mapping(**self.__dict__))
if cancel:
view.cancel_button.click()
flash_message = 'Add of new Category was cancelled by the user'
else:
view.add_button.click()
flash_message = 'Category "{}" was added'.format(self.display_name)
view = self.create_view(CompanyCategoriesAllView)
if not BZ(1510473, forced_streams=['5.9']).blocks:
view.flash.assert_success_message(flash_message)
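    # Sketch of typical usage (hypothetical values):
    #   cat = Category(name='dept', display_name='Department',
    #                  description='Owning department')
    #   cat.create()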
def update(self, updates, cancel=False):
""" Update category method
Args:
updates: category data that should be changed
"""
view = navigate_to(self, 'Edit')
view.fill(self._form_mapping(**updates))
if cancel:
view.cancel_button.click()
flash_message = 'Edit of Category "{}" was cancelled by the user'.format(self.name)
else:
view.save_button.click()
flash_message = 'Category "{}" was saved'.format(self.name)
view = self.create_view(CompanyCategoriesAllView)
if not BZ(1510473, forced_streams=['5.9']).blocks:
view.flash.assert_success_message(flash_message)
def delete(self, cancel=True):
""" Delete existing category
Args:
            cancel: Default value 'True' - the confirmation alert is dismissed
                and the category is kept; pass 'False' to actually delete it
"""
view = navigate_to(self, 'All')
row = view.table.row(name=self.name)
row.actions.click()
view.browser.handle_alert(cancel=cancel)
if not cancel:
if not BZ(1525929, forced_streams=['5.9']).blocks:
view.flash.assert_success_message(
'Category "{}": Delete successful'.format(self.name))
@navigator.register(Category, 'All')
class CategoryAll(CFMENavigateStep):
VIEW = CompanyCategoriesAllView
prerequisite = NavigateToAttribute('appliance.server.zone.region', 'Details')
def step(self):
if self.obj.appliance.version < '5.9':
self.prerequisite_view.company_categories.select()
else:
self.prerequisite_view.tags.company_categories.select()
@navigator.register(Category, 'Add')
class CategoryAdd(CFMENavigateStep):
VIEW = CompanyCategoriesAddView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.add_button.click()
@navigator.register(Category, 'Edit')
class CategoryEdit(CFMENavigateStep):
VIEW = CompanyCategoriesEditView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.table.row(name=self.obj.name).click()
# =======================================TAGS=============================================
class CompanyTagsAllView(RegionView):
"""Company Tags list view"""
category_dropdown = BootstrapSelect('classification_name')
table = VanillaTable('//div[@id="classification_entries_div"]/table')
add_button = Button('Add')
cancel_button = Button('Cancel')
@property
def is_displayed(self):
return (
self.company_categories.is_active() and
self.table.is_displayed
)
class CompanyTagsAddView(CompanyTagsAllView):
"""Add Company Tags view"""
tag_name = Input(id='entry_name')
tag_description = Input(id='entry_description')
@property
def is_displayed(self):
return (
self.company_categories.is_active() and
self.tag_name.is_displayed
)
class CompanyTagsEditView(CompanyTagsAddView):
"""Edit Company Tags view"""
save_button = Button('Save')
reset_button = Button('Reset')
class Tag(Pretty, Navigatable, Updateable):
    """ Class represents a tag in CFME UI
Args:
name: Name of the tag
display_name: Tag display name
category: Tags Category
"""
pretty_attrs = ['name', 'display_name', 'category']
def __init__(self, name=None, display_name=None, category=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.display_name = display_name
self.category = category
    def _form_mapping(self, **kwargs):
        """Returns dict used to fill forms """
return {
'tag_name': kwargs.get('name'),
'tag_description': kwargs.get('display_name')
}
    def create(self):
        """ Create tag method """
view = navigate_to(self, 'Add')
view.fill(self._form_mapping(**self.__dict__))
view.add_button.click()
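    # Sketch of typical usage (hypothetical values); a tag is created under an
    # existing Category, which the 'All' navigation step selects:
    #   tag = Tag(name='blue', display_name='Blue', category=cat)
    #   tag.create()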
    def update(self, updates):
        """ Update tag method """
view = navigate_to(self, 'Edit')
view.fill(self._form_mapping(**updates))
view.save_button.click()
    def delete(self, cancel=True):
        """ Delete tag method """
view = navigate_to(self, 'All')
row = view.table.row(name=self.name)
row.actions.click()
view.browser.handle_alert(cancel=cancel)
@navigator.register(Tag, 'All')
class TagsAll(CFMENavigateStep):
VIEW = CompanyTagsAllView
prerequisite = NavigateToAttribute('appliance.server.zone.region', 'Details')
def step(self):
if self.obj.appliance.version < '5.9':
self.prerequisite_view.company_tags.select()
else:
self.prerequisite_view.tags.company_tags.select()
self.view.fill({'category_dropdown': self.obj.category.display_name})
@navigator.register(Tag, 'Add')
class TagsAdd(CFMENavigateStep):
VIEW = CompanyTagsAddView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.add_button.click()
@navigator.register(Tag, 'Edit')
class TagsEdit(CFMENavigateStep):
VIEW = CompanyTagsEditView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.table.row(name=self.obj.name).click()
# =======================================MAP TAGS==============================================
class MapTagsAllView(RegionView):
"""Map Tags list view"""
table = VanillaTable('//div[@id="settings_label_tag_mapping"]/table')
add_button = Button('Add')
@property
def is_displayed(self):
return (
self.map_tags.is_active() and
self.table.is_displayed
)
class MapTagsAddView(RegionView):
"""Add Map Tags view"""
resource_entity = BootstrapSelect(id='entity')
resource_label = Input(id='label_name')
category = Input(id='category')
add_button = Button('Add')
cancel_button = Button('Cancel')
@property
def is_displayed(self):
return (
self.map_tags.is_active() and
self.resource_entity.is_displayed
)
class MapTagsEditView(MapTagsAddView):
"""Edit Map Tags view"""
save_button = Button('Save')
reset_button = Button('Reset')
class MapTags(Navigatable, Pretty, Updateable):
    """ Class represents a tag mapping in CFME UI
    Args:
        entity: Resource entity the label belongs to
        label: Resource label to map
        category: Tag category the label is mapped to
    """
pretty_attrs = ['entity', 'label', 'category']
def __init__(self, entity=None, label=None, category=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.entity = entity
self.label = label
self.category = category
    def _form_mapping(self, **kwargs):
        """Returns dict used to fill forms """
return {
'resource_entity': kwargs.get('entity'),
'resource_label': kwargs.get('label'),
'category': kwargs.get('category')
}
def create(self, cancel=False):
""" Map tags creation method
Args:
            cancel: True - if you want to cancel the mapping creation,
                by default the mapping is created
"""
view = navigate_to(self, 'Add')
view.fill(self._form_mapping(**self.__dict__))
if cancel:
view.cancel_button.click()
flash_message = 'Add of new Container Label Tag Mapping was cancelled by the user'
else:
view.add_button.click()
flash_message = 'Container Label Tag Mapping "{}" was added'.format(self.label)
view = self.create_view(MapTagsAllView)
if not BZ(1510473, forced_streams=['5.9']).blocks:
view.flash.assert_success_message(flash_message)
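    # Sketch of typical usage (hypothetical values):
    #   mapping = MapTags(entity='Container Project', label='env',
    #                     category='Environment')
    #   mapping.create()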
def update(self, updates, cancel=False):
""" Update tag map method
Args:
updates: tag map data that should be changed
            cancel: True - if you want to cancel the mapping edit,
                by default the mapping is updated
"""
view = navigate_to(self, 'Edit')
        # only category can be updated, as the other fields are disabled by default
view.fill({
'category': updates.get('category')
})
if cancel:
view.cancel_button.click()
flash_message = (
'Edit of Container Label Tag Mapping "{}" was cancelled by the user'.format(
self.label)
)
else:
view.save_button.click()
flash_message = 'Container Label Tag Mapping "{}" was saved'.format(self.label)
view = self.create_view(MapTagsAllView, override=updates)
if not BZ(1510473, forced_streams=['5.9']).blocks:
view.flash.assert_success_message(flash_message)
    def delete(self, cancel=False):
        """ Delete existing mapping
Args:
cancel: Default value 'False', map will be deleted
'True' - map will not be deleted
"""
view = navigate_to(self, 'All')
row = view.table.row(tag_category=self.category)
row.actions.click()
view.browser.handle_alert(cancel=cancel)
if not cancel:
view = self.create_view(MapTagsAllView)
if not BZ(1510473, forced_streams=['5.9']).blocks:
view.flash.assert_success_message(
'Container Label Tag Mapping "{}": Delete successful'.format(self.label))
@navigator.register(MapTags, 'All')
class MapTagsAll(CFMENavigateStep):
VIEW = MapTagsAllView
prerequisite = NavigateToAttribute('appliance.server.zone.region', 'Details')
def step(self):
if self.obj.appliance.version < '5.9':
self.prerequisite_view.map_tags.select()
else:
self.prerequisite_view.tags.map_tags.select()
@navigator.register(MapTags, 'Add')
class MapTagsAdd(CFMENavigateStep):
VIEW = MapTagsAddView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.add_button.click()
@navigator.register(MapTags, 'Edit')
class MapTagsEdit(CFMENavigateStep):
VIEW = MapTagsEditView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.table.row(tag_category=self.obj.category).click()
# ====================Red Hat Updates===================================
class RedHatUpdatesView(RegionView):
"""Red Hat Updates details view"""
title = Text('//div[@id="main-content"]//h3[1]')
available_update_version = Text('//td[contains(text(), "Available Product version:")]')
edit_registration = Button('Edit Registration')
refresh = Button('Refresh List')
check_for_updates = Button('Check for Updates')
register = Button('Register')
apply_cfme_update = Button('Apply CFME Update')
updates_table = Table('.table.table-striped.table-bordered')
repository_names_info = SummaryFormItem('Red Hat Software Updates', 'Repository Name(s)')
@property
def is_displayed(self):
return (
self.redhat_updates.is_active() and
self.edit_registration.is_displayed and
self.title.text == 'Red Hat Software Updates'
)
class RedHatUpdatesEditView(RegionView):
"""Red Hat Updates edit view"""
title = Text('//div[@id="main-content"]//h3[1]')
register_to = BootstrapSelect(id='register_to')
url = Input(id='server_url')
repo_name = Input(id='repo_name')
use_proxy = Checkbox('use_proxy')
proxy_url = Input(id='proxy_address')
proxy_username = Input(id='proxy_userid')
proxy_password = Input(id='proxy_password')
proxy_password_verify = Input(id='proxy_password2')
username = Input(id='customer_userid')
password = Input(id='customer_password')
password_verify = Input(id='customer_password2')
repo_default_name = Button(id='repo_default_name')
rhn_default_url = Button(id='rhn_default_button')
validate_button = Button('Validate')
reset_button = Button('Reset')
save_button = Button('Save')
cancel_button = Button('Cancel')
@property
def is_displayed(self):
return (
self.redhat_updates.is_active() and
self.validate_button.is_displayed and
self.title.text == 'Red Hat Software Updates'
)
class RedHatUpdates(Navigatable, Pretty):
""" Class represents a Red Hat updates tab in CFME UI
Args:
service: Service type (registration method).
url: Service server URL address.
username: Username to use for registration.
password: Password to use for registration.
password_verify: 2nd entry of password for verification. Same as 'password' if None.
repo_name: Repository/channel to enable.
organization: Organization (sat6 only).
use_proxy: `True` if proxy should be used, `False` otherwise (default `False`).
proxy_url: Address of the proxy server.
proxy_username: Username for the proxy server.
proxy_password: Password for the proxy server.
proxy_password_verify: 2nd entry of proxy server password for verification.
Same as 'proxy_password' if None.
set_default_rhsm_address: Click the Default button connected to
the RHSM (only) address if `True`
set_default_repository: Click the Default button connected to the repo/channel if `True`
Note:
With satellite 6, it is necessary to validate credentials to obtain
available organizations from the server.
With satellite 5, 'validate' parameter is ignored because there is
no validation button available.
"""
pretty_attrs = ['service', 'url', 'username', 'password']
service_types = {
'rhsm': 'Red Hat Subscription Management',
'sat6': 'Red Hat Satellite 6'
}
def __init__(self, service, url, username, password, password_verify=None, repo_name=None,
organization=None, use_proxy=False, proxy_url=None, proxy_username=None,
proxy_password=None, proxy_password_verify=None,
set_default_rhsm_address=False,
set_default_repository=False, appliance=None):
self.service = service
self.url = url
self.username = username
self.password = password
self.password_verify = password_verify
self.repo_name = repo_name
self.organization = organization
self.use_proxy = use_proxy
self.proxy_url = proxy_url
self.proxy_username = proxy_username
self.proxy_password = proxy_password
self.proxy_password_verify = proxy_password_verify
self.set_default_rhsm_address = set_default_rhsm_address
self.set_default_repository = set_default_repository
Navigatable.__init__(self, appliance=appliance)
def update_registration(self, validate=True, cancel=False):
""" Fill in the registration form, validate and save/cancel
Args:
validate: Click the Validate button and check the
flash message for errors if `True` (default `True`)
cancel: Click the Cancel button if `True` or the Save button
if `False` (default `False`)
"""
assert self.service in self.service_types, "Unknown service type '{}'".format(
self.service)
service_value = self.service_types[self.service]
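        # If no explicit verification passwords were supplied, reuse the
        # primary passwords so both confirmation fields match.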
password_verify = self.password_verify or self.password
proxy_password_verify = self.proxy_password_verify or self.proxy_password
view = navigate_to(self, 'Edit')
details = {
'register_to': service_value,
'url': self.url,
'username': self.username,
'password': self.password,
'password_verify': password_verify,
'repo_name': self.repo_name,
'use_proxy': self.use_proxy,
'proxy_url': self.proxy_url,
'proxy_username': self.proxy_username,
'proxy_password': self.proxy_password,
'proxy_password_verify': proxy_password_verify
}
view.fill(details)
if self.set_default_rhsm_address:
view.rhn_default_url.click()
if self.set_default_repository:
view.repo_default_name.click()
if validate:
view.validate_button.click()
if cancel:
view.cancel_button.click()
flash_message = 'Edit of Customer Information was cancelled'
else:
view.save_button.click()
flash_message = 'Customer Information successfully saved'
view = self.create_view(RedHatUpdatesView)
assert view.is_displayed
view.flash.assert_message(flash_message)
def refresh(self):
""" Click refresh button to update statuses of appliances """
view = navigate_to(self, 'Details')
view.refresh.click()
def register_appliances(self, *appliance_names):
""" Register appliances by names
Args:
appliance_names: Names of appliances to register; will register all if empty
"""
view = navigate_to(self, 'Details')
self.select_appliances(*appliance_names)
view.register.click()
view.flash.assert_message("Registration has been initiated for the selected Servers")
def update_appliances(self, *appliance_names):
""" Update appliances by names
Args:
appliance_names: Names of appliances to update; will update all if empty
"""
view = navigate_to(self, 'Details')
self.select_appliances(*appliance_names)
view.apply_cfme_update.click()
view.flash.assert_message("Update has been initiated for the selected Servers")
def check_updates(self, *appliance_names):
""" Run update check on appliances by names
Args:
appliance_names: Names of appliances to check; will check all if empty
"""
view = navigate_to(self, 'Details')
self.select_appliances(*appliance_names)
view.check_for_updates.click()
view.flash.assert_message(
"Check for updates has been initiated for the selected Servers")
def is_registering(self, *appliance_names):
""" Check if at least one appliance is registering """
view = navigate_to(self, 'Details')
        for appliance_name in appliance_names:
            row = view.updates_table.row(appliance=appliance_name)
            if row.last_message.text.lower() == 'registering':
                return True
        return False
def is_registered(self, *appliance_names):
""" Check if each appliance is registered
Args:
appliance_names: Names of appliances to check; will check all if empty
"""
view = navigate_to(self, 'Details')
        for appliance_name in appliance_names:
            row = view.updates_table.row(appliance=appliance_name)
            if row.last_message.text.lower() != 'registered':
                return False
        return True
def is_subscribed(self, *appliance_names):
""" Check if appliances are subscribed
Args:
appliance_names: Names of appliances to check; will check all if empty
"""
for row in self.get_appliance_rows(*appliance_names):
if row.update_status.text.lower() in {'not registered', 'unsubscribed'}:
return False
return True
def versions_match(self, version, *appliance_names):
""" Check if versions of appliances match version
Args:
version: Version to match against
appliance_names: Names of appliances to check; will check all if empty
"""
for row in self.get_appliance_rows(*appliance_names):
if row.cfme_version.text != version:
return False
return True
    def checked_updates(self, *appliance_names):
        """ Check if the appliances have checked whether an update is available
Args:
appliance_names: Names of appliances to check; will check all if empty
"""
for row in self.get_appliance_rows(*appliance_names):
if row.last_checked_for_updates.text == '':
return False
return True
def platform_updates_available(self, *appliance_names):
""" Check if appliances have a platform update available
Args:
appliance_names: Names of appliances to check; will check all if empty
"""
for row in self.get_appliance_rows(*appliance_names):
if row.platform_updates_available.text.lower() != 'yes':
return False
return True
def get_available_version(self):
""" Get available version printed on the page
Returns:
`None` if not available; string with version otherwise
e.g. ``1.2.2.3``
"""
view = navigate_to(self, 'Details')
        available_version_raw = view.available_update_version.text
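        # Extract a dotted version number (e.g. "5.9.0.22") from the label text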
available_version_search_res = re.search(r"([0-9]+\.)*[0-9]+", available_version_raw)
if available_version_search_res:
return available_version_search_res.group(0)
return None
def get_repository_names(self):
"""Get available repositories names
Returns:
string: summary info for repositories names
"""
view = navigate_to(self, 'Details')
return view.repository_names_info.text
def select_appliances(self, *appliance_names):
""" Select appliances by names
Args:
appliance_names: Names of appliances to select; will select all if empty
"""
view = navigate_to(self, 'Details')
if appliance_names:
view.updates_table.uncheck_all()
for name in appliance_names:
view.updates_table.row(appliance=name)[0].click()
else:
view.updates_table.check_all()
def get_appliance_rows(self, *appliance_names):
""" Get appliances as table rows
Args:
appliance_names: Names of appliances to get; will get all if empty
"""
view = navigate_to(self, 'Details')
if appliance_names:
rows = [row for row in view.updates_table.rows()
if row.appliance.text in appliance_names]
else:
rows = view.updates_table.rows()
return rows
@navigator.register(RedHatUpdates)
class Details(CFMENavigateStep):
VIEW = RedHatUpdatesView
prerequisite = NavigateToAttribute('appliance.server.zone.region', 'Details')
def step(self):
self.prerequisite_view.redhat_updates.select()
@navigator.register(RedHatUpdates)
class Edit(CFMENavigateStep):
VIEW = RedHatUpdatesEditView
prerequisite = NavigateToSibling('Details')
def step(self):
self.prerequisite_view.edit_registration.click()
# ====================C and U===================================
class CANDUCollectionView(RegionView):
"""C and U View"""
all_clusters_cb = BootstrapSwitch(id='all_clusters')
all_datastores_cb = BootstrapSwitch(id='all_storages')
save_button = Button('Save')
reset_button = Button('Reset')
@property
def is_displayed(self):
return (
self.candu_collection.is_active() and
self.all_clusters_cb.is_displayed
)
@attr.s
class CANDUCollection(BaseCollection):
""" Class represents a C and U in CFME UI """
def _set_state(self, enable=True, reset=False):
""" Enable/Disable C and U
Args:
            enable: 'True' to enable, 'False' to disable
reset: Reset changes, default is 'False' - changes will not be reset
"""
view = navigate_to(self, 'Details')
changed = view.fill({
'all_clusters_cb': enable,
'all_datastores_cb': enable
})
# Save and Reset buttons are active only if view was changed
if changed:
if reset:
view.reset_button.click()
flash_message = 'All changes have been reset'
else:
view.save_button.click()
flash_message = 'Capacity and Utilization Collection settings saved'
view.flash.assert_success_message(flash_message)
def enable_all(self, reset=False):
""" Enable C and U
Args:
reset: Reset changes, default is 'False' - changes will not be reset
"""
self._set_state(reset=reset)
def disable_all(self, reset=False):
""" Disable C and U
Args:
reset: Reset changes, default is 'False' - changes will not be reset
"""
self._set_state(False, reset=reset)
@navigator.register(CANDUCollection, 'Details')
class CANDUCollectionDetails(CFMENavigateStep):
VIEW = CANDUCollectionView
prerequisite = NavigateToAttribute('appliance.server.zone.region', 'Details')
def step(self):
self.prerequisite_view.candu_collection.select()
# ========================= Replication ================================
class ReplicationView(RegionView):
""" Replication Tab View """
replication_type = BootstrapSelect(id='replication_type')
save_button = Button('Save')
reset_button = Button('Reset')
@property
def in_region(self):
return (
self.accordions.settings.tree.currently_selected == [
self.obj.appliance.server.zone.region.settings_string]
)
@property
def is_displayed(self):
return (
self.in_region and
self.replication_type.is_displayed
)
class ReplicationGlobalView(ReplicationView):
""" Replication Global setup View"""
add_subscription = Button('Add Subscription')
subscription_table = VanillaTable('//form[@id="form_div"]//table[contains(@class, "table")]')
@property
def is_displayed(self):
return (
self.in_region and
self.add_subscription.is_displayed
)
class ReplicationGlobalAddView(ReplicationView):
database = Input(locator='//input[contains(@ng-model, "dbname")]')
port = Input(name='port')
host = Input(locator='//input[contains(@ng-model, "host")]')
username = Input(name='userid')
password = Input(name='password')
accept_button = Button('Accept')
action_dropdown = Dropdown(
"//*[@id='form_div']//table//button[contains(@class, 'dropdown-toggle')]")
@property
def is_displayed(self):
return self.accept_button.is_displayed
class ReplicationRemoteView(ReplicationView):
""" Replication Remote setup View """
pass
# TODO add widget for "Excluded Tables"
class Replication(NavigatableMixin):
""" Class represents a Replication tab in CFME UI
Note:
Remote settings is not covered for now as 'Excluded Tables' element widget should be added
"""
def __init__(self, appliance):
self.appliance = appliance
def set_replication(self, updates=None, replication_type=None, reset=False):
""" Set replication settings
Args:
            updates(dict): Replication update values; 'host' is mandatory,
                DB credentials default to conf.credentials.database
replication_type(str): Replication type, use 'global' or 'remote'
reset: Pass True to reset made changes
"""
db_creds = conf.credentials.database
if not replication_type:
view = navigate_to(self, 'Details')
view.replication_type.fill('<None>')
elif replication_type == 'global':
view = navigate_to(self, 'GlobalAdd')
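            # The fallbacks below (vmdb_production, port 5432) mirror the stock
            # appliance database; credentials default to conf.credentials.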
view.fill({
'database': (
updates.get('database') if updates.get('database') else 'vmdb_production'),
'host': updates.get('host'),
'port': updates.get('port') if updates.get('port') else '5432',
'username': (
updates.get('username') if updates.get('username') else db_creds.username),
'password': (
updates.get('password') if updates.get('password') else db_creds.password)
})
else:
view = navigate_to(self, 'RemoteAdd')
# TODO fill remote settings will be done after widget added
if reset:
view.reset_button.click()
view.flash.assert_message('All changes have been reset')
else:
try:
view.accept_button.click()
view.save_button.click()
except Exception:
logger.warning('Nothing was updated, please check the data')
def _global_replication_row(self, host=None):
""" Get replication row from table
Args:
host: host values
        Returns:
            host row object; if host is not passed, the first table row is returned
        """
        view = navigate_to(self, 'Global')
        if host:
            return view.subscription_table.row(host=host)
        else:
            return view.subscription_table[0]
    def get_replication_status(self, replication_type='global', host=None):
        """ Get replication status: returns True if replication is active
Args:
replication_type: Replication type string, default is global
host: host to check
Returns: True if active, otherwise False
"""
view = navigate_to(self, replication_type.capitalize())
if replication_type == 'remote':
return view.is_displayed
else:
return self._global_replication_row(host).is_displayed
def get_global_replication_backlog(self, host=None):
""" Get global replication backlog value
Args:
host: host value
Returns: backlog number value
"""
row = self._global_replication_row(host)
return int(row.backlog.text.split(' ')[0])
@navigator.register(Replication, 'Details')
class ReplicationDetails(CFMENavigateStep):
VIEW = ReplicationView
prerequisite = NavigateToAttribute('appliance.server.zone.region', 'Details')
def step(self):
self.prerequisite_view.replication.select()
@navigator.register(Replication, 'Global')
class ReplicationGlobalSetup(CFMENavigateStep):
VIEW = ReplicationGlobalView
prerequisite = NavigateToSibling('Details')
def step(self):
self.prerequisite_view.replication_type.fill('Global')
@navigator.register(Replication, 'GlobalAdd')
class ReplicationGlobalAdd(CFMENavigateStep):
VIEW = ReplicationGlobalAddView
prerequisite = NavigateToSibling('Global')
def step(self):
if not self.view.accept_button.is_displayed:
self.prerequisite_view.add_subscription.click()
@navigator.register(Replication, 'RemoteAdd')
class ReplicationRemoteAdd(CFMENavigateStep):
VIEW = ReplicationRemoteView
prerequisite = NavigateToSibling('Details')
def step(self):
self.prerequisite_view.replication_type.fill('Remote')
| akarol/cfme_tests | cfme/configure/configuration/region_settings.py | Python | gpl-2.0 | 36,046 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-24 19:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("configuration", "0002_auto_20161024_1842"),
]
operations = [
migrations.AlterModelOptions(
name="practiceinfo",
options={"verbose_name": "practice information"},
),
]
| cdriehuys/chmvh-website | chmvh_website/configuration/migrations/0003_auto_20161024_1904.py | Python | mit | 440 |
#! /bin/env python
#
# Protein Engineering Analysis Tool Structure Analysis (PEATSA)
# Copyright (C) 2010 Michael Johnston & Jens Erik Nielsen
#
# Author: Michael Johnston
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
import os, math
import PEATSA.Core as Core
#In KJ per mol kelvin
GasConstant = 8.314472E-3
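# At T = 300 K this gives R*T ~ 2.49 kJ/mol, the denominator of the
# vitality exponent computed in VitalitiesForDrug below.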
def IsVitalityHighResistance(vitality):
	return vitality >= 2.3
def IsHIVScoreHighResistance(score):
	return score >= 20
def VitalityToResistance(vitality):
if vitality > 2.3:
return 2
elif vitality > 1.1:
return 1
#elif vitality < 0.3:
# return -1
else:
return 0
def HIVScoreToResistance(score):
if score >= 20:
return 2
elif score >= 10:
return 1
elif score < 10:
return 0
def VitalitiesForDrug(drugDataName, drugDataLocation, substrateDataNames, substrateDataLocation, expectedValues=None, separateChains=False, verbose=False):
print '\nCalculating vitalities for %s' % drugDataName
#Load Drug data
drugData = Core.Data.DataSet(name=drugDataName, location=drugDataLocation)
#Load Substrate Data
substrateData = [Core.Data.DataSet(name=value, location=substrateDataLocation) for value in substrateDataNames]
#Get Drug and substrate binding - The drug data is a list of (mutationCode, bindingValue) pairs
#Make MutationCode:Value dictionaries for the substrates
drugBindingData = zip(drugData.deltaBindingResults.mutations, drugData.deltaBindingResults.total)
substrateBindingData = [dict(zip(substrate.deltaBindingResults.mutations, substrate.deltaBindingResults.total)) for substrate in substrateData]
vitalities = []
missingData = 0
for mutationCode, drugValue in drugBindingData:
#Find substrate with maximum delta delta binding
try:
substrateValues = [bindingData[mutationCode] for bindingData in substrateBindingData]
except:
if verbose:
print 'Unable to get substrate value for %s' % mutationCode
missingData = missingData + 1
continue
maxValue = min(substrateValues)
maxSub = substrateDataNames[substrateValues.index(maxValue)]
#Calculate the vitality
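		# vitality = exp((ddG_drug - best substrate ddG) / RT): mutations that
		# penalise drug binding more than substrate binding score above 1.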
vitality = (1/(GasConstant*300))*(drugValue - maxValue)
vitality = math.exp(vitality)
#Hack since mutations aren't in correct format - using _ instead of +
mutationCode = mutationCode.replace('_', '+')
vitalities.append((mutationCode, vitality))
#print '%s\t%10.3lf\t%10.3lf (%s) \t%10.3lf' % (mutationCode, drugValue, maxValue, maxSub, vitality)
print 'Unable to retrieve substrate data for %d mutations' % missingData
#Combine the values for Chains A & B
if separateChains:
chainAValues = [element for element in vitalities if element[0][0] == 'A']
chainBValues = [element for element in vitalities if element[0][0] == 'B']
combinedVitalities = []
for pair in zip(chainAValues, chainBValues):
combinedValue = pair[0][1] + pair[1][1]
#print '%s\tA: %-10.3lf\tB: %-10.3lf\tTotal: %-10.3lf' % (pair[0][0][1:], pair[0][1], pair[1][1], combinedValue)
combinedVitalities.append((pair[0][0][1:], combinedValue))
vitalities = combinedVitalities
	if verbose:
		sortedVitalities = vitalities
		sortedVitalities.sort(key=lambda x: x[1], reverse=True)
		print "\nSorted Vitalities:"
for data in sortedVitalities:
print '%s %lf' % data
vitalities = Core.Matrix.PEATSAMatrix(rows=vitalities, headers=['Mutations', 'Vitality'])
return vitalities
if __name__ == "__main__":
substrateData = ['1KJ7_N25D.peatsa', '1KJH_N25D.peatsa', '1KJF_N25D.peatsa']
drugData = ['1HSGClean.peatsa', '1MUIClean.peatsa', '1OHRClean.peatsa', '2AQUClean.peatsa']
pdbDrugMap = {"1MUIClean":"AB1" , "2AQUClean":"DR7", "1OHRClean":"1UN", "1HSGClean":"MK1"}
drmData = Core.Matrix.matrixFromCSVFile('DRM.csv')
results = {}
for data in drugData:
vitalities = VitalitiesForDrug(data, '.', substrateData, '.')
stream = open(os.path.splitext(data)[0], "w+")
stream.write(vitalities.csvRepresentation())
stream.close()
results[os.path.splitext(data)[0]] = vitalities
	rows = [[0, 0] for _ in range(2)]
	totals = Core.Matrix.Matrix(rows=rows, headers=['Non', 'High Resistance'])
for data in results.keys():
vitalities = results[data]
print '\n', data
print vitalities
print "Total mutations %d" % (vitalities.numberOfRows())
resistanceLevels = vitalities.mapColumn(columnHeader="Vitality", mapFunction=VitalityToResistance)
resistance = [element[1] for element in resistanceLevels]
highResistance = filter(lambda x: x==2, resistance)
lowResistance = filter(lambda x: x==1, resistance)
noResistance = filter(lambda x: x==0, resistance)
hyperSusceptible = filter(lambda x: x==-1, resistance)
print "High Resistance %d (%5.3lf)" % (len(highResistance), 100*len(highResistance)/float(vitalities.numberOfRows()))
print "Low Resistance %d (%5.3lf)" % (len(lowResistance), 100*len(lowResistance)/float(vitalities.numberOfRows()))
print "No Resistance %d (%5.3lf)" % (len(noResistance), 100*len(noResistance)/float(vitalities.numberOfRows()))
print "Hyper Susceptible %d (%5.3lf)" % (len(hyperSusceptible), 100*len(hyperSusceptible)/float(vitalities.numberOfRows()))
#Get the stabilities for this drug
drugData = Core.Data.DataSet(name="%s.peatsa" % data, location='.')
stabilityOut = drugData.stabilityResults.filterColumnByValue(columnHeader='Total',
value=10)
		print "%d mutations destabilise protein by more than 10 KJ/mol" % len(stabilityOut)
#Hack
stabilityOut = [[element[0].replace('_', '+'), element[1]] for element in stabilityOut]
destabilisingMutations = [element[0] for element in stabilityOut]
#print destabilisingMutations
# print [element[0] for element in resistanceLevels]
resistanceLevels = [element for element in resistanceLevels if element[0] not in destabilisingMutations]
print "Removed %d mutations from vitality due to stability considerations" % (vitalities.numberOfRows() - len(resistanceLevels))
resistance = [element[1] for element in resistanceLevels]
highResistance = filter(lambda x: x==2, resistance)
lowResistance = filter(lambda x: x==1, resistance)
print "High Resistance Filtered %d (%5.3lf)" % (len(highResistance), 100*len(highResistance)/float(vitalities.numberOfRows()))
print "Low Resistance Filtered %d (%5.3lf)" % (len(lowResistance), 100*len(lowResistance)/float(vitalities.numberOfRows()))
#Get the DRMs for this drug
#name = pdbDrugMap[data]
#data = drmData.mapColumn(columnHeader=name, mapFunction=HIVScoreToResistance)
#resistanceLevels = dict(resistanceLevels)
#rows = 2*[[0,0]]
#matrix = Core.Matrix.Matrix(rows=rows, headers=['Non', 'High Resistance'])
#for element in data:
# actual = element[1]
# try:
# predicted = resistanceLevels[element[0]]
# except:
# print 'Missing data for %s' % element[0]
# continue
# if actual == 2 and predicted == 2:
# matrix.matrix[1][1] = matrix.matrix[1][1] + 1
# elif actual == 2 and (predicted == 0 or predicted == 1):
# matrix.matrix[0][1] = matrix.matrix[0][1] + 1
# elif (actual == 0 or actual == 1) and predicted == 2:
# matrix.matrix[1][0] = matrix.matrix[1][0] + 1
# else:
# matrix.matrix[0][0] = matrix.matrix[0][0] + 1
#
#print matrix.csvRepresentation()
# for i in range(2):
# for j in range(2):
# totals.matrix[i][j] = totals.matrix[i][j] + matrix.matrix[i][j]
#print totals.csvRepresentation()
| dmnfarrell/peat | PEATSA/Tools/Vitality.py | Python | mit | 8,168 |
"""DWC Network Server Emulator
Copyright (C) 2014 polaris-
Copyright (C) 2014 msoucy
Copyright (C) 2015 Sepalani
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import traceback
from twisted.internet.protocol import Factory
from twisted.internet.endpoints import serverFromString
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.internet.error import ReactorAlreadyRunning
import gamespy.gs_database as gs_database
import gamespy.gs_query as gs_query
import other.utils as utils
import dwc_config
logger = dwc_config.get_logger('GameSpyPlayerSearchServer')
address = dwc_config.get_ip_port('GameSpyPlayerSearchServer')
class GameSpyPlayerSearchServer(object):
def __init__(self):
pass
def start(self):
endpoint_search = serverFromString(
reactor,
"tcp:%d:interface=%s" % (address[1], address[0])
)
conn_search = endpoint_search.listen(PlayerSearchFactory())
try:
if not reactor.running:
reactor.run(installSignalHandlers=0)
except ReactorAlreadyRunning:
pass
class PlayerSearchFactory(Factory):
def __init__(self):
logger.log(logging.INFO,
"Now listening for player search connections on %s:%d...",
address[0], address[1])
def buildProtocol(self, address):
return PlayerSearch(address)
class PlayerSearch(LineReceiver):
def __init__(self, address):
self.setRawMode()
self.db = gs_database.GamespyDatabase()
self.address = address
self.leftover = ""
def connectionMade(self):
pass
def connectionLost(self, reason):
pass
def rawDataReceived(self, data):
try:
logger.log(logging.DEBUG, "SEARCH RESPONSE: %s", data)
data = self.leftover + data
commands, self.leftover = gs_query.parse_gamespy_message(data)
for data_parsed in commands:
print data_parsed
if data_parsed['__cmd__'] == "otherslist":
self.perform_otherslist(data_parsed)
else:
logger.log(logging.DEBUG,
"Found unknown search command, don't know"
" how to handle '%s'.",
data_parsed['__cmd__'])
except:
logger.log(logging.ERROR,
"Unknown exception: %s",
traceback.format_exc())
def perform_otherslist(self, data_parsed):
"""Reference: http://wiki.tockdom.com/wiki/MKWii_Network_Protocol/Server/gpsp.gs.nintendowifi.net
Example from: filtered-mkw-log-2014-01-01-ct1310.eth
\otherslist\\o\146376154\uniquenick\2m0isbjmvRMCJ2i5321j
\o\192817284\uniquenick\1jhggtmghRMCJ2jrsh23
\o\302594991\uniquenick\7dkjp51v5RMCJ2nr3vs9
\o\368031897\uniquenick\1v7p3qmkpRMCJ1o8f56p
\o\447214276\uniquenick\7dkt0p6gtRMCJ2ljh72h
\o\449615791\uniquenick\4puvrm1g4RMCJ00ho3v1
\o\460250854\uniquenick\4rik5l1u1RMCJ0tc3fii
\o\456284963\uniquenick\1unitvi86RMCJ1b10u02
\o\453830866\uniquenick\7de3q52dbRMCJ2877ss2
\o\450197498\uniquenick\3qtutr1ikRMCJ38gem1n
\o\444241868\uniquenick\67tp53bs9RMCJ1abs7ej
\o\420030955\uniquenick\5blesqia3RMCJ322bbd6
\o\394609454\uniquenick\0hddp7mq2RMCJ30uv7r7
\o\369478991\uniquenick\59de9c2bhRMCJ0re0fii
\o\362755626\uniquenick\5tte2lif7RMCJ0cscgtg
\o\350951571\uniquenick\7aeummjlaRMCJ3li4ls2
\o\350740680\uniquenick\484uiqhr4RMCJ18opoj0
\o\349855648\uniquenick\5blesqia3RMCJ1c245dn
\o\324078642\uniquenick\62go5gpt0RMCJ0v0uhc9
\o\304111337\uniquenick\4lcg6ampvRMCJ1gjre51
\o\301273266\uniquenick\1dhdpjhn8RMCJ2da6f9h
\o\193178453\uniquenick\3pcgu0299RMCJ3nhu50f
\o\187210028\uniquenick\3tau15a9lRMCJ2ar247h
\o\461622261\uniquenick\59epddrnkRMCJ1t2ge7l
\oldone\\final\
"""
msg_d = [
('__cmd__', "otherslist"),
('__cmd_val__', ""),
]
if "numopids" in data_parsed and "opids" in data_parsed:
numopids = int(data_parsed['numopids'])
opids = data_parsed['opids'].split('|')
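            # 'opids' is a '|'-separated list of profile ids to resolve into
            # uniquenicks; the count is cross-checked against 'numopids' below.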
if len(opids) != numopids and int(opids[0]):
logger.log(logging.ERROR,
"Unexpected number of opids, got %d, expected %d.",
len(opids), numopids)
# Return all uniquenicks despite any unexpected/missing opids
# We can do better than that, I think...
for opid in opids:
profile = self.db.get_profile_from_profileid(opid)
msg_d.append(('o', opid))
if profile is not None:
msg_d.append(('uniquenick', profile['uniquenick']))
else:
msg_d.append(('uniquenick', ''))
msg_d.append(('oldone', ""))
msg = gs_query.create_gamespy_message(msg_d)
logger.log(logging.DEBUG, "SENDING: %s", msg)
self.transport.write(bytes(msg))
if __name__ == "__main__":
gsps = GameSpyPlayerSearchServer()
gsps.start()
| sepalani/dwc_network_server_emulator | gamespy_player_search_server.py | Python | agpl-3.0 | 5,974 |
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from BTrees.OOBTree import OOBTree
from cStringIO import StringIO
import MaKaC.webinterface.pages.tracks as tracks
import MaKaC.webinterface.pages.conferences as conferences
import MaKaC.webinterface.urlHandlers as urlHandlers
import MaKaC.webinterface.common.abstractFilters as abstractFilters
import MaKaC.review as review
from MaKaC.webinterface.rh.conferenceBase import RHTrackBase
from MaKaC.webinterface.rh.base import RHModificationBaseProtected
from MaKaC.errors import MaKaCError, FormValuesError
from MaKaC.PDFinterface.conference import TrackManagerAbstractToPDF, TrackManagerAbstractsToPDF
from indico.core.config import Config
import MaKaC.common.filters as filters
import MaKaC.webinterface.common.contribFilters as contribFilters
from MaKaC.webinterface.common.contribStatusWrapper import ContribStatusList
from MaKaC.PDFinterface.conference import ContribsToPDF
from MaKaC.webinterface.mail import GenericMailer, GenericNotification
from MaKaC.i18n import _
from MaKaC.abstractReviewing import ConferenceAbstractReview
from MaKaC.paperReviewing import Answer
from MaKaC.webinterface.common.tools import cleanHTMLHeaderFilename
from MaKaC.webinterface.rh.abstractModif import _AbstractWrapper
from MaKaC.webinterface.common.abstractNotificator import EmailNotificator
from indico.web.flask.util import send_file
class RHTrackModifBase( RHTrackBase, RHModificationBaseProtected ):
def _checkParams( self, params ):
RHTrackBase._checkParams( self, params )
def _checkProtection( self ):
RHModificationBaseProtected._checkProtection( self )
class RHTrackModification( RHTrackModifBase ):
def _process( self ):
p = tracks.WPTrackModification( self, self._track )
return p.display()
class RHTrackDataModification( RHTrackModifBase ):
def _process( self ):
p = tracks.WPTrackDataModification( self, self._track )
return p.display()
class RHTrackPerformDataModification(RHTrackModifBase):
def _checkParams(self,params):
RHTrackModifBase._checkParams(self,params)
self._cancel=params.has_key("cancel")
def _process(self):
if self._cancel:
self._redirect(urlHandlers.UHTrackModification.getURL(self._track))
else:
params=self._getRequestParams()
self._track.setTitle(params["title"])
self._track.setDescription(params["description"])
self._track.setCode(params["code"])
self._redirect(urlHandlers.UHTrackModification.getURL(self._track))
class RHTrackCoordination( RHTrackModifBase ):
def _checkProtection(self):
RHTrackModifBase._checkProtection(self)
if not self._conf.hasEnabledSection("cfa"):
raise MaKaCError( _("You cannot access this option because \"Abstracts\" was disabled"))
def _process( self ):
p = tracks.WPTrackModifCoordination( self, self._track )
return p.display()
class TrackCoordinationError( MaKaCError ):
pass
class RHTrackAbstractsBase( RHTrackModifBase ):
"""Base class for the areas accessible with track coordination privileges.
"""
def _checkProtection( self, checkCFADisabled=True ):
"""
"""
if not self._target.canCoordinate( self.getAW() ):
if self._getUser() == None:
self._checkSessionUser()
else:
raise TrackCoordinationError("You don't have rights to coordinate this track")
if checkCFADisabled and not self._conf.hasEnabledSection("cfa"):
raise MaKaCError( _("You cannot access this option because \"Abstracts\" was disabled"))
class _TrackAbstractFilterField( filters.FilterField ):
def __init__( self, track, values, showNoValue=True ):
self._track = track
filters.FilterField.__init__(self,track.getConference(),values,showNoValue)
class _StatusFilterField( _TrackAbstractFilterField ):
"""
"""
_id = "status"
def satisfies( self, abstract ):
"""
"""
s = tracks.AbstractStatusTrackViewFactory().getStatus( self._track, abstract )
return s.getId() in self.getValues()
class _ContribTypeFilterField( _TrackAbstractFilterField, abstractFilters.ContribTypeFilterField ):
"""
"""
_id = "type"
def __init__( self, track, values, showNoValue=True ):
_TrackAbstractFilterField.__init__( self, track, values, showNoValue )
def satisfies( self, abstract ):
"""
"""
return abstractFilters.ContribTypeFilterField.satisfies(self, abstract)
class _MultipleTrackFilterField(_TrackAbstractFilterField):
_id = "multiple_tracks"
def satisfies( self, abstract ):
return len( abstract.getTrackList() )>1
class _CommentsTrackFilterField(_TrackAbstractFilterField, abstractFilters.CommentFilterField):
_id = "comment"
def __init__( self, track, values, showNoValue=True ):
_TrackAbstractFilterField.__init__( self, track, values, showNoValue )
def satisfies( self, abstract ):
"""
"""
return abstractFilters.CommentFilterField.satisfies(self, abstract)
class _AccContribTypeFilterField(_TrackAbstractFilterField,abstractFilters.AccContribTypeFilterField):
"""
"""
_id = "acc_type"
def __init__(self,track,values,showNoValue=True):
_TrackAbstractFilterField.__init__(self,track,values,showNoValue)
def satisfies(self,abstract):
astv = tracks.AbstractStatusTrackViewFactory().getStatus( self._track, abstract )
if astv.__class__ in [tracks._ASTrackViewAccepted,\
tracks._ASTrackViewPA]:
if astv.getContribType() is None or astv.getContribType()=="":
return self._showNoValue
return astv.getContribType() in self._values
else:
return self._showNoValue
class TrackAbstractsFilterCrit(filters.FilterCriteria):
_availableFields = { \
_ContribTypeFilterField.getId(): _ContribTypeFilterField, \
_StatusFilterField.getId(): _StatusFilterField, \
_MultipleTrackFilterField.getId(): _MultipleTrackFilterField, \
_CommentsTrackFilterField.getId(): _CommentsTrackFilterField,
_AccContribTypeFilterField.getId(): _AccContribTypeFilterField }
def __init__(self,track,crit={}):
self._track = track
filters.FilterCriteria.__init__(self,track.getConference(),crit)
def _createField(self,klass,values ):
return klass(self._track,values)
def satisfies( self, abstract ):
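        # An abstract matches only if every active filter field accepts it
        # (logical AND across all fields).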
for field in self._fields.values():
if not field.satisfies( abstract ):
return False
return True
class _TrackAbstractsSortingField( filters.SortingField ):
def __init__( self, track ):
self._track = track
filters.SortingField.__init__( self )
class _ContribTypeSF( _TrackAbstractsSortingField, abstractFilters.ContribTypeSortingField ):
_id = "type"
def __init__( self, track ):
_TrackAbstractsSortingField.__init__( self, track )
def compare( self, a1, a2 ):
return abstractFilters.ContribTypeSortingField.compare( self, a1, a2 )
class _StatusSF( _TrackAbstractsSortingField ):
_id = "status"
def compare( self, a1, a2 ):
statusA1 = tracks.AbstractStatusTrackViewFactory().getStatus( self._track, a1 )
statusA2 = tracks.AbstractStatusTrackViewFactory().getStatus( self._track, a2 )
return cmp( statusA1.getLabel(), statusA2.getLabel() )
class _NumberSF( _TrackAbstractsSortingField ):
_id = "number"
def compare( self, a1, a2 ):
try:
a = int(a1.getId())
b = int(a2.getId())
except:
a = a1.getId()
b = a2.getId()
return cmp( a, b )
class _DateSF( _TrackAbstractsSortingField ):
_id = "date"
def compare( self, a1, a2 ):
return cmp( a2.getSubmissionDate(), a1.getSubmissionDate() )
class TrackAbstractsSortingCrit( filters.SortingCriteria ):
"""
"""
_availableFields = { _ContribTypeSF.getId(): _ContribTypeSF, \
_StatusSF.getId(): _StatusSF, \
_NumberSF.getId(): _NumberSF, \
_DateSF.getId(): _DateSF }
def __init__( self, track, crit=[] ):
"""
"""
self._track = track
filters.SortingCriteria.__init__( self, crit )
def _createField( self, fieldKlass ):
"""
"""
return fieldKlass( self._track )
class RHTrackAbstractList( RHTrackAbstractsBase ):
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams( self, params )
self._filterUsed = params.has_key( "OK" ) #this variable is true when the
# filter has been used
filter = {}
ltypes = []
if not self._filterUsed:
for type in self._conf.getContribTypeList():
ltypes.append(type)
else:
for id in self._normaliseListParam(params.get("selTypes",[])):
ltypes.append(self._conf.getContribTypeById(id))
filter["type"]=ltypes
lstatus=[]
if not self._filterUsed:
sl = tracks.AbstractStatusTrackViewFactory().getStatusList()
for statusKlass in sl:
lstatus.append( statusKlass.getId() )
filter["status"] = self._normaliseListParam( params.get("selStatus", lstatus) )
ltypes = []
if not self._filterUsed:
for type in self._conf.getContribTypeList():
ltypes.append( type )
else:
for id in self._normaliseListParam(params.get("selAccTypes",[])):
ltypes.append(self._conf.getContribTypeById(id))
filter["acc_type"]=ltypes
if params.has_key("selMultipleTracks"):
filter["multiple_tracks"] = ""
if params.has_key("selOnlyComment"):
filter["comment"] = ""
self._criteria = TrackAbstractsFilterCrit( self._track, filter )
typeShowNoValue,accTypeShowNoValue=True,True
if self._filterUsed:
typeShowNoValue = params.has_key("typeShowNoValue")
accTypeShowNoValue= params.has_key("accTypeShowNoValue")
self._criteria.getField("type").setShowNoValue( typeShowNoValue )
self._criteria.getField("acc_type").setShowNoValue(accTypeShowNoValue)
self._sortingCrit = TrackAbstractsSortingCrit( self._track, [params.get( "sortBy", "number").strip()] )
self._selectAll = params.get("selectAll", None)
self._msg = params.get("directAbstractMsg","")
self._order = params.get("order","down")
def _process( self ):
p = tracks.WPTrackModifAbstracts( self, self._track, self._msg, self._filterUsed, self._order )
return p.display( filterCrit= self._criteria, \
sortingCrit = self._sortingCrit, \
selectAll = self._selectAll )
class RHTrackAbstractBase( RHTrackAbstractsBase ):
def _checkParams( self, params ):
RHTrackModifBase._checkParams( self, params )
absId = params.get( "abstractId", "" ).strip()
if absId == "":
raise MaKaCError( _("Abstract identifier not specified"))
self._abstract = self._track.getAbstractById( absId )
if self._abstract == None:
raise MaKaCError( _("The abstract with id %s does not belong to the track with id %s")%(absId, self._track.getId()))
class RHTrackAbstract( RHTrackAbstractBase ):
def _process( self ):
p = tracks.WPTrackAbstractModif( self, self._track, self._abstract )
return p.display()
class RHTrackAbstractDirectAccess( RHTrackAbstractBase ):
def _checkParams(self, params):
self._params = params
RHTrackBase._checkParams(self, params)
self._abstractId = params.get("abstractId","")
self._abstractExist = False
try:
abstract = self._track.getAbstractById(self._abstractId)
self._abstractExist = True
RHTrackAbstractBase._checkParams(self, params)
except KeyError:
pass
def _process( self ):
if self._abstractExist:
p = tracks.WPTrackAbstractModif( self, self._track, self._abstract )
return p.display()
else:
url = urlHandlers.UHTrackModifAbstracts.getURL(self._track)
#url.addParam("directAbstractMsg","There is no abstract number %s in this track"%self._abstractId)
self._redirect(url)
return
class RHTrackAbstractSetStatusBase(RHTrackAbstractBase):
""" This is the base class for the accept/reject functionality for the track coordinators """
def _checkProtection(self):
RHTrackAbstractBase._checkProtection(self)
if not self._abstract.getConference().getConfAbstractReview().canReviewerAccept():
raise MaKaCError(_("The acceptance or rejection of abstracts is not allowed. Only the managers of the conference can perform this action."))
def _checkParams(self, params):
RHTrackAbstractBase._checkParams(self, params)
self._action = params.get("accept", None)
if self._action:
self._typeId = params.get("type", "")
self._session=self._conf.getSessionById(params.get("session", ""))
else:
self._action = params.get("reject", None)
self._comments = params.get("comments", "")
self._doNotify = params.has_key("notify")
def _notifyStatus(self, status):
wrapper = _AbstractWrapper(status)
tpl = self._abstract.getOwner().getNotifTplForAbstract(wrapper)
if self._doNotify and tpl:
n = EmailNotificator()
self._abstract.notify(n, self._getUser())
class RHTrackAbstractAccept(RHTrackAbstractSetStatusBase):
def _process(self):
if self._action:
cType = self._abstract.getConference().getContribTypeById(self._typeId)
self._abstract.accept(self._getUser(), self._track, cType, self._comments, self._session)
self._notifyStatus(review.AbstractStatusAccepted(self._abstract, None, self._track, cType))
self._redirect(urlHandlers.UHTrackAbstractModif.getURL( self._track, self._abstract ))
else:
p = tracks.WPTrackAbstractAccept(self, self._track, self._abstract)
return p.display(**self._getRequestParams())
class RHTrackAbstractReject(RHTrackAbstractSetStatusBase):
def _process(self):
if self._action:
self._abstract.reject(self._getUser(), self._comments)
self._notifyStatus(review.AbstractStatusRejected(self._abstract, None, None))
self._redirect(urlHandlers.UHTrackAbstractModif.getURL( self._track, self._abstract ))
else:
p = tracks.WPTrackAbstractReject(self, self._track, self._abstract)
return p.display(**self._getRequestParams())
class RHTrackAbstractPropBase(RHTrackAbstractBase):
""" Base class for propose to accept/reject classes """
def _checkParams(self,params):
RHTrackAbstractBase._checkParams(self,params)
self._action = ""
self._comment = params.get("comment","")
self._answers = []
if params.has_key("OK"):
self._action = "GO"
# get answers and make the list
scaleLower = self._target.getConference().getConfAbstractReview().getScaleLower()
scaleHigher = self._target.getConference().getConfAbstractReview().getScaleHigher()
numberOfAnswers = self._target.getConference().getConfAbstractReview().getNumberOfAnswers()
c = 0
for question in self._target.getConference().getConfAbstractReview().getReviewingQuestions():
c += 1
if not params.has_key("RB_"+str(c)):
raise FormValuesError(_("Please, reply to all the reviewing questions. Question \"%s\" is missing the answer.")%question.getText())
rbValue = int(params.get("RB_"+str(c),scaleLower))
newId = self._target.getConference().getConfAbstractReview().getNewAnswerId()
newAnswer = Answer(newId, rbValue, numberOfAnswers, question)
newAnswer.calculateRatingValue(scaleLower, scaleHigher)
self._answers.append(newAnswer)
elif params.has_key("CANCEL"):
self._action="CANCEL"
class RHTrackAbstractPropToAccept( RHTrackAbstractPropBase ):
def _checkParams(self,params):
RHTrackAbstractPropBase._checkParams(self,params)
self._contribType = params.get("contribType",self._abstract.getContribType())
if params.has_key("OK"):
ctId = ""
if self._abstract.getContribType():
ctId = self._abstract.getContribType().getId()
ctId = params.get("contribType",ctId)
self._contribType = self._abstract.getConference().getContribTypeById(ctId)
def _process( self ):
url = urlHandlers.UHTrackAbstractModif.getURL( self._track, self._abstract )
if self._action == "CANCEL":
self._redirect( url )
elif self._action == "GO":
r = self._getUser()
self._abstract.proposeToAccept( r, self._track, self._contribType, self._comment, self._answers )
self._redirect( url )
else:
p=tracks.WPTrackAbstractPropToAcc(self,self._track,self._abstract)
return p.display(contribType=self._contribType,\
comment=self._comment)
class RHTrackAbstractPropToReject( RHTrackAbstractPropBase ):
def _process( self ):
url = urlHandlers.UHTrackAbstractModif.getURL( self._track, self._abstract )
if self._action == "CANCEL":
self._redirect( url )
elif self._action == "GO":
r = self._getUser()
self._abstract.proposeToReject( r, self._track, self._comment , self._answers)
self._redirect( url )
else:
p = tracks.WPTrackAbstractPropToRej( self, self._track, self._abstract )
return p.display()
class RHTrackAbstractPropForOtherTracks( RHTrackAbstractBase ):
def _checkParams( self, params ):
RHTrackAbstractBase._checkParams( self, params )
self._action, self._comment = "", ""
if params.has_key("OK"):
self._action = "GO"
self._comment = params.get("comment", "")
self._propTracks = []
for trackId in self._normaliseListParam( params.get("selTracks", []) ):
self._propTracks.append( self._conf.getTrackById(trackId) )
elif params.has_key("CANCEL"):
self._action = "CANCEL"
def _process( self ):
url = urlHandlers.UHTrackAbstractModif.getURL( self._track, self._abstract )
if self._action == "CANCEL":
self._redirect( url )
elif self._action == "GO":
if self._propTracks != []:
r = self._getUser()
self._abstract.proposeForOtherTracks( r, self._track, self._comment, self._propTracks )
self._redirect( url )
else:
p = tracks.WPAbstractPropForOtherTracks( self, self._track, self._abstract )
return p.display()
class RHModAbstractMarkAsDup(RHTrackAbstractBase):
def _checkParams(self, params):
RHTrackAbstractBase._checkParams(self, params)
self._action, self._comments, self._original = "", "", None
self._originalId = ""
if "OK" in params:
self._action = "MARK"
self._comments = params.get("comments", "")
self._originalId = params.get("id", "")
self._original = self._abstract.getOwner(
).getAbstractById(self._originalId)
def _process(self):
if self._action == "MARK":
if self._original is None or self._target == self._original:
raise MaKaCError(_("invalid original abstract id"))
self._abstract.markAsDuplicated(
self._getUser(), self._original, self._comments, self._track)
self._redirect(urlHandlers.UHTrackAbstractModif.getURL(
self._track, self._abstract))
return
p = tracks.WPModAbstractMarkAsDup(self, self._track, self._abstract)
return p.display(comments=self._comments, originalId=self._originalId)
class RHModAbstractUnMarkAsDup(RHTrackAbstractBase):
def _checkParams( self, params ):
RHTrackAbstractBase._checkParams( self, params )
self._action,self._comments="",""
if params.has_key("OK"):
self._action="UNMARK"
self._comments=params.get("comments","")
def _process( self ):
if self._action=="UNMARK":
self._abstract.unMarkAsDuplicated(self._getUser(),self._comments, self._track)
self._redirect(urlHandlers.UHTrackAbstractModif.getURL(self._track,self._abstract))
return
p = tracks.WPModAbstractUnMarkAsDup(self,self._track,self._abstract)
return p.display(comments=self._comments)
class RHAbstractToPDF(RHTrackAbstractBase):
def _process(self):
tz = self._conf.getTimezone()
filename = "%s - Abstract.pdf" % self._target.getTitle()
pdf = TrackManagerAbstractToPDF(self._abstract, self._track, tz=tz)
return send_file(filename, pdf.generate(), 'PDF')
class RHAbstractsActions:
"""
class to select the action to do with the selected abstracts
"""
def _checkParams( self, params ):
self._pdf = params.get("PDF.x", None)
self._mail = params.get("mail", None)
self._participant = params.get("PART", None)
self._tplPreview = params.get("tplPreview", None)
self._params = params
def _process(self):
if self._pdf:
return RHAbstractsToPDF().process(self._params)
elif self._mail:
return RHAbstractSendNotificationMail().process(self._params)
elif self._tplPreview:
return RHAbstractTPLPreview().process(self._params)
elif self._participant:
return RHAbstractsParticipantList().process(self._params)
else:
return "no action to do"
def process(self, params):
self._checkParams(params)
ret = self._process()
if not ret:
return "None"
return ret
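# Dispatch sketch (illustrative, not from the source): one form POST is routed
# by whichever button parameter is present; "PDF.x" appears because HTML image
# submit buttons send their click coordinates as NAME.x/NAME.y, e.g.
#
#   RHAbstractsActions().process({"PDF.x": "10", "abstracts": ["5"]})
#   # -> delegates to RHAbstractsToPDF().process(...)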
class RHAbstractTPLPreview(RHTrackBase):
def _checkParams(self, params):
RHTrackBase._checkParams( self, params )
self._notifTplId = params.get("notifTpl","")
def _process(self):
tpl = self._conf.getAbstractMgr().getNotificationTplById(self._notifTplId)
self._redirect(urlHandlers.UHAbstractModNotifTplPreview.getURL(tpl))
class AbstractNotification:
def __init__(self, conf, abstract):
self._conf = conf
self._abstract = abstract
def getDict(self):
dict = {}
dict["conference_title"] = self._conf.getTitle()
dict["conference_URL"] = str(urlHandlers.UHConferenceDisplay.getURL(self._conf))
dict["abstract_title"] = self._abstract.getTitle()
dict["abstract_track"] = "No track attributed"
dict["contribution_type"] = "No type defined"
if self._abstract.getCurrentStatus().__class__ == review.AbstractStatusAccepted:
dict["abstract_track"] = self._abstract.getCurrentStatus().getTrack().getTitle()
dict["contribution_type"] = self._abstract.getContribType()#.getName()
dict["submitter_first_name"] = self._abstract.getSubmitter().getFirstName()
dict["submitter_familly_name"] = self._abstract.getSubmitter().getSurName()
dict["submitter_title"] = self._abstract.getSubmitter().getTitle()
dict["abstract_URL"] = str(urlHandlers.UHAbstractDisplay.getURL(self._abstract))
return dict
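# Usage sketch (assumed, not from the original source): the dict returned by
# getDict() feeds the %(...)s placeholders of a notification template, e.g.
#
#   tpl_subject = "[%(conference_title)s] %(abstract_title)s"
#   subject = tpl_subject % AbstractNotification(conf, abstract).getDict()
#
# which is exactly how RHAbstractSendNotificationMail below expands the
# template subject and body via `s%dict` and `b%dict`.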
class RHAbstractSendNotificationMail(RHTrackModification):
def _checkParams( self, params ):
RHTrackModification._checkParams( self, params )
notifTplId = params.get("notifTpl", "")
self._notifTpl = self._conf.getAbstractMgr().getNotificationTplById(notifTplId)
self._abstractIds = normaliseListParam( params.get("abstracts", []) )
self._abstracts = []
abMgr = self._conf.getAbstractMgr()
for id in self._abstractIds:
self._abstracts.append(abMgr.getAbstractById(id))
def _process( self ):
count = 0
for abstract in self._abstracts:
dict = AbstractNotification(self._conf, abstract).getDict()
s = self._notifTpl.getTplSubject()
b = self._notifTpl.getTplBody()
            maildata = { "fromAddr": self._notifTpl.getFromAddr(), "toList": [abstract.getSubmitter().getEmail()], "subject": s%dict, "body": b%dict }
            GenericMailer.send(GenericNotification(maildata))
            self._conf.newSentMail(abstract.getSubmitter(), s%dict, b%dict)
count += 1
#self._redirect(urlHandlers.UHConfAbstractManagment.getURL(self._conf))
p = conferences.WPAbstractSendNotificationMail(self, self._conf, count )
return p.display()
class RHAbstractsToPDF(RHTrackAbstractsBase):
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams( self, params )
self._abstractIds = self._normaliseListParam( params.get("abstracts", []) )
def _process(self):
tz = self._conf.getTimezone()
if not self._abstractIds:
return "No abstract to print"
pdf = TrackManagerAbstractsToPDF(self._conf, self._track, self._abstractIds,tz=tz)
return send_file('Abstracts.pdf', pdf.generate(), 'PDF')
class RHAbstractIntComments( RHTrackAbstractBase ):
def _process( self ):
p = tracks.WPModAbstractIntComments(self,self._track,self._abstract)
return p.display()
class RHAbstractIntCommentNew(RHAbstractIntComments):
def _checkParams(self,params):
RHAbstractIntComments._checkParams(self,params)
self._action=""
if params.has_key("OK"):
self._action="UPDATE"
self._content=params.get("content","")
elif params.has_key("CANCEL"):
self._action="CANCEL"
def _process( self ):
if self._action=="UPDATE":
c=review.Comment(self._getUser())
c.setContent(self._content)
self._abstract.addIntComment(c)
self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract))
return
elif self._action=="CANCEL":
self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract))
return
p = tracks.WPModAbstractIntCommentNew(self,self._track,self._abstract)
return p.display()
class RHAbstractIntCommentBase(RHTrackAbstractBase):
def _checkParams(self,params):
RHTrackAbstractBase._checkParams(self,params)
id=params.get("intCommentId","")
if id=="":
raise MaKaCError( _("the internal comment identifier hasn't been specified"))
self._comment=self._abstract.getIntCommentById(id)
class RHAbstractIntCommentRem(RHAbstractIntCommentBase):
def _process(self):
self._abstract.removeIntComment(self._comment)
self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract))
class RHAbstractIntCommentEdit(RHAbstractIntCommentBase):
def _checkParams(self,params):
RHAbstractIntCommentBase._checkParams(self,params)
self._action=""
if params.has_key("OK"):
self._action="UPDATE"
self._content=params.get("content","")
elif params.has_key("CANCEL"):
self._action="CANCEL"
def _process(self):
if self._action=="UPDATE":
self._comment.setContent(self._content)
self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract))
return
elif self._action=="CANCEL":
self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract))
return
p=tracks.WPModAbstractIntCommentEdit(self,self._track,self._comment)
return p.display()
class RHAbstractsParticipantList(RHTrackAbstractsBase):
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams( self, params )
self._abstractIds = self._normaliseListParam( params.get("abstracts", []) )
self._displayedGroups = params.get("displayedGroups", [])
if type(self._displayedGroups) != list:
self._displayedGroups = [self._displayedGroups]
self._clickedGroup = params.get("clickedGroup","")
def _setGroupsToDisplay(self):
if self._clickedGroup in self._displayedGroups:
self._displayedGroups.remove(self._clickedGroup)
else:
self._displayedGroups.append(self._clickedGroup)
def _process( self ):
if not self._abstractIds:
return "<table align=\"center\" width=\"100%%\"><tr><td>There are no abstracts</td></tr></table>"
submitters = OOBTree()
primaryAuthors = OOBTree()
coAuthors = OOBTree()
submitterEmails = set()
primaryAuthorEmails = set()
coAuthorEmails = set()
self._setGroupsToDisplay()
abMgr = self._conf.getAbstractMgr()
for abstId in self._abstractIds:
abst = abMgr.getAbstractById(abstId)
#Submitters
subm = abst.getSubmitter()
keySB = "%s-%s-%s"%(subm.getSurName().lower(), subm.getFirstName().lower(), subm.getEmail().lower())
submitters[keySB] = subm
submitterEmails.add(subm.getEmail())
#Primary authors
for pAut in abst.getPrimaryAuthorList():
keyPA = "%s-%s-%s"%(pAut.getSurName().lower(), pAut.getFirstName().lower(), pAut.getEmail().lower())
primaryAuthors[keyPA] = pAut
primaryAuthorEmails.add(pAut.getEmail())
#Co-authors
for coAut in abst.getCoAuthorList():
keyCA = "%s-%s-%s"%(coAut.getSurName().lower(), coAut.getFirstName().lower(), coAut.getEmail().lower())
coAuthors[keyCA] = coAut
coAuthorEmails.add(coAut.getEmail())
emailList = {"submitters":{},"primaryAuthors":{},"coAuthors":{}}
emailList["submitters"]["tree"] = submitters
emailList["primaryAuthors"]["tree"] = primaryAuthors
emailList["coAuthors"]["tree"] = coAuthors
emailList["submitters"]["emails"] = submitterEmails
emailList["primaryAuthors"]["emails"] = primaryAuthorEmails
emailList["coAuthors"]["emails"] = coAuthorEmails
p = conferences.WPConfParticipantList(self, self._target.getConference(), emailList, self._displayedGroups, self._abstractIds )
return p.display()
class ContribFilterCrit(filters.FilterCriteria):
_availableFields = { \
contribFilters.TypeFilterField.getId():contribFilters.TypeFilterField, \
contribFilters.StatusFilterField.getId():contribFilters.StatusFilterField, \
contribFilters.AuthorFilterField.getId():contribFilters.AuthorFilterField, \
contribFilters.SessionFilterField.getId():contribFilters.SessionFilterField }
class ContribSortingCrit(filters.SortingCriteria):
_availableFields={\
contribFilters.NumberSF.getId():contribFilters.NumberSF,
contribFilters.DateSF.getId():contribFilters.DateSF,
contribFilters.ContribTypeSF.getId():contribFilters.ContribTypeSF,
contribFilters.TrackSF.getId():contribFilters.TrackSF,
contribFilters.SpeakerSF.getId():contribFilters.SpeakerSF,
contribFilters.BoardNumberSF.getId():contribFilters.BoardNumberSF,
contribFilters.SessionSF.getId():contribFilters.SessionSF,
contribFilters.TitleSF.getId():contribFilters.TitleSF
}
class RHContribList(RHTrackAbstractsBase):
def _checkProtection(self):
RHTrackAbstractsBase._checkProtection(self, False)
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams(self,params)
self._conf=self._track.getConference()
filterUsed=params.has_key("OK")
#sorting
self._sortingCrit=ContribSortingCrit([params.get("sortBy","number").strip()])
self._order = params.get("order","down")
#filtering
filter = {"author":params.get("authSearch","")}
ltypes = []
if not filterUsed:
for type in self._conf.getContribTypeList():
ltypes.append(type.getId())
else:
for id in self._normaliseListParam(params.get("types",[])):
ltypes.append(id)
filter["type"]=ltypes
lsessions= []
if not filterUsed:
for session in self._conf.getSessionList():
lsessions.append( session.getId() )
filter["session"]=self._normaliseListParam(params.get("sessions",lsessions))
lstatus=[]
if not filterUsed:
for status in ContribStatusList().getList():
lstatus.append(ContribStatusList().getId(status))
filter["status"]=self._normaliseListParam(params.get("status",lstatus))
self._filterCrit=ContribFilterCrit(self._conf,filter)
typeShowNoValue,sessionShowNoValue=True,True
if filterUsed:
typeShowNoValue = params.has_key("typeShowNoValue")
sessionShowNoValue = params.has_key("sessionShowNoValue")
self._filterCrit.getField("type").setShowNoValue(typeShowNoValue)
self._filterCrit.getField("session").setShowNoValue(sessionShowNoValue)
def _process( self ):
p = tracks.WPModContribList(self,self._track)
return p.display( filterCrit= self._filterCrit, sortingCrit=self._sortingCrit, order=self._order )
class RHContribsActions:
"""
class to select the action to do with the selected contributions
"""
def process(self, params):
if 'PDF' in params:
return RHContribsToPDF().process(params)
elif 'AUTH' in params:
return RHContribsParticipantList().process(params)
return "no action to do"
class RHContribsToPDF(RHTrackAbstractsBase):
def _checkProtection(self):
RHTrackAbstractsBase._checkProtection(self, False)
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams( self, params )
self._contribIds = self._normaliseListParam( params.get("contributions", []) )
self._contribs = []
for id in self._contribIds:
self._contribs.append(self._conf.getContributionById(id))
def _process(self):
tz = self._conf.getTimezone()
if not self._contribs:
return "No contributions to print"
pdf = ContribsToPDF(self._conf, self._contribs, tz)
return send_file('Contributions.pdf', pdf.generate(), 'PDF')
class RHContribsParticipantList(RHTrackAbstractsBase):
def _checkProtection( self ):
if len( self._conf.getCoordinatedTracks( self._getUser() ) ) == 0:
RHTrackAbstractsBase._checkProtection( self )
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams( self, params )
self._contribIds = self._normaliseListParam( params.get("contributions", []) )
self._displayedGroups = self._normaliseListParam( params.get("displayedGroups", []) )
self._clickedGroup = params.get("clickedGroup","")
def _setGroupsToDisplay(self):
if self._clickedGroup in self._displayedGroups:
self._displayedGroups.remove(self._clickedGroup)
else:
self._displayedGroups.append(self._clickedGroup)
def _process( self ):
if not self._contribIds:
return "<table align=\"center\" width=\"100%%\"><tr><td>There are no contributions</td></tr></table>"
speakers = OOBTree()
primaryAuthors = OOBTree()
coAuthors = OOBTree()
speakerEmails = set()
primaryAuthorEmails = set()
coAuthorEmails = set()
self._setGroupsToDisplay()
for contribId in self._contribIds:
contrib = self._conf.getContributionById(contribId)
#Primary authors
for pAut in contrib.getPrimaryAuthorList():
if pAut.getFamilyName().lower().strip() == "" and pAut.getFirstName().lower().strip() == "" and pAut.getEmail().lower().strip() == "":
continue
keyPA = "%s-%s-%s"%(pAut.getFamilyName().lower(), pAut.getFirstName().lower(), pAut.getEmail().lower())
if contrib.isSpeaker(pAut):
speakers[keyPA] = pAut
speakerEmails.add(pAut.getEmail())
primaryAuthors[keyPA] = pAut
primaryAuthorEmails.add(pAut.getEmail())
#Co-authors
for coAut in contrib.getCoAuthorList():
if coAut.getFamilyName().lower().strip() == "" and coAut.getFirstName().lower().strip() == "" and coAut.getEmail().lower().strip() == "":
continue
keyCA = "%s-%s-%s"%(coAut.getFamilyName().lower(), coAut.getFirstName().lower(), coAut.getEmail().lower())
if contrib.isSpeaker(coAut):
speakers[keyCA] = coAut
speakerEmails.add(coAut.getEmail())
coAuthors[keyCA] = coAut
coAuthorEmails.add(coAut.getEmail())
emailList = {"speakers":{},"primaryAuthors":{},"coAuthors":{}}
emailList["speakers"]["tree"] = speakers
emailList["primaryAuthors"]["tree"] = primaryAuthors
emailList["coAuthors"]["tree"] = coAuthors
emailList["speakers"]["emails"] = speakerEmails
emailList["primaryAuthors"]["emails"] = primaryAuthorEmails
emailList["coAuthors"]["emails"] = coAuthorEmails
p = tracks.WPModParticipantList(self, self._target, emailList, self._displayedGroups, self._contribIds )
return p.display()
class RHContribQuickAccess(RHTrackAbstractsBase):
def _checkProtection(self):
RHTrackAbstractsBase._checkProtection(self, False)
def _checkParams(self,params):
RHTrackAbstractsBase._checkParams(self,params)
self._contrib=self._target.getConference().getContributionById(params.get("selContrib",""))
def _process(self):
url=urlHandlers.UHTrackModContribList.getURL(self._target)
if self._contrib is not None:
url=urlHandlers.UHContributionModification.getURL(self._contrib)
self._redirect(url)
| XeCycle/indico | indico/MaKaC/webinterface/rh/trackModif.py | Python | gpl-3.0 | 39,643 |
"""
Base class for the calculation of reduced and minimal intrinsic informations.
"""
from abc import abstractmethod
import numpy as np
from ... import Distribution
from ...algorithms import BaseAuxVarOptimizer
from ...exceptions import ditException
from ...math import prod
from ...utils import unitful
__all__ = (
'BaseIntrinsicMutualInformation',
'BaseMoreIntrinsicMutualInformation',
'BaseOneWaySKAR',
)
class BaseOneWaySKAR(BaseAuxVarOptimizer):
"""
Compute lower bounds on the secret key agreement rate of the form:
.. math::
max_{V - U - X - YZ} objective()
"""
construct_initial = BaseAuxVarOptimizer.construct_copy_initial
def __init__(self, dist, rv_x=None, rv_y=None, rv_z=None, rv_mode=None, bound_u=None, bound_v=None):
"""
Initialize the optimizer.
Parameters
----------
dist : Distribution
The distribution to compute the intrinsic mutual information of.
rv_x : iterable
The variables to consider `X`.
rv_y : iterable
The variables to consider `Y`.
rv_z : iterable
The variables to consider `Z`.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
        equal to 'names', the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
bound_u : int, None
Specifies a bound on the size of the auxiliary random variable. If
None, then the theoretical bound is used.
bound_v : int, None
Specifies a bound on the size of the auxiliary random variable. If
None, then the theoretical bound is used.
"""
super().__init__(dist, [rv_x, rv_y], rv_z, rv_mode=rv_mode)
theoretical_bound_u = self._get_u_bound()
bound_u = min(bound_u, theoretical_bound_u) if bound_u else theoretical_bound_u
theoretical_bound_v = self._get_v_bound()
bound_v = min(bound_v, theoretical_bound_v) if bound_v else theoretical_bound_v
self._construct_auxvars([({0}, bound_u), ({3}, bound_v)])
self._x = {0}
self._y = {1}
self._z = {2}
self._u = {3}
self._v = {4}
self._default_hops *= 2
@abstractmethod
def _get_u_bound(self):
"""
Bound of |U|
Returns
-------
bound : int
The bound
"""
pass
@abstractmethod
def _get_v_bound(self):
"""
Bound of |V|
Returns
-------
bound : int
The bound
"""
pass
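# Minimal subclass sketch (illustrative only, not part of dit): a concrete
# one-way SKAR optimizer just supplies the auxiliary-alphabet bounds; the
# bounds chosen below are hypothetical.
#
#   class ExampleOneWaySKAR(BaseOneWaySKAR):
#       def _get_u_bound(self):
#           return self._shape[0]          # bound |U| by |X|
#       def _get_v_bound(self):
#           return self._shape[0] ** 2     # bound |V| by |X|^2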
class BaseIntrinsicMutualInformation(BaseAuxVarOptimizer):
"""
Compute a generalized intrinsic mutual information:
.. math::
IMI[X:Y|Z] = min_{p(z_bar|z)} I[X:Y|Z]
"""
name = ""
def __init__(self, dist, rvs=None, crvs=None, bound=None, rv_mode=None):
"""
Initialize the optimizer.
Parameters
----------
dist : Distribution
The distribution to compute the intrinsic mutual information of.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the random
variables used to calculate the intrinsic mutual information. If None,
then it is calculated over all random variables, which is equivalent
to passing `rvs=dist.rvs`.
crvs : list
A single list of indexes specifying the random variables to
condition on.
bound : int, None
Specifies a bound on the size of the auxiliary random variable. If None,
then the theoretical bound is used.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
        equal to 'names', the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
"""
if not crvs:
msg = "Intrinsic mutual informations require a conditional variable."
raise ditException(msg)
super().__init__(dist, rvs, crvs, rv_mode=rv_mode)
crv_index = len(self._shape) - 1
crv_size = self._shape[crv_index]
bound = min([bound, crv_size]) if bound is not None else crv_size
self._construct_auxvars([({crv_index}, bound)])
def optimize(self, *args, **kwargs):
"""
Perform the optimization.
Parameters
----------
x0 : np.ndarray, None
Initial optimization vector. If None, use a random vector.
niter : int, None
The number of basin hops to perform while optimizing. If None,
hop a number of times equal to the dimension of the conditioning
variable(s).
"""
result = super().optimize(*args, **kwargs)
# test against known upper bounds as well, in case space wasn't well sampled.
options = [self.construct_constant_initial(), # mutual information
self.construct_copy_initial(), # conditional mutual information
result.x, # found optima
]
self._optima = min(options, key=lambda opt: self.objective(opt))
@classmethod
def functional(cls):
"""
Construct a functional form of the optimizer.
"""
@unitful
def intrinsic(dist, rvs=None, crvs=None, niter=None, bound=None, rv_mode=None):
opt = cls(dist, rvs=rvs, crvs=crvs, rv_mode=rv_mode, bound=bound)
opt.optimize(niter=niter)
return opt.objective(opt._optima)
intrinsic.__doc__ = \
"""
Compute the intrinsic {name}.
Parameters
----------
dist : Distribution
The distribution to compute the intrinsic {name} of.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the random
variables used to calculate the intrinsic {name}. If None,
then it is calculated over all random variables, which is equivalent
to passing `rvs=dist.rvs`.
crvs : list
A single list of indexes specifying the random variables to
condition on.
niter : int
The number of optimization iterations to perform.
bound : int, None
Bound on the size of the auxiliary variable. If None, use the
theoretical bound.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{{'indices', 'names'}}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
            equal to 'names', the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
""".format(name=cls.name)
return intrinsic
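# Usage sketch (assumed): concrete subclasses are typically consumed through
# `functional()`; the subclass name and distribution below are hypothetical.
#
#   intrinsic_mi = SomeIntrinsicMutualInformation.functional()
#   d = Distribution(['000', '011', '101', '110'], [1 / 4] * 4)
#   value = intrinsic_mi(d, rvs=[[0], [1]], crvs=[2], niter=10)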
class BaseMoreIntrinsicMutualInformation(BaseAuxVarOptimizer):
"""
Compute the reduced and minimal intrinsic mutual informations, upper bounds
on the secret key agreement rate:
.. math::
I[X : Y \\downarrow\\downarrow\\downarrow Z] = min_U I[X:Y|U] + I[XY:U|Z]
"""
name = ""
def __init__(self, dist, rvs=None, crvs=None, bound=None, rv_mode=None):
"""
Initialize the optimizer.
Parameters
----------
dist : Distribution
The distribution to compute the intrinsic mutual information of.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the random
variables used to calculate the intrinsic mutual information. If None,
then it is calculated over all random variables, which is equivalent
to passing `rvs=dist.rvs`.
crvs : list
A single list of indexes specifying the random variables to
condition on.
bound : int, None
Specifies a bound on the size of the auxiliary random variable. If None,
then the theoretical bound is used.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
        equal to 'names', the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
"""
if not crvs:
msg = "Intrinsic mutual informations require a conditional variable."
raise ditException(msg)
super().__init__(dist, rvs, crvs, rv_mode=rv_mode)
theoretical_bound = prod(self._shape)
bound = min([bound, theoretical_bound]) if bound else theoretical_bound
self._construct_auxvars([(self._rvs | self._crvs, bound)])
@abstractmethod
def measure(self, rvs, crvs):
"""
Abstract method for computing the appropriate measure of generalized
mutual information.
Parameters
----------
rvs : set
The set of random variables.
crvs : set
The set of conditional random variables.
Returns
-------
gmi : func
The generalized mutual information.
"""
pass
@classmethod
def functional(cls):
"""
Construct a functional form of the optimizer.
"""
@unitful
def intrinsic(dist, rvs=None, crvs=None, niter=None, bounds=None, rv_mode=None):
if bounds is None:
bounds = (2, 3, 4, None)
candidates = []
for bound in bounds:
opt = cls(dist, rvs=rvs, crvs=crvs, bound=bound, rv_mode=rv_mode)
opt.optimize(niter=niter)
candidates.append(opt.objective(opt._optima))
return min(candidates)
intrinsic.__doc__ = \
"""
Compute the {style} intrinsic {name}.
Parameters
----------
dist : Distribution
The distribution to compute the {style} intrinsic {name} of.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the random
variables used to calculate the intrinsic {name}. If None,
then it is calculated over all random variables, which is equivalent
to passing `rvs=dist.rvs`.
crvs : list
A single list of indexes specifying the random variables to
condition on.
niter : int
The number of optimization iterations to perform.
bounds : [int], None
Bounds on the size of the auxiliary variable. If None, use the
theoretical bound. This is used to better sample smaller subspaces.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{{'indices', 'names'}}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
            equal to 'names', the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
""".format(name=cls.name, style=cls.style)
return intrinsic
class BaseReducedIntrinsicMutualInformation(BaseMoreIntrinsicMutualInformation):
"""
    Compute the reduced intrinsic mutual information, an upper bound on the secret
key agreement rate:
.. math::
I[X : Y \\Downarrow Z] = min_U I[X : Y \\downarrow ZU] + H[U]
"""
style = "reduced"
@property
@staticmethod
@abstractmethod
def measure():
pass
    def _objective(self):  # pragma: no cover
        """
        Minimize :math:`I[X:Y \\downarrow ZU] + H[U]`
        Returns
        -------
        obj : func
            The objective function.
        """
h = self._entropy(self._arvs)
def objective(self, x):
"""
Compute :math:`I[X:Y \\downarrow ZU] + H[U]`
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
obj : float
The value of the objective.
"""
pmf = self.construct_joint(x)
# I[X:Y \downarrow ZU]
d = Distribution.from_ndarray(pmf)
a = self.measure(dist=d, rvs=[[rv] for rv in self._rvs], crvs=self._crvs | self._arvs)
# H[U]
b = h(pmf)
return a + b
return objective
class BaseMinimalIntrinsicMutualInformation(BaseMoreIntrinsicMutualInformation):
"""
    Compute the minimal intrinsic mutual information, an upper bound on the
secret key agreement rate:
.. math::
I[X : Y \\downarrow\\downarrow\\downarrow Z] = min_U I[X:Y|U] + I[XY:U|Z]
"""
style = "minimal"
def _objective(self):
"""
Compute I[X:Y|U] + I[XY:U|Z], or its multivariate analog.
Returns
-------
obj : func
The objective function.
"""
mmi = self.measure(self._rvs, self._arvs)
cmi = self._conditional_mutual_information(self._rvs, self._arvs, self._crvs)
def objective(self, x):
"""
Compute :math:`I[X:Y|U] + I[XY:U|Z]`
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
obj : float
The value of the objective.
"""
pmf = self.construct_joint(x)
# I[X:Y|U]
a = mmi(pmf)
# I[XY:U|Z]
b = cmi(pmf)
return a + b
return objective
class InnerTwoPartIntrinsicMutualInformation(BaseAuxVarOptimizer):
"""
Compute the two-part intrinsic mutual informations, an upper bound on the
secret key agreement rate:
.. math::
I[X : Y \\downarrow\\downarrow\\downarrow\\downarrow Z] =
inf_{J} min_{V - U - XY - ZJ} I[X:Y|J] + I[U:J|V] - I[U:Z|V]
"""
name = ""
def __init__(self, dist, rvs=None, crvs=None, j=None, bound_u=None, bound_v=None, rv_mode=None):
"""
Initialize the optimizer.
Parameters
----------
dist : Distribution
The distribution to compute the intrinsic mutual information of.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the random
variables used to calculate the intrinsic mutual information. If
None, then it is calculated over all random variables, which is
equivalent to passing `rvs=dist.rvs`.
crvs : list
A single list of indexes specifying the random variables to
condition on.
j : list
A list with a single index specifying the random variable to
consider as J.
bound_u : int, None
Specifies a bound on the size of the U auxiliary random variable. If
None, then the theoretical bound is used.
bound_v : int, None
Specifies a bound on the size of the V auxiliary random variable. If
None, then the theoretical bound is used.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
        equal to 'names', the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
"""
if not crvs:
msg = "Intrinsic mutual informations require a conditional variable."
raise ditException(msg)
super().__init__(dist, rvs + [j], crvs, rv_mode=rv_mode)
theoretical_bound_u = prod(self._shape[rv] for rv in self._rvs)
bound_u = min([bound_u, theoretical_bound_u]) if bound_u else theoretical_bound_u
theoretical_bound_v = prod(self._shape[rv] for rv in self._rvs)**2
bound_v = min([bound_v, theoretical_bound_v]) if bound_v else theoretical_bound_v
self._construct_auxvars([(self._rvs, bound_u),
({len(self._shape)}, bound_v),
])
idx = min(self._arvs)
self._j = {max(self._rvs)}
self._u = {idx}
self._v = {idx + 1}
def _objective(self):
"""
        Maximize I[U:J|V] - I[U:Z|V], or its multivariate analog; the
        I[X:Y|J] term is added by the outer optimizer.
Returns
-------
obj : func
The objective function.
"""
cmi1 = self._conditional_mutual_information(self._u, self._j, self._v)
cmi2 = self._conditional_mutual_information(self._u, self._crvs, self._v)
def objective(self, x):
"""
            Compute :math:`-(I[U:J|V] - I[U:Z|V])`, negated for minimization.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
obj : float
The value of the objective.
"""
pmf = self.construct_joint(x)
# I[U:J|V]
b = cmi1(pmf)
# I[U:Z|V]
c = cmi2(pmf)
return -(b - c)
return objective
class BaseTwoPartIntrinsicMutualInformation(BaseAuxVarOptimizer):
"""
Compute the two-part intrinsic mutual informations, an upper bound on the
secret key agreement rate:
.. math::
I[X : Y \\downarrow\\downarrow\\downarrow\\downarrow Z] =
inf_{J} min_{V - U - XY - ZJ} I[X:Y|J] + I[U:J|V] - I[U:Z|V]
"""
name = ""
def __init__(self, dist, rvs=None, crvs=None, bound_j=None, bound_u=None, bound_v=None, rv_mode=None):
"""
Initialize the optimizer.
Parameters
----------
dist : Distribution
The distribution to compute the intrinsic mutual information of.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the random
variables used to calculate the intrinsic mutual information. If
None, then it is calculated over all random variables, which is
equivalent to passing `rvs=dist.rvs`.
crvs : list
A single list of indexes specifying the random variables to
condition on.
bound_j : int, None
Specifies a bound on the size of the J auxiliary random variable. If
None, then the theoretical bound is used.
bound_u : int, None
Specifies a bound on the size of the U auxiliary random variable. If
None, then the theoretical bound is used.
bound_v : int, None
Specifies a bound on the size of the V auxiliary random variable. If
None, then the theoretical bound is used.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
        equal to 'names', the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
"""
if not crvs:
msg = "Intrinsic mutual informations require a conditional variable."
raise ditException(msg)
super().__init__(dist, rvs, crvs, rv_mode=rv_mode)
theoretical_bound_j = prod(self._shape)
bound_j = min([bound_j, theoretical_bound_j]) if bound_j else theoretical_bound_j
self._construct_auxvars([(self._rvs | self._crvs, bound_j)])
self._j = self._arvs
self._bound_u = bound_u
self._bound_v = bound_v
def _objective(self):
"""
        Minimize :math:`max(I[X:Y|J] + I[U:J|V] - I[U:Z|V])`, or its
multivariate analog.
Returns
-------
obj : func
The objective function.
"""
mmi = self.measure(self._rvs, self._j)
def objective(self, x):
"""
Compute max(I[X:Y|J] + I[U:J|V] - I[U:Z|V])
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
obj : float
The value of the objective.
TODO
----
Save the optimal inner, so that full achieving joint can be constructed.
"""
joint = self.construct_joint(x)
outcomes, pmf = zip(*[(o, p) for o, p in np.ndenumerate(joint)])
dist = Distribution(outcomes, pmf)
inner = InnerTwoPartIntrinsicMutualInformation(dist=dist,
rvs=[[rv] for rv in self._rvs],
crvs=self._crvs,
j=self._j,
bound_u=self._bound_u,
bound_v=self._bound_v,
)
inner.optimize()
opt = -inner.objective(inner._optima)
a = mmi(joint)
return a + opt
return objective
@classmethod
def functional(cls):
"""
Construct a functional form of the optimizer.
"""
@unitful
def two_part_intrinsic(dist, rvs=None, crvs=None, niter=None, bound_j=None, bound_u=None, bound_v=None, rv_mode=None):
bounds = {
(2, 2, 2),
(bound_j, bound_u, bound_v),
}
candidates = []
for b_j, b_u, b_v in bounds:
opt = cls(dist, rvs=rvs, crvs=crvs, bound_j=b_j, bound_u=b_u, bound_v=b_v, rv_mode=rv_mode)
opt.optimize(niter=niter)
candidates.append(opt.objective(opt._optima))
return min(candidates)
two_part_intrinsic.__doc__ = \
"""
Compute the two-part intrinsic {name}.
Parameters
----------
dist : Distribution
The distribution to compute the two-part intrinsic {name} of.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the
random variables used to calculate the intrinsic {name}. If
None, then it is calculated over all random variables, which is
equivalent to passing `rvs=dist.rvs`.
crvs : list
A single list of indexes specifying the random variables to
condition on.
niter : int
The number of optimization iterations to perform.
bound_j : int, None
Specifies a bound on the size of the J auxiliary random
variable. If None, then the theoretical bound is used.
bound_u : int, None
Specifies a bound on the size of the U auxiliary random
variable. If None, then the theoretical bound is used.
bound_v : int, None
Specifies a bound on the size of the V auxiliary random
variable. If None, then the theoretical bound is used.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{{'indices', 'names'}}. If equal to 'indices', then the elements
of `crvs` and `rvs` are interpreted as random variable indices.
            If equal to 'names', the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
""".format(name=cls.name)
return two_part_intrinsic
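# Usage sketch (assumed): as above, the two-part measure is consumed through
# `functional()`. The small fixed bounds (2, 2, 2) tried first keep the
# optimization cheap before the theoretical bounds are attempted.
#
#   two_part = SomeTwoPartIntrinsicMutualInformation.functional()
#   value = two_part(d, rvs=[[0], [1]], crvs=[2], niter=5)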
| dit/dit | dit/multivariate/secret_key_agreement/base_skar_optimizers.py | Python | bsd-3-clause | 25,213 |
# -*- coding: utf-8 -*-
import sys
import tweepy
import webbrowser
import time
reload(sys)
sys.setdefaultencoding("utf-8")
# Read the search query from the command-line arguments
consulta = sys.argv[1:]
# Authentication credentials
consumer_key = 'w8FmJROsBCnoirSqZxZbg'
consumer_secret = 'PoPc3qVdDWYzYHAo22xqnLfPXQdS2TLa8iucBLOqk'
access_token = '1870359661-4pv55A2ZSPQ6UmZR1vpZNcAXXOlRZ67AH9kLClf'
access_token_secret = 'yoCh8kLPon51FIookFfvs3ka3H6W6c5ZMD1Lgb5Mk8B96'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Collecting tweets
class CustomStreamListener(tweepy.StreamListener):
def on_status(self, tweet):
autor = str(tweet.author.screen_name)
texto = str(tweet.text)
idtweet = str(tweet.id)
link = "http://www.twitter.com/"+autor+"/status/"+idtweet
if (tweet.text[0:2]!='RT' and autor!="UFSJBot"):
if (len(autor)+len(texto)>131):
texto = texto[0:127-len(autor)-len(link)]+"..."
tweet_bot = "Por @"+autor+": \""+texto+"\" "+link
else:
tweet_bot = "Por @"+autor+": \""+texto+"\""
api.update_status(tweet_bot)
            print "\n@"+autor+" mentioned. Sleeping for 60 seconds..."
time.sleep(60)
            print "\nResuming collection..."
return True
def on_error(self, status_code):
        print "Error with status code:", status_code
        return True # don't kill the collector
    def on_timeout(self):
        print "Timed out!"
        return True # don't kill the collector
# Create the collector with a 60-second timeout
streaming_api = tweepy.streaming.Stream(auth, CustomStreamListener(), timeout=60)
print "Collecting tweets... "
try:
streaming_api.filter(follow=None, track=consulta)
except KeyboardInterrupt:
    print "\nBot interrupted!"
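# Usage sketch (assumed): the terms to track are passed on the command line,
# e.g.
#
#   python UFSJBot.py UFSJ "universidade federal"
#
# Each argument becomes an entry of `consulta` and is handed to
# streaming_api.filter(track=consulta).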
| ronanlopes/ufsjbot | UFSJBot.py | Python | gpl-2.0 | 1,738 |
#! /usr/bin/env python
import os
import sys
import re
import logging
if __name__ == '__main__':
# According to the python sys.path documentation, the directory containing
# the main script appears as sys.path[0].
utildir = sys.path[0]
assert(os.path.basename(utildir) == 'util')
andir = os.path.dirname(utildir)
#assert(os.path.basename(andir) == 'astrometry')
rootdir = os.path.dirname(andir)
# Here we put the "astrometry" and "astrometry/.." directories at the front
# of the path: astrometry to pick up pyfits, and .. to pick up astrometry itself.
sys.path.insert(1, andir)
sys.path.insert(2, rootdir)
import pyfits
from astrometry.util.fits import pyfits_writeto
def fits2fits(infile, outfile, verbose=False, fix_idr=False):
"""
Returns: error string, or None on success.
"""
if fix_idr:
from astrometry.util.fix_sdss_idr import fix_sdss_idr
# Read input file.
fitsin = pyfits.open(infile)
# Print out info about input file.
if verbose:
fitsin.info()
for i, hdu in enumerate(fitsin):
if fix_idr:
hdu = fitsin[i] = fix_sdss_idr(hdu)
# verify() fails when a keywords contains invalid characters,
# so go through the primary header and fix them by converting invalid
# characters to '_'
hdr = hdu.header
logging.info('Header has %i cards' % len(hdr))
# allowed characters (FITS standard section 5.1.2.1)
pat = re.compile(r'[^A-Z0-9_\-]')
newcards = []
for c in hdr.ascard:
k = c.keyword
# new keyword:
knew = pat.sub('_', k)
if k != knew:
logging.debug('Replacing illegal keyword %s by %s' % (k, knew))
# it seems pyfits is not clever enough to notice this...
if len(knew) > 8:
knew = 'HIERARCH ' + knew
newcards.append(pyfits.Card(keyword=knew, value=c.value,
comment=c.comment))
hdu.header = pyfits.Header(newcards)
# Fix input header
hdu.verify('fix')
# UGH! Work around stupid pyfits handling of scaled data...
# (it fails to round-trip scaled data correctly!)
bzero = hdr.get('BZERO', None)
bscale = hdr.get('BSCALE', None)
if (bzero is not None and bscale is not None
and (bzero != 0. or bscale != 1.)):
logging.debug('Scaling to bzero=%g, bscale=%g' % (bzero, bscale))
hdu.scale('int16', '', bscale, bzero)
# Describe output file we're about to write...
if verbose:
print 'Outputting:'
fitsin.info()
try:
pyfits_writeto(fitsin, outfile, output_verify='warn')
except pyfits.VerifyError, ve:
return ('Verification of output file failed: your FITS file is probably too broken to automatically fix.' +
' Error message is:' + str(ve))
fitsin.close()
return None
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-v', '--verbose',
action='store_true', dest='verbose',
help='be chatty')
parser.add_option('-s', '--fix-sdss',
action='store_true', dest='fix_idr',
help='fix SDSS idR files')
(options, args) = parser.parse_args()
if len(args) != 2:
print 'Usage: fits2fits.py [--verbose] input.fits output.fits'
return -1
logformat = '%(message)s'
if options.verbose:
logging.basicConfig(level=logging.DEBUG, format=logformat)
else:
logging.basicConfig(level=logging.INFO, format=logformat)
logging.raiseExceptions = False
infn = args[0]
outfn = args[1]
errmsg = fits2fits(infn, outfn, fix_idr=options.fix_idr,
verbose=options.verbose)
if errmsg is not None:
print 'fits2fits.py failed:', errmsg
return -1
return 0
if __name__ == '__main__':
sys.exit(main())
| blackball/an-test6 | util/fits2fits.py | Python | gpl-2.0 | 4,096 |
#!/usr/bin/env python
"""RESTful Open Annotation server based on Eve.
The RESTful Open Annotation API is primarily implemented using two
ways of modifying the Eve default API:
1. global configuration of keys in settings.py to use OA names,
e.g. "annotatedAt" instead of the default "_created".
2. event hooks to modify incoming and outcoming documents in more
complex ways, such as removing the default "@context" value on POST
and adding it to the top-level graph on GET.
"""
__author__ = 'Sampo Pyysalo'
__license__ = 'MIT'
import sys
import os
from eve import Eve
from oaeve import setup_callbacks
from settings import PORT
# TODO: I think we need this for mod_wsgi, but make sure.
appdir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(appdir))
try:
from development import DEBUG
print >> sys.stderr, '########## Devel, DEBUG %s ##########' % DEBUG
except ImportError:
DEBUG = False
# Eve's "settings.py application folder" default fails with wsgi
app = Eve(settings=os.path.join(appdir, 'settings.py'))
setup_callbacks(app)
def main(argv):
if not DEBUG:
app.run(host='0.0.0.0', port=PORT, debug=False)
else:
app.run(debug=DEBUG, port=PORT)
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv))
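# Illustrative sketch (assumed, not the project's actual hook code): the Eve
# event hooks wired up by setup_callbacks() look roughly like this one, which
# would drop a client-supplied "@context" before documents are stored. The
# function name and behaviour are hypothetical.
#
#   def strip_context(resource, documents):
#       for doc in documents:
#           doc.pop('@context', None)
#
#   app.on_insert += strip_context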
| restful-open-annotation/eve-restoa | server.py | Python | mit | 1,296 |
import Orange
data = Orange.data.Table("lenses")
rules = Orange.associate.AssociationRulesInducer(data, support=0.3)
rule = rules[0]
print "Rule: ", rule, "\n"
print "Supporting data instances:"
for d in data:
if rule.appliesBoth(d):
print d
print
print "Contradicting data instances:"
for d in data:
if rule.applies_left(d) and not rule.applies_right(d):
print d
print | yzl0083/orange | docs/reference/rst/code/associate-traceback.py | Python | gpl-3.0 | 397 |
# -*- coding: utf-8 -*-
from widgetastic_patternfly import BootstrapSelect
from widgetastic_patternfly import Button
from cfme.base.ui import AutomateImportExportBaseView
from cfme.base.ui import AutomateImportExportView
from cfme.utils.appliance import Navigatable
from cfme.utils.appliance.implementations.ui import navigate_to
class GitImportSelectorView(AutomateImportExportBaseView):
type = BootstrapSelect('branch_or_tag')
branch = BootstrapSelect(locator='.//div[contains(@class, "bootstrap-select git-branches")]')
tag = BootstrapSelect(locator='.//div[contains(@class, "bootstrap-select git-tags")]')
submit = Button('Submit')
cancel = Button('Cancel')
@property
def is_displayed(self):
return self.in_import_export and self.type.is_displayed
class AutomateGitRepository(Navigatable):
"""Represents an Automate git repository. This entity is not represented in UI as it is, but
    only in the database. Representing it explicitly keeps the domain code changes much simpler.
"""
def __init__(self, url=None, username=None, password=None, verify_ssl=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.url = url
self.username = username
self.password = password
self.verify_ssl = verify_ssl
self.domain = None
@classmethod
def from_db(cls, db_id, appliance=None):
git_repositories = appliance.db.client['git_repositories']
try:
url, verify_ssl = appliance.db.client.session\
.query(git_repositories.url, git_repositories.verify_ssl)\
.filter(git_repositories.id == db_id)\
.first()
return cls(url=url, verify_ssl=verify_ssl > 0, appliance=appliance)
except ValueError:
raise ValueError('No such repository in the database')
@property
def fill_values_repo_add(self):
return {
k: v
for k, v
in {
'url': self.url,
'username': self.username,
'password': self.password,
'verify_ssl': self.verify_ssl}.items() if v is not None}
def fill_values_branch_select(self, branch, tag):
"""Processes the args into a dictionary to be filled in the selection dialog."""
if branch and tag:
raise ValueError('You cannot pass branch and tag together')
elif tag is not None:
return {'type': 'Tag', 'tag': tag}
else:
return {'type': 'Branch', 'branch': branch}
def import_domain_from(self, branch=None, tag=None):
"""Import the domain from git using the Import/Export UI.
Args:
branch: If you import from a branch, specify the origin/branchname
tag: If you import from a tag, specify its name.
Returns:
Instance of :py:class:`cfme.automate.explorer.domain.Domain`
**Important!** ``branch`` and ``tag`` are mutually exclusive.
"""
imex_page = navigate_to(self.appliance.server, 'AutomateImportExport')
assert imex_page.import_git.fill(self.fill_values_repo_add)
imex_page.import_git.submit.click()
imex_page.browser.plugin.ensure_page_safe(timeout='5m')
git_select = self.create_view(GitImportSelectorView, wait='10s')
git_select.flash.assert_no_error()
git_select.fill(self.fill_values_branch_select(branch, tag))
git_select.submit.click()
git_select.browser.plugin.ensure_page_safe(timeout='5m')
imex_page = self.create_view(AutomateImportExportView, wait='10s')
imex_page.flash.assert_no_error()
# Now find the domain in database
namespaces = self.appliance.db.client['miq_ae_namespaces']
git_repositories = self.appliance.db.client['git_repositories']
none = None
query = self.appliance.db.client.session\
.query(
namespaces.id, namespaces.name, namespaces.description, git_repositories.url,
namespaces.ref_type, namespaces.ref)\
.filter(namespaces.parent_id == none, namespaces.source == 'remote')\
.join(git_repositories, namespaces.git_repository_id == git_repositories.id)
for id, name, description, url, git_type, git_type_value in query:
if url != self.url:
continue
if not (
git_type == 'branch' and branch == git_type_value or
git_type == 'tag' and tag == git_type_value):
continue
# We have the domain
dc = self.appliance.collections.domains
return dc.instantiate(
db_id=id, name=name, description=description, git_checkout_type=git_type,
git_checkout_value=git_type_value)
else:
raise ValueError('The domain imported was not found in the database!')
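# Usage sketch (assumed): importing an Automate domain from a git branch; the
# repository URL and branch name are hypothetical.
#
#   repo = AutomateGitRepository(
#       url='https://example.com/automate.git',
#       verify_ssl=True,
#       appliance=appliance,
#   )
#   domain = repo.import_domain_from(branch='origin/master')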
| RedHatQE/cfme_tests | cfme/automate/import_export.py | Python | gpl-2.0 | 4,955 |
# -*- coding: utf-8 -*-
#
#
# This file is a part of 'django-stoba' project.
#
# Copyright (c) 2016, Vassim Shahir
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import unicode_literals, absolute_import
from django.conf import settings
from django.utils.dateformat import format
from datetime import datetime
from django.utils import timezone
import pytz
def tz_aware_datetime(datetime_obj,time_zone=None):
if settings.USE_TZ:
if time_zone is None:
try:
                # settings.TIME_ZONE is a zone name string; resolve it via
                # pytz (the django.utils.timezone module is not callable)
                time_zone = pytz.timezone(settings.TIME_ZONE)
            except Exception:
time_zone = timezone.utc
else:
            time_zone = pytz.timezone(time_zone)
if datetime_obj.tzinfo is None or datetime_obj.tzinfo.utcoffset(datetime_obj) is None:
datetime_with_tz = timezone.utc.localize(datetime_obj)
else:
datetime_with_tz = datetime_obj
return datetime_with_tz.astimezone(time_zone)
return datetime_obj
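# Usage sketch (assumed): with USE_TZ enabled, a naive datetime is localized
# to UTC and then converted to the requested zone (zone name is an example):
#
#   >>> tz_aware_datetime(datetime(2016, 1, 1, 12, 0), 'Asia/Kolkata')
#   datetime.datetime(2016, 1, 1, 17, 30, tzinfo=<DstTzInfo 'Asia/Kolkata' ...>)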
def get_infinity():
return float('inf')
def datetime_to_epoch(datetime_obj):
return format(datetime_obj, u'U')
| vassim/django-stoba | stoba/core/helper.py | Python | bsd-3-clause | 2,583 |
# -*- coding: utf-8 -*-
from collections import deque
from sql import Table, Literal
from nereid import render_template, route
from nereid.globals import session, request, current_app
from nereid.helpers import slugify, url_for
from nereid import jsonify, Markup
from nereid.contrib.pagination import Pagination
from nereid.contrib.sitemap import SitemapIndex, SitemapSection
from werkzeug.exceptions import NotFound
from flask.ext.babel import format_currency
from trytond.model import ModelSQL, ModelView, fields
from trytond.pyson import Eval, Not, Bool
from trytond.transaction import Transaction
from trytond.pool import Pool, PoolMeta
from trytond import backend
from sql import Null
__all__ = [
'Product', 'ProductsRelated', 'ProductTemplate',
'ProductMedia', 'ProductCategory'
]
__metaclass__ = PoolMeta
DEFAULT_STATE = {'invisible': Not(Bool(Eval('displayed_on_eshop')))}
DEFAULT_STATE2 = {
'invisible': Not(Bool(Eval('displayed_on_eshop'))),
'required': Bool(Eval('displayed_on_eshop')),
}
class ProductMedia(ModelSQL, ModelView):
"Product Media"
__name__ = "product.media"
sequence = fields.Integer("Sequence", required=True, select=True)
static_file = fields.Many2One(
"nereid.static.file", "Static File", required=True, select=True)
product = fields.Many2One("product.product", "Product", select=True)
template = fields.Many2One("product.template", "Template", select=True)
@classmethod
def __setup__(cls):
super(ProductMedia, cls).__setup__()
cls._order.insert(0, ('sequence', 'ASC'))
@classmethod
def __register__(cls, module_name):
TableHandler = backend.get('TableHandler')
cursor = Transaction().cursor
super(ProductMedia, cls).__register__(module_name)
media_table = cls.__table__()
if TableHandler.table_exist(cursor, 'product_product_imageset'):
# Migrate data from ProductImageSet table to ProductMedia table
imageset_table = Table('product_product_imageset')
cursor.execute(*media_table.insert(
columns=[
media_table.sequence,
media_table.product, media_table.template,
media_table.static_file,
],
values=imageset_table.select(
Literal(10),
imageset_table.product, imageset_table.template,
imageset_table.image
)
))
TableHandler.drop_table(
cursor, 'product.product.imageset', 'product_product_imageset',
cascade=True
)
@staticmethod
def default_sequence():
return 10
class ProductTemplate:
__name__ = "product.template"
products_displayed_on_eshop = fields.Function(
fields.One2Many('product.product', None, 'Products (Disp. on eShop)'),
'get_products_displayed_on_eshop'
)
long_description = fields.Text('Long Description')
description = fields.Text("Description")
media = fields.One2Many("product.media", "template", "Media")
images = fields.Function(
fields.One2Many('nereid.static.file', None, 'Images'),
getter='get_template_images'
)
def get_template_images(self, name=None):
"""
Getter for `images` function field
"""
template_images = []
for media in self.media:
if media.static_file.mimetype and \
'image' in media.static_file.mimetype:
template_images.append(media.static_file.id)
return template_images
def get_products_displayed_on_eshop(self, name=None):
"""
Return the variants that are displayed on eshop
"""
Product = Pool().get('product.product')
return map(
int,
Product.search([
('template', '=', self.id),
('displayed_on_eshop', '=', True),
])
)
class Product:
"Product extension for Nereid"
__name__ = "product.product"
    #: Decides the number of products that would be remembered.
recent_list_size = 5
#: The list of fields allowed to be sent back on a JSON response from the
#: application. This is validated before any product info is built
#:
#: The `name`, `sale_price`, `id` and `uri` are sent by default
#:
#: .. versionadded:: 0.3
json_allowed_fields = set(['rec_name', 'sale_price', 'id', 'uri'])
uri = fields.Char(
'URI', select=True, states=DEFAULT_STATE2
)
displayed_on_eshop = fields.Boolean('Displayed on E-Shop?', select=True)
long_description = fields.Text('Long Description')
media = fields.One2Many(
"product.media", "product", "Media", states={
'invisible': Bool(Eval('use_template_images'))
}, depends=['use_template_images']
)
images = fields.Function(
fields.One2Many('nereid.static.file', None, 'Images'),
getter='get_product_images'
)
up_sells = fields.Many2Many(
'product.product-product.product',
'product', 'up_sell', 'Up-Sells', states=DEFAULT_STATE
)
cross_sells = fields.Many2Many(
'product.product-product.product',
'product', 'cross_sell', 'Cross-Sells', states=DEFAULT_STATE
)
default_image = fields.Function(
fields.Many2One('nereid.static.file', 'Image'), 'get_default_image',
)
use_template_description = fields.Boolean("Use template's description")
use_template_images = fields.Boolean("Use template's images")
@classmethod
def copy(cls, products, default=None):
"""Duplicate products
"""
if default is None:
default = {}
default = default.copy()
duplicate_products = []
for index, product in enumerate(products, start=1):
if product.displayed_on_eshop:
default['uri'] = "%s-copy-%d" % (product.uri, index)
duplicate_products.extend(
super(Product, cls).copy([product], default)
)
return duplicate_products
@classmethod
def validate(cls, products):
super(Product, cls).validate(products)
cls.check_uri_uniqueness(products)
def get_default_image(self, name):
"""
Returns default product image if any.
"""
return self.images[0].id if self.images else None
@classmethod
def __setup__(cls):
super(Product, cls).__setup__()
cls.description.states['invisible'] = Bool(
Eval('use_template_description')
)
cls._error_messages.update({
'unique_uri': ('URI of Product must be Unique'),
})
cls.per_page = 12
@classmethod
def __register__(cls, module_name):
TableHandler = backend.get('TableHandler')
cursor = Transaction().cursor
table = TableHandler(cursor, cls, module_name)
        # Drop the unique URI constraint from the database;
        # uniqueness is now enforced by the validation method instead.
table.drop_constraint('uri_uniq')
super(Product, cls).__register__(module_name)
@staticmethod
def default_displayed_on_eshop():
return False
@fields.depends('template', 'uri')
def on_change_with_uri(self):
"""
If the URI is empty, slugify template name into URI
"""
if not self.uri and self.template:
return slugify(self.template.name)
return self.uri
@staticmethod
def default_use_template_description():
return True
@staticmethod
def default_use_template_images():
return True
@classmethod
def check_uri_uniqueness(cls, products):
"""
Ensure uniqueness of products uri.
"""
query = ['OR']
for product in products:
# Do not check for unique uri if product is marked as
# not displayed on eshop
if not product.displayed_on_eshop:
continue
arg = [
'AND', [
('id', '!=', product.id)
], [
('uri', 'ilike', product.uri)
]
]
query.append(arg)
if query != ['OR'] and cls.search(query):
cls.raise_user_error('unique_uri')
@classmethod
@route('/product/<uri>')
@route('/product/<path:path>/<uri>')
def render(cls, uri, path=None):
"""Renders the template for a single product.
:param uri: URI of the product
:param path: Ignored parameter. This is used in
cases where SEO friendly URL like
product/category/sub-cat/sub-sub-cat/product-uri
are generated
"""
products = cls.search([
('displayed_on_eshop', '=', True),
('uri', '=', uri),
('template.active', '=', True),
], limit=1)
if not products:
return NotFound('Product Not Found')
cls._add_to_recent_list(int(products[0]))
return render_template('product.jinja', product=products[0])
@classmethod
@route('/products/+recent', methods=['GET', 'POST'])
def recent_products(cls):
"""
GET
---
Return a list of recently visited products in JSON
POST
----
Add the product to the recent list manually. This method is required
if the product page is cached, or is served by a Caching Middleware
like Varnish which may clear the session before sending the request to
Nereid.
Just as with GET the response is the AJAX of recent products
"""
if request.method == 'POST':
cls._add_to_recent_list(request.form.get('product_id', type=int))
fields = set(request.args.getlist('fields')) or cls.json_allowed_fields
fields = fields & cls.json_allowed_fields
if 'sale_price' in fields:
fields.remove('sale_price')
response = []
if hasattr(session, 'sid'):
products = cls.browse(session.get('recent-products', []))
for product in products:
product_val = {}
for field in fields:
product_val[field] = getattr(product, field)
product_val['sale_price'] = format_currency(
product.sale_price(),
request.nereid_currency.code
)
response.append(product_val)
return jsonify(products=response)
@classmethod
def _add_to_recent_list(cls, product_id):
"""Adds the given product ID to the list of recently viewed products
        By default the list size is 5. To change this you can inherit
        product.product and set the :attr:`recent_list_size` attribute to a
        non-negative integer value.
        For faster and easier access the products are stored with the ids
        alone; this behaviour can be modified by subclassing.
        The deque object cannot be saved directly in the session as it's not
        serialisable. Hence a conversion to list is made on the fly.
.. versionchanged:: 0.3
If there is no session for the user this function returns an empty
list. This ensures that the code is consistent with iterators that
may use the returned value
:param product_id: the product id to prepend to the list
"""
if not hasattr(session, 'sid'):
current_app.logger.warning(
"No session. Not saving to browsing history"
)
return []
recent_products = deque(
session.setdefault('recent-products', []), cls.recent_list_size
)
        # XXX: If a product is already in the recently viewed list it is not
        # moved to the front; it would be nice to keep the recent_products
        # list in the order of visits.
if product_id not in recent_products:
recent_products.appendleft(product_id)
session['recent-products'] = list(recent_products)
return recent_products
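    # Illustrative subclass (hypothetical, following the docstring above) to
    # enlarge the browsing history:
    #   class Product:
    #       __name__ = 'product.product'
    #       recent_list_size = 10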
@classmethod
@route('/products')
@route('/products/<int:page>')
def render_list(cls, page=1):
"""
        Renders the list of all products which are displayed_on_eshop=True
.. tip::
The implementation uses offset for pagination and could be
extremely resource intensive on databases. Hence you might want to
either have an alternate cache/search server based pagination or
limit the pagination to a maximum page number.
The base implementation does NOT limit this and could hence result
in poor performance
:param page: The page in pagination to be displayed
"""
products = Pagination(cls, [
('displayed_on_eshop', '=', True),
('template.active', '=', True),
], page, cls.per_page)
return render_template('product-list.jinja', products=products)
def sale_price(self, quantity=0):
"""Return the Sales Price.
A wrapper designed to work as a context variable in templating
        The price is calculated from the pricelist associated with the current
        user; in the case of a guest, that is the guest user. In the
        event that the logged in user does not have a pricelist set against
        them, the guest user's pricelist is chosen.
        Finally, if neither the guest user nor the registered user has a
        pricelist set against them, the list price is displayed as the
        price of the product.
:param quantity: Quantity
"""
return self.list_price
@classmethod
@route('/sitemaps/product-index.xml')
def sitemap_index(cls):
"""
Returns a Sitemap Index Page
"""
index = SitemapIndex(cls, [
('displayed_on_eshop', '=', True),
('template.active', '=', True),
])
return index.render()
@classmethod
@route('/sitemaps/product-<int:page>.xml')
def sitemap(cls, page):
sitemap_section = SitemapSection(
cls, [
('displayed_on_eshop', '=', True),
('template.active', '=', True),
], page
)
sitemap_section.changefreq = 'daily'
return sitemap_section.render()
def get_absolute_url(self, **kwargs):
"""
Return the URL of the current product.
This method works only under a nereid request context
"""
return url_for('product.product.render', uri=self.uri, **kwargs)
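    # e.g. (illustrative): a product with uri 'acoustic-guitar' resolves to
    # something like '/product/acoustic-guitar', subject to the site's locale
    # and URL rules.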
def _json(self):
"""
Return a JSON serializable dictionary of the product
"""
response = {
'template': {
'name': self.template.rec_name,
'id': self.template.id,
'list_price': self.list_price,
},
'code': self.code,
'description': self.description,
}
return response
def get_long_description(self):
"""
Get long description of product.
If the product is set to use the template's long description, then
the template long description is sent back.
The returned value is a `~jinja2.Markup` object which makes it
HTML safe and can be used directly in templates. It is recommended
to use this method instead of trying to wrap this logic in the
templates.
"""
if self.use_template_description:
return Markup(self.template.long_description)
return Markup(self.long_description)
def get_description(self):
"""
Get description of product.
If the product is set to use the template's description, then
the template description is sent back.
The returned value is a `~jinja2.Markup` object which makes it
HTML safe and can be used directly in templates. It is recommended
to use this method instead of trying to wrap this logic in the
templates.
"""
if self.use_template_description:
description = self.template.description
else:
description = self.description
if description:
return Markup(description)
def get_product_images(self, name=None):
"""
Getter for `images` function field
"""
product_images = []
for media in self.media:
if 'image' in media.static_file.mimetype:
product_images.append(media.static_file.id)
return product_images
def get_images(self):
"""
Get images of product variant.
        If the product is set to use the template's images, then
        the template images are sent back.
"""
if self.use_template_images:
return self.template.images
return self.images
class ProductsRelated(ModelSQL):
"Related Product"
__name__ = 'product.product-product.product'
_table = 'product_product_rel'
product = fields.Many2One(
'product.product', 'Product',
ondelete='CASCADE', select=True, required=True)
up_sell = fields.Many2One(
'product.product', 'Up-sell Product',
ondelete='CASCADE', select=True)
cross_sell = fields.Many2One(
'product.product', 'Cross-sell Product',
ondelete='CASCADE', select=True)
class ProductCategory:
__name__ = 'product.category'
@staticmethod
def order_rec_name(tables):
table, _ = tables[None]
return [table.parent == Null, table.parent, table.name]
@classmethod
def __setup__(cls):
super(ProductCategory, cls).__setup__()
cls.rec_name.string = "Parent/name"
| priyankarani/nereid-catalog | product.py | Python | bsd-3-clause | 17,920 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OERPLib
# Copyright (C) 2013 Sébastien Alix.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from oerplib import error
TPL_MODEL = """<
<table cellborder="0" cellpadding="0" cellspacing="0"
border="1" bgcolor="{model_bgcolor}" height="100%%">
<tr>
<td border="0" bgcolor="{model_bgcolor_title}" align="center" colspan="3">
<font color="{model_color_title}">{name}</font>
</td>
</tr>
{attrs}
{relations_r}
</table>>"""
TPL_MODEL_SUBTITLE = """
<tr><td> </td></tr>
<tr>
<td align="center"
border="0"
colspan="3"><font color="{color}">[{title}]</font></td>
</tr>
"""
TPL_MODEL_ATTR = """
<tr>
<td align="left" border="0">- <font color="{color_name}">{name}</font></td>
<td align="left" border="0">{flags}</td>
<td align="left" border="0"> <font color="{color_name}">{type_}</font> </td>
</tr>
"""
TPL_MODEL_REL = """
<tr>
<td align="left" border="0">- {name}</td>
<td align="left" border="0">{flags}</td>
<td align="left" border="0"> <font color="{color_name}">{type_}</font></td>
</tr>
"""
def pattern2regex(pattern):
"""Return a regular expression corresponding to `pattern` (simpler
representation of the regular expression).
"""
pattern = "^{0}$".format(pattern.replace('*', '.*'))
return re.compile(pattern)
def match_in(elt, lst):
"""Return `True` if `elt` is matching one of a pattern in `lst`."""
for regex in lst:
if regex.match(elt):
return True
return False
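# e.g. (illustrative): match_in('res.partner', [pattern2regex('res.*')])
# returns True, while match_in('res.partner', [pattern2regex('account*')])
# returns False.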
class Relations(object):
"""Draw relations between models with `Graphviz`."""
def __init__(self, oerp, models, maxdepth=1, whitelist=None, blacklist=None,
attrs_whitelist=None, attrs_blacklist=None, config=None):
self.oerp = oerp
self._models = models
self._maxdepth = maxdepth
self._whitelist = [pattern2regex(model) for model in (models)]
self._whitelist.extend(
[pattern2regex(model) for model in (whitelist or ['*'])])
self._blacklist = [pattern2regex(model) for model in (blacklist or [])]
self._attrs_whitelist = [pattern2regex(model)
for model in (attrs_whitelist or [])]
self._attrs_blacklist = [pattern2regex(model)
for model in (attrs_blacklist or [])]
# Configuration options
self._config = {
'relation_types': ['many2one', 'one2many', 'many2many'],
'show_many2many_table': False,
'color_many2one': '#0E2548',
'color_one2many': '#008200',
'color_many2many': '#6E0004',
'model_root_bgcolor_title': '#A50018',
'model_bgcolor_title': '#64629C',
'model_color_title': 'white',
'model_color_subtitle': '#3E3D60',
'model_bgcolor': 'white',
'color_normal': 'black',
'color_required': 'blue',
'color_function': '#7D7D7D',
'space_between_models': 0.25,
}
self._config.update(config or {})
# Store relations between data models:
self._relations = {}
self._stack = {'o2m': {}}
# Build and draw relations for each model
for model in models:
self._build_relations(self.oerp.get(model), 0)
def _build_relations(self, obj, depth):
"""Build all relations of `obj` recursively:
- many2one
- one2many (will be bound to the related many2one)
- many2many (will be bound with the eventual many2many from the
other side)
"""
# Stop scanning when the maxdepth is reached, or when the data model
# has already been scanned
if obj._name in self._models:
depth = 0
if depth > self._maxdepth or obj._name in self._relations:
return
# Check the whitelist, then the blacklist
if obj._name not in self._models:
if self._whitelist:
if not match_in(obj._name, self._whitelist):
return
if self._blacklist:
if match_in(obj._name, self._blacklist):
return
# Only increments depth for data models which are not already scanned
if obj._name not in self._relations:
depth += 1
# Scan relational fields of the data model
fields = obj.fields_get()
if obj._name not in self._relations:
self._relations[obj._name] = {
'relations': {},
'relations_r': {}, # Recursive relations
'fields': dict((k, v) for k, v in fields.iteritems()
if not v.get('relation')),
}
for name, data in fields.iteritems():
if 'relation' in data \
and data['type'] in self._config['relation_types']:
rel = data['relation']
# where to store the relation?
store_type = obj._name == rel and 'relations_r' or 'relations'
# flags
flags = {
'required': data.get('required'),
'function': data.get('function'),
'fnct_inv': data.get('fnct_inv'),
'fnct_search': data.get('fnct_search'),
}
# many2one
if data['type'] == 'many2one':
# Check if related one2many fields have been registered
# for the current many2one relation
o2m_fields = obj._name in self._stack['o2m'] \
and rel in self._stack['o2m'][obj._name] \
and name in self._stack['o2m'][obj._name][rel] \
and self._stack['o2m'][obj._name][rel][name] \
or []
# Add the field
self._relations[obj._name][store_type][name] = {
'type': 'many2one',
'relation': rel,
'name': name,
'o2m_fields': o2m_fields,
}
self._relations[obj._name][store_type][name].update(flags)
# one2many
elif data['type'] == 'one2many':
# 'relation_field' key may be missing for 'one2many'
# generated by 'fields.function'
rel_f = data.get('relation_field', None)
# If it is a normal o2m field (with a relation field), it
# will be attached to its corresponding m2o field
if rel_f:
# Case where the related m2o field has already been
# registered
if rel in self._relations \
and rel_f in self._relations[rel][store_type]:
if name not in self._relations[
rel][store_type][rel_f]:
self._relations[
rel][store_type][
rel_f]['o2m_fields'].append(name)
# Otherwise, we will process the field later (when the
# m2o field will be scanned)
else:
if rel not in self._stack['o2m']:
self._stack['o2m'][rel] = {}
if obj._name not in self._stack['o2m'][rel]:
self._stack['o2m'][rel][obj._name] = {}
if rel_f not in self._stack['o2m'][rel][obj._name]:
self._stack['o2m'][rel][obj._name][rel_f] = []
self._stack[
'o2m'][rel][obj._name][rel_f].append(name)
# If the o2m field has no relation field available
# (calculated by a function, or a related field) the
# relation is stored as a standalone one2many
else:
self._relations[obj._name][store_type][name] = {
'type': 'one2many',
'relation': rel,
'name': name,
}
self._relations[obj._name][store_type][name].update(
flags)
# many2many
elif data['type'] == 'many2many':
#rel_columns = data.get('related_columns') \
# or data.get('m2m_join_columns')
#rel_columns = rel_columns and tuple(rel_columns) or None
self._relations[obj._name][store_type][name] = {
'type': 'many2many',
'relation': rel,
'name': name,
'third_table':
data.get('third_table') or data.get('m2m_join_table'),
'related_columns': None,
}
self._relations[obj._name][store_type][name].update(flags)
# Scan relations recursively
rel_obj = self.oerp.get(rel)
self._build_relations(rel_obj, depth)
def make_dot(self):
"""Returns a `pydot.Dot` object representing relations between models.
>>> graph = oerp.inspect.relations(['res.partner'])
>>> graph.make_dot()
<pydot.Dot object at 0x2bb0650>
See the `pydot <http://code.google.com/p/pydot/>`_ documentation
for details.
"""
try:
import pydot
except ImportError:
raise error.InternalError("'pydot' module not found")
output = pydot.Dot(
graph_type='digraph', overlap='scalexy', splines='true',
nodesep=str(self._config['space_between_models']))
for model, data in self._relations.iteritems():
# Generate attributes of the model
attrs_ok = False
attrs = []
if self._attrs_whitelist \
and match_in(model, self._attrs_whitelist):
attrs_ok = True
if self._attrs_blacklist \
and match_in(model, self._attrs_blacklist):
attrs_ok = False
if attrs_ok:
subtitle = TPL_MODEL_SUBTITLE.format(
color=self._config['model_color_subtitle'],
title="Attributes")
attrs.append(subtitle)
for k, v in sorted(data['fields'].iteritems()):
color_name = self._config['color_normal']
if v.get('function'):
color_name = self._config['color_function']
if v.get('fnct_inv'):
color_name = self._config['color_normal']
#if v.get('required'):
# color_name = self._config['color_required']
attr = TPL_MODEL_ATTR.format(
name=k, type_=v['type'],
color_name=color_name,
flags=self._generate_flags_label(v))
attrs.append(attr)
# Generate recursive relations of the model
relations_r = []
if data['relations_r']:
subtitle = TPL_MODEL_SUBTITLE.format(
color=self._config['model_color_subtitle'],
title="Recursive relations")
relations_r.append(subtitle)
for data2 in data['relations_r'].itervalues():
label = self._generate_relation_label(data2)
flags = self._generate_flags_label(data2)
rel_r = TPL_MODEL_REL.format(
name=label, flags=flags,
color_name=self._config['color_normal'],
type_=data2['type'])
relations_r.append(rel_r)
# Generate the layout of the model
model_bgcolor_title = self._config['model_bgcolor_title']
if model in self._models:
model_bgcolor_title = self._config['model_root_bgcolor_title']
tpl = TPL_MODEL.format(
model_color_title=self._config['model_color_title'],
model_bgcolor_title=model_bgcolor_title,
model_bgcolor=self._config['model_bgcolor'],
name=model,
attrs=''.join(attrs),
relations_r=''.join(relations_r))
# Add the model to the graph
node = self._create_node(model, 'relation', tpl)
output.add_node(node)
# Draw relations of the model
for data2 in data['relations'].itervalues():
if data2['relation'] in self._relations:
edge = self._create_edge(model, data2['relation'], data2)
output.add_edge(edge)
return output
def _create_node(self, name, type_, tpl=None):
"""Generate a `pydot.Node` object.
`type_` can take one of these values: ``relation``, ``m2m_table``.
If a HTML `tpl` is supplied, it will be used as layout for the node.
"""
import pydot
types = {
'relation': {
'margin': '0',
'shape': tpl and 'none' or 'record',
'label': tpl or name,
},
'm2m_table': {
'margin': '0',
'shape': tpl and 'none' or 'record',
'color': self._config['color_many2many'],
'fontcolor': self._config['color_many2many'],
'label': tpl or name,
},
}
return pydot.Node(name, **types[type_])
def _create_edge(self, model1, model2, data):
"""Generate a `pydot.Edge` object, representing a relation between
`model1` and `model2`.
"""
import pydot
label = self._generate_relation_label(data, space=6, on_arrow=True)
return pydot.Edge(
model1, model2,
label=label,
labeldistance='10.0',
color=self._config['color_{0}'.format(data['type'])],
fontcolor=self._config['color_{0}'.format(data['type'])])
#arrowhead=(data['type'] == 'many2many' and 'none' or 'normal'),
def _generate_flags_label(self, data):
"""Generate a HTML label for status flags of a field
described by `data`.
"""
flags = []
if data.get('required'):
flags.append("<font color='{color}'>R</font>".format(
color=self._config['color_required']))
if data.get('function'):
name = data.get('fnct_inv') and "Fw" or "F"
if data.get('fnct_search'):
name += "s"
flags.append("<font color='{color}'>{name}</font>".format(
name=name, color=self._config['color_function']))
if flags:
return " [{0}]".format(' '.join(flags))
return ""
def _generate_relation_label(self, data, space=0, on_arrow=False):
"""Generate a HTML label based for the relation described by `data`."""
name_color = self._config['color_{0}'.format(data['type'])]
label = "{space}<font color='{color}'>{name}</font>".format(
color=name_color, name=data['name'], space=' ' * space)
# many2one arrow
if data['type'] == 'many2one' and data['o2m_fields']:
label = "{label} <font color='{color}'>← {o2m}</font>".format(
label=label,
color=self._config['color_one2many'],
o2m=', '.join(data['o2m_fields']))
# one2many "standalone" arrow
if data['type'] == 'one2many':
pass
# many2many arrow
if data['type'] == 'many2many':
m2m_table = ''
if self._config['show_many2many_table']:
if data.get('third_table'):
m2m_table = '({table})'.format(
table=data.get('third_table'))
label = "{space}<font color='{color}'>{name} {m2m_t}</font>".format(
color=name_color, name=data['name'],
m2m_t=m2m_table, space=' ' * space)
# flags
if on_arrow:
label += self._generate_flags_label(data)
# add space on the right
label = label + "{space}".format(space=' ' * space)
# closing tag
if on_arrow:
label = "<{label}>".format(label=label)
return label
def write(self, *args, **kwargs):
"""Write the resulting graph in a file.
It is just a wrapper around the :func:`pydot.Dot.write` method
(see the `pydot <http://code.google.com/p/pydot/>`_ documentation for
details). Below a common way to use it::
>>> graph = oerp.inspect.relations(['res.partner'])
>>> graph.write('relations_res_partner.png', format='png')
About supported formats, consult the
`Graphviz documentation <http://www.graphviz.org/doc/info/output.html>`_.
"""
output = self.make_dot()
return output.write(*args, **kwargs)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| etsinko/oerplib | oerplib/service/inspect/relations.py | Python | lgpl-3.0 | 18,356 |
import StateMod1
class Board():
# This class is used to reason about the board.
# A board is read from file and converted into a 2d array
# 'S' is start 'G' is goal '#' is obstacle
def __init__(self, filename):
inputhandler = InputHandler(filename)
self.dimensions = inputhandler.dimens
self.start = inputhandler.start
self.goal = inputhandler.goal
self.obstacles = inputhandler.obstacles
self.filename = filename
self.grid = [[' ' for j in range(int(self.dimensions[0]))] for i in range(int(self.dimensions[1]))]
self.grid[int(self.goal[0])][int(self.goal[1])] = 'G'
self.grid[int(self.start[0])][int(self.start[1])] = 'S'
for obs in self.obstacles:
x = int(obs[0])
y = int(obs[1])
width = int(obs[2])
height = int(obs[3])
for i in range(x, x + width):
for j in range(y, y + height):
self.grid[i][j] = '#'
    # This method is used to provide the astar class with an initial search
    # state. It iterates through the textual representation of the board and
    # returns a new node with the coordinates of the start state.
def generateInitialState(self):
for i in range(len(self.grid)):
for j in range(len(self.grid[i])):
if self.grid[i][j] == 'S':
return StateMod1.StateMod1(i, j, self, None)
# This class is a basic class to handle input from a filename.
# The first line read is the dimensions of the grid,
# the second line is the start and goal coordinates,
# and all subsequent lines are obstacles.
class InputHandler():
def __init__(self, filename):
f = open(filename, 'r')
self.dimens = f.readline().rstrip().translate(None, '()').split(',')
self.startandgoal = f.readline().rstrip()
self.start = self.startandgoal.split(')(')[0].strip('()').split(',')
self.goal = self.startandgoal.split(')(')[1].strip('()').split(',')
self.obstaclesHack = f.readlines()
self.obstacles = []
for line in self.obstaclesHack:
line = line.rstrip()
line = line.translate(None, '()')
self.obstacles.append(line.split(','))
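# Illustrative input file layout (inferred from the parsing above):
#   (10,10)       first line: grid dimensions
#   (0,0)(9,9)    second line: start and goal coordinates
#   (2,2,3,4)    each remaining line: an obstacle as x, y, width, height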
| pmitche/it3105-aiprogramming | project1/module1/board.py | Python | mit | 2,229 |
__author__ = 'Elliott'
from django.http import HttpResponse
from models import *
def renamejp2(request):
    log = "<html><head></head><body><h1>Successful changes</h1><div>"
    errors = "<h1>ERRORS</h1><div>"
    # Get all page legacy records
    pl = PageLegacy.objects.all()
    # Try to get each page on the server:
    # Passed: copy the page to the parsed directory, with the pageimage id
    #         as the new filename
    # Failed: write the expected filename to the error log
    return HttpResponse("")
 | kcl-ddh/chopin-online | ocve/scripts/renamejp2.py | Python | gpl-3.0 | 493
from __future__ import division, absolute_import, print_function
import re
import os
import sys
import types
from copy import copy
from distutils.ccompiler import *
from distutils import ccompiler
from distutils.errors import DistutilsExecError, DistutilsModuleError, \
DistutilsPlatformError
from distutils.sysconfig import customize_compiler
from distutils.version import LooseVersion
from numpy.distutils import log
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
quote_args
from numpy.distutils.compat import get_exception
def replace_method(klass, method_name, func):
if sys.version_info[0] < 3:
m = types.MethodType(func, None, klass)
else:
# Py3k does not have unbound method anymore, MethodType does not work
m = lambda self, *args, **kw: func(self, *args, **kw)
setattr(klass, method_name, m)
# Using customized CCompiler.spawn.
def CCompiler_spawn(self, cmd, display=None):
"""
Execute a command in a sub-process.
Parameters
----------
cmd : str
The command to execute.
display : str or sequence of str, optional
The text to add to the log file kept by `numpy.distutils`.
If not given, `display` is equal to `cmd`.
Returns
-------
None
Raises
------
DistutilsExecError
If the command failed, i.e. the exit status was not 0.
"""
if display is None:
display = cmd
if is_sequence(display):
display = ' '.join(list(display))
log.info(display)
s, o = exec_command(cmd)
if s:
if is_sequence(cmd):
cmd = ' '.join(list(cmd))
try:
print(o)
except UnicodeError:
# When installing through pip, `o` can contain non-ascii chars
pass
if re.search('Too many open files', o):
msg = '\nTry rerunning setup command until build succeeds.'
else:
msg = ''
raise DistutilsExecError('Command "%s" failed with exit status %d%s' % (cmd, s, msg))
replace_method(CCompiler, 'spawn', CCompiler_spawn)
def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
"""
Return the name of the object files for the given source files.
Parameters
----------
source_filenames : list of str
The list of paths to source files. Paths can be either relative or
absolute, this is handled transparently.
strip_dir : bool, optional
Whether to strip the directory from the returned paths. If True,
the file name prepended by `output_dir` is returned. Default is False.
output_dir : str, optional
If given, this path is prepended to the returned paths to the
object files.
Returns
-------
obj_names : list of str
The list of paths to the object files corresponding to the source
files in `source_filenames`.
"""
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(os.path.normpath(src_name))
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if base.startswith('..'):
# Resolve starting relative path components, middle ones
# (if any) have been handled by os.path.normpath above.
i = base.rfind('..')+2
d = base[:i]
d = os.path.basename(os.path.abspath(d))
base = d + base[i:]
if ext not in self.src_extensions:
raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name))
if strip_dir:
base = os.path.basename(base)
obj_name = os.path.join(output_dir, base + self.obj_extension)
obj_names.append(obj_name)
return obj_names
replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)
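# e.g. (illustrative): with the default '.o' object extension,
#   compiler.object_filenames(['src/foo.c'], output_dir='build')
# returns ['build/src/foo.o'].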
def CCompiler_compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""
Compile one or more source files.
Please refer to the Python distutils API reference for more details.
Parameters
----------
sources : list of str
A list of filenames
output_dir : str, optional
Path to the output directory.
macros : list of tuples
A list of macro definitions.
include_dirs : list of str, optional
The directories to add to the default include file search path for
this compilation only.
debug : bool, optional
Whether or not to output debug symbols in or alongside the object
file(s).
    extra_preargs, extra_postargs : list of str, optional
        Extra command-line arguments to pass before/after the usual ones.
depends : list of str, optional
A list of file names that all targets depend on.
Returns
-------
objects : list of str
A list of object file names, one per source file `sources`.
Raises
------
CompileError
If compilation fails.
"""
# This method is effective only with Python >=2.3 distutils.
# Any changes here should be applied also to fcompiler.compile
# method to support pre Python 2.3 distutils.
if not sources:
return []
# FIXME:RELATIVE_IMPORT
if sys.version_info[0] < 3:
from .fcompiler import FCompiler
else:
from numpy.distutils.fcompiler import FCompiler
if isinstance(self, FCompiler):
display = []
for fc in ['f77', 'f90', 'fix']:
fcomp = getattr(self, 'compiler_'+fc)
if fcomp is None:
continue
display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp)))
display = '\n'.join(display)
else:
ccomp = self.compiler_so
display = "C compiler: %s\n" % (' '.join(ccomp),)
log.info(display)
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
display = "compile options: '%s'" % (' '.join(cc_args))
if extra_postargs:
display += "\nextra options: '%s'" % (' '.join(extra_postargs))
log.info(display)
# build any sources in same order as they were originally specified
# especially important for fortran .f90 files using modules
if isinstance(self, FCompiler):
objects_to_build = list(build.keys())
for obj in objects:
if obj in objects_to_build:
src, ext = build[obj]
if self.compiler_type=='absoft':
obj = cyg2win32(obj)
src = cyg2win32(src)
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
else:
for obj, (src, ext) in build.items():
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# Return *all* object filenames, not just the ones we just built.
return objects
replace_method(CCompiler, 'compile', CCompiler_compile)
def CCompiler_customize_cmd(self, cmd, ignore=()):
"""
Customize compiler using distutils command.
Parameters
----------
cmd : class instance
An instance inheriting from `distutils.cmd.Command`.
ignore : sequence of str, optional
List of `CCompiler` commands (without ``'set_'``) that should not be
altered. Strings that are checked for are:
``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',
'rpath', 'link_objects')``.
Returns
-------
None
"""
log.info('customize %s using %s' % (self.__class__.__name__,
cmd.__class__.__name__))
def allow(attr):
return getattr(cmd, attr, None) is not None and attr not in ignore
if allow('include_dirs'):
self.set_include_dirs(cmd.include_dirs)
if allow('define'):
for (name, value) in cmd.define:
self.define_macro(name, value)
if allow('undef'):
for macro in cmd.undef:
self.undefine_macro(macro)
if allow('libraries'):
self.set_libraries(self.libraries + cmd.libraries)
if allow('library_dirs'):
self.set_library_dirs(self.library_dirs + cmd.library_dirs)
if allow('rpath'):
self.set_runtime_library_dirs(cmd.rpath)
if allow('link_objects'):
self.set_link_objects(cmd.link_objects)
replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
def _compiler_to_string(compiler):
props = []
mx = 0
keys = list(compiler.executables.keys())
for key in ['version', 'libraries', 'library_dirs',
'object_switch', 'compile_switch',
'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:
if key not in keys:
keys.append(key)
for key in keys:
if hasattr(compiler, key):
v = getattr(compiler, key)
mx = max(mx, len(key))
props.append((key, repr(v)))
lines = []
format = '%-' + repr(mx+1) + 's = %s'
for prop in props:
lines.append(format % prop)
return '\n'.join(lines)
def CCompiler_show_customization(self):
"""
Print the compiler customizations to stdout.
Parameters
----------
None
Returns
-------
None
Notes
-----
Printing is only done if the distutils log threshold is < 2.
"""
if 0:
for attrname in ['include_dirs', 'define', 'undef',
'libraries', 'library_dirs',
'rpath', 'link_objects']:
attr = getattr(self, attrname, None)
if not attr:
continue
log.info("compiler '%s' is set to %s" % (attrname, attr))
        try:
            self.get_version()
        except Exception:
            pass
if log._global_log.threshold<2:
print('*'*80)
print(self.__class__)
print(_compiler_to_string(self))
print('*'*80)
replace_method(CCompiler, 'show_customization', CCompiler_show_customization)
def CCompiler_customize(self, dist, need_cxx=0):
"""
Do any platform-specific customization of a compiler instance.
This method calls `distutils.sysconfig.customize_compiler` for
platform-specific customization, as well as optionally remove a flag
to suppress spurious warnings in case C++ code is being compiled.
Parameters
----------
dist : object
This parameter is not used for anything.
need_cxx : bool, optional
Whether or not C++ has to be compiled. If so (True), the
``"-Wstrict-prototypes"`` option is removed to prevent spurious
warnings. Default is False.
Returns
-------
None
Notes
-----
All the default options used by distutils can be extracted with::
from distutils import sysconfig
sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
'CCSHARED', 'LDSHARED', 'SO')
"""
# See FCompiler.customize for suggested usage.
log.info('customize %s' % (self.__class__.__name__))
customize_compiler(self)
if need_cxx:
# In general, distutils uses -Wstrict-prototypes, but this option is
# not valid for C++ code, only for C. Remove it if it's there to
# avoid a spurious warning on every compilation.
try:
self.compiler_so.remove('-Wstrict-prototypes')
except (AttributeError, ValueError):
pass
if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:
if not self.compiler_cxx:
if self.compiler[0].startswith('gcc'):
a, b = 'gcc', 'g++'
else:
a, b = 'cc', 'c++'
self.compiler_cxx = [self.compiler[0].replace(a, b)]\
+ self.compiler[1:]
else:
if hasattr(self, 'compiler'):
log.warn("#### %s #######" % (self.compiler,))
log.warn('Missing compiler_cxx fix for '+self.__class__.__name__)
return
replace_method(CCompiler, 'customize', CCompiler_customize)
def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
"""
Simple matching of version numbers, for use in CCompiler and FCompiler.
Parameters
----------
pat : str, optional
A regular expression matching version numbers.
Default is ``r'[-.\\d]+'``.
ignore : str, optional
A regular expression matching patterns to skip.
Default is ``''``, in which case nothing is skipped.
start : str, optional
A regular expression matching the start of where to start looking
for version numbers.
Default is ``''``, in which case searching is started at the
beginning of the version string given to `matcher`.
Returns
-------
matcher : callable
A function that is appropriate to use as the ``.version_match``
attribute of a `CCompiler` class. `matcher` takes a single parameter,
a version string.
"""
def matcher(self, version_string):
# version string may appear in the second line, so getting rid
# of new lines:
version_string = version_string.replace('\n', ' ')
pos = 0
if start:
m = re.match(start, version_string)
if not m:
return None
pos = m.end()
while True:
m = re.search(pat, version_string[pos:])
if not m:
return None
if ignore and re.match(ignore, m.group(0)):
pos = m.end()
continue
break
return m.group(0)
return matcher
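# e.g. (illustrative): a compiler class defining
#   version_match = simple_version_match(start=r'gcc')
# would extract '4.8.2' from the version string 'gcc (GCC) 4.8.2'.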
def CCompiler_get_version(self, force=False, ok_status=[0]):
"""
Return compiler version, or None if compiler is not available.
Parameters
----------
force : bool, optional
If True, force a new determination of the version, even if the
compiler already has a version attribute. Default is False.
ok_status : list of int, optional
The list of status values returned by the version look-up process
for which a version string is returned. If the status value is not
in `ok_status`, None is returned. Default is ``[0]``.
Returns
-------
version : str or None
Version string, in the format of `distutils.version.LooseVersion`.
"""
if not force and hasattr(self, 'version'):
return self.version
self.find_executables()
try:
version_cmd = self.version_cmd
except AttributeError:
return None
if not version_cmd or not version_cmd[0]:
return None
try:
matcher = self.version_match
except AttributeError:
try:
pat = self.version_pattern
except AttributeError:
return None
def matcher(version_string):
m = re.match(pat, version_string)
if not m:
return None
version = m.group('version')
return version
status, output = exec_command(version_cmd, use_tee=0)
version = None
if status in ok_status:
version = matcher(output)
if version:
version = LooseVersion(version)
self.version = version
return version
replace_method(CCompiler, 'get_version', CCompiler_get_version)
def CCompiler_cxx_compiler(self):
"""
Return the C++ compiler.
Parameters
----------
None
Returns
-------
cxx : class instance
The C++ compiler, as a `CCompiler` instance.
"""
if self.compiler_type=='msvc': return self
cxx = copy(self)
cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:]
if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]:
# AIX needs the ld_so_aix script included with Python
cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
+ cxx.linker_so[2:]
else:
cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]
return cxx
replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',
"Intel C Compiler for 32-bit applications")
compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',
"Intel C Itanium Compiler for Itanium-based applications")
compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
"Intel C Compiler for 64-bit applications")
compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
"PathScale Compiler for SiCortex-based applications")
ccompiler._default_compilers += (('linux.*', 'intel'),
('linux.*', 'intele'),
('linux.*', 'intelem'),
('linux.*', 'pathcc'))
if sys.platform == 'win32':
compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"\
"(for MSC built Python)")
if mingw32():
# On windows platforms, we want to default to mingw32 (gcc)
# because msvc can't build blitz stuff.
log.info('Setting mingw32 as default compiler for nt.')
ccompiler._default_compilers = (('nt', 'mingw32'),) \
+ ccompiler._default_compilers
_distutils_new_compiler = new_compiler
def new_compiler (plat=None,
compiler=None,
verbose=0,
dry_run=0,
force=0):
# Try first C compilers from numpy.distutils.
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError(msg)
module_name = "numpy.distutils." + module_name
try:
__import__ (module_name)
except ImportError:
msg = str(get_exception())
log.info('%s in numpy.distutils; trying from distutils',
str(msg))
module_name = module_name[6:]
try:
__import__(module_name)
except ImportError:
msg = str(get_exception())
raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
module_name)
try:
module = sys.modules[module_name]
klass = vars(module)[class_name]
except KeyError:
raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name))
compiler = klass(None, dry_run, force)
log.debug('new_compiler returns %s' % (klass))
return compiler
ccompiler.new_compiler = new_compiler
_distutils_gen_lib_options = gen_lib_options
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
library_dirs = quote_args(library_dirs)
runtime_library_dirs = quote_args(runtime_library_dirs)
r = _distutils_gen_lib_options(compiler, library_dirs,
runtime_library_dirs, libraries)
lib_opts = []
for i in r:
if is_sequence(i):
lib_opts.extend(list(i))
else:
lib_opts.append(i)
return lib_opts
ccompiler.gen_lib_options = gen_lib_options
# Also fix up the various compiler modules, which do
# from distutils.ccompiler import gen_lib_options
# Don't bother with mwerks, as we don't support Classic Mac.
for _cc in ['msvc9', 'msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
_m = sys.modules.get('distutils.' + _cc + 'compiler')
if _m is not None:
setattr(_m, 'gen_lib_options', gen_lib_options)
_distutils_gen_preprocess_options = gen_preprocess_options
def gen_preprocess_options (macros, include_dirs):
include_dirs = quote_args(include_dirs)
return _distutils_gen_preprocess_options(macros, include_dirs)
ccompiler.gen_preprocess_options = gen_preprocess_options
##Fix distutils.util.split_quoted:
# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears
# that removing this fix causes f2py problems on Windows XP (see ticket #723).
# Specifically, on WinXP when gfortran is installed in a directory path, which
# contains spaces, then f2py is unable to find it.
import re
import string
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
_has_white_re = re.compile(r'\s')
def split_quoted(s):
s = s.strip()
words = []
pos = 0
while s:
m = _wordchars_re.match(s, pos)
end = m.end()
if end == len(s):
words.append(s[:end])
break
if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
words.append(s[:end]) # we definitely have a word delimiter
s = s[end:].lstrip()
pos = 0
elif s[end] == '\\': # preserve whatever is being escaped;
# will become part of the current word
s = s[:end] + s[end+1:]
pos = end+1
else:
if s[end] == "'": # slurp singly-quoted string
m = _squote_re.match(s, end)
elif s[end] == '"': # slurp doubly-quoted string
m = _dquote_re.match(s, end)
else:
raise RuntimeError("this can't happen (bad char '%c')" % s[end])
if m is None:
raise ValueError("bad string (mismatched %s quotes?)" % s[end])
(beg, end) = m.span()
if _has_white_re.search(s[beg+1:end-1]):
s = s[:beg] + s[beg+1:end-1] + s[end:]
pos = m.end() - 2
else:
# Keeping quotes when a quoted word does not contain
# white-space. XXX: send a patch to distutils
pos = m.end()
if pos >= len(s):
words.append(s)
break
return words
ccompiler.split_quoted = split_quoted
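# e.g. (illustrative): quotes around a whitespace-containing word are dropped
# while the word stays intact,
#   split_quoted('gcc "my dir" -O2') -> ['gcc', 'my dir', '-O2']
# whereas a quoted word without whitespace keeps its quotes:
#   split_quoted('"abc"') -> ['"abc"']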
##Fix distutils.util.split_quoted:
| nan86150/ImageFusion | lib/python2.7/site-packages/numpy/distutils/ccompiler.py | Python | mit | 22,819 |
import datetime
import glob
import importlib
import logging
import os
import re
import sys
import time
import traceback
from django.core.management.base import BaseCommand
from django.conf import settings
# from django.core.management.base import CommandError
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
def is_python_file(path):
    # Matches .py and .js files, mirroring the watcher's trigger set despite
    # the function's name.
    return bool(re.search(r'\.(py|js)$', path))
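# e.g. is_python_file('pages/index_html.py') -> True,
#      is_python_file('static/site.css') -> False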
class EventHandler(FileSystemEventHandler):
"""Logs all the events captured."""
def __init__(self, path):
self.path = path
self.python_templates = []
self.__rescan()
self.__generate_templates()
def __rescan(self):
pattern = os.path.join(self.path, "**/*_html.py")
self.python_templates = [
x for x in glob.iglob(pattern, recursive=True)
]
def __generate_templates(self):
current_imports = list(sys.modules.keys())
count_templates = 0
error_text = ""
for template in self.python_templates:
rel_path = os.path.splitext(
os.path.relpath(template, self.path))[0]
import_path = re.sub(os.path.sep, ".", rel_path)
try:
mod = importlib.import_module(import_path)
outputfile = re.sub(r'_html\.py$', '.html', template)
with open(outputfile, 'w') as of:
for line in mod.result():
of.write(str(line))
of.write(os.linesep)
            except Exception:
error_text += traceback.format_exc() + os.linesep
else:
count_templates += 1
for new_import in [
x for x in sys.modules.keys() if x not in current_imports
]:
del sys.modules[new_import]
print(
"Generated %d templates on %s"
% (count_templates, datetime.datetime.now())
)
if error_text:
print(error_text)
def on_moved(self, event):
super(EventHandler, self).on_moved(event)
if event.is_directory or is_python_file(event.src_path):
self.__rescan()
self.__generate_templates()
def on_created(self, event):
super(EventHandler, self).on_created(event)
if event.is_directory or is_python_file(event.src_path):
self.__rescan()
self.__generate_templates()
def on_deleted(self, event):
super(EventHandler, self).on_deleted(event)
pass
def on_modified(self, event):
super(EventHandler, self).on_modified(event)
if is_python_file(event.src_path):
self.__generate_templates()
class Command(BaseCommand):
help = \
'Regenerates foo.html files from foo_html.py ' \
'whenever a python file changes'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = settings.BASE_DIR
event_handler = EventHandler(path)
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(0.5)
except KeyboardInterrupt:
observer.stop()
observer.join()
| mnieber/shared-goal | django/django_dominate/management/commands/update_python_templates.py | Python | apache-2.0 | 3,450 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-07 10:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_warningconstraint'),
]
operations = [
migrations.AlterField(
model_name='preorderposition',
name='secret',
field=models.CharField(db_index=True, max_length=254, unique=True),
),
]
| c3cashdesk/c6sh | src/postix/core/migrations/0005_auto_20160207_1138.py | Python | agpl-3.0 | 485 |
# coding: utf-8
# Author: Milan Kubik
| apophys/ipaqe-dyndir | tests/__init__.py | Python | mit | 38 |
# coding: utf-8
"""
Polling multiple thread queues.
Ref: http://python3-cookbook.readthedocs.io/zh_CN/latest/c12/p13_polling_multiple_thread_queues.html
"""
import queue
import socket
import os
class PollableQueue(queue.Queue):
def __init__(self):
super().__init__()
        # Create a pair of connected sockets
if os.name == "posix":
self._putsocket, self._getsocket = socket.socketpair()
else:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("127.0.0.1", 0))
server.listen(1)
self._putsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._putsocket.connect(server.getsockname())
(self._getsocket, _) = server.accept()
server.close()
def fileno(self):
return self._getsocket.fileno()
def put(self, item):
super().put(item)
self._putsocket.send(b'x')
def get(self):
self._getsocket.recv(1)
return super().get()
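# Design note: select() can only wait on real file descriptors, which a plain
# queue.Queue lacks; pairing each queue with a connected socket pair and
# sending one byte per put() gives select() something to poll, while get()
# drains that byte to keep the two in sync.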
# Example
import select
import threading
import time
def consumer(queues):
"""同时从多个队列中读取数据"""
while True:
        can_read, _, _ = select.select(queues, [], [])
        for r in can_read:
item = r.get()
print("Got:", item)
q1 = PollableQueue()
q2 = PollableQueue()
q3 = PollableQueue()
t = threading.Thread(target=consumer, args=([q1, q2, q3],))
t.daemon = True
t.start()
# Feed data to the queues
q1.put(1)
q2.put(10)
q3.put("Hello")
q2.put(15)
# ...
| Ginkgo-Biloba/Misc-Python | cookbook/PollableQueue.py | Python | gpl-3.0 | 1,349 |
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.
import dateutil.relativedelta
import logging
from django.db.models import (
Count,
F,
)
from django.db import transaction
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from rest_framework.permissions import SAFE_METHODS
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework import status
from awx.main.constants import ACTIVE_STATES
from awx.main.utils import (
get_object_or_400,
parse_yaml_or_json,
)
from awx.main.models.ha import (
Instance,
InstanceGroup,
)
from awx.main.models.organization import Team
from awx.main.models.projects import Project
from awx.main.models.inventory import Inventory
from awx.main.models.jobs import JobTemplate
from awx.conf.license import (
feature_enabled,
LicenseForbids,
)
from awx.api.exceptions import ActiveJobConflict
logger = logging.getLogger('awx.api.views.mixin')
class ActivityStreamEnforcementMixin(object):
'''
Mixin to check that license supports activity streams.
'''
def check_permissions(self, request):
ret = super(ActivityStreamEnforcementMixin, self).check_permissions(request)
if not feature_enabled('activity_streams'):
raise LicenseForbids(_('Your license does not allow use of the activity stream.'))
return ret
class SystemTrackingEnforcementMixin(object):
'''
Mixin to check that license supports system tracking.
'''
def check_permissions(self, request):
ret = super(SystemTrackingEnforcementMixin, self).check_permissions(request)
if not feature_enabled('system_tracking'):
raise LicenseForbids(_('Your license does not permit use of system tracking.'))
return ret
class WorkflowsEnforcementMixin(object):
'''
Mixin to check that license supports workflows.
'''
def check_permissions(self, request):
ret = super(WorkflowsEnforcementMixin, self).check_permissions(request)
if not feature_enabled('workflows') and request.method not in ('GET', 'OPTIONS', 'DELETE'):
raise LicenseForbids(_('Your license does not allow use of workflows.'))
return ret
class UnifiedJobDeletionMixin(object):
'''
Special handling when deleting a running unified job object.
'''
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(self.model, 'delete', obj):
raise PermissionDenied()
try:
if obj.unified_job_node.workflow_job.status in ACTIVE_STATES:
raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.'))
except self.model.unified_job_node.RelatedObjectDoesNotExist:
pass
# Still allow deletion of new status, because these can be manually created
if obj.status in ACTIVE_STATES and obj.status != 'new':
raise PermissionDenied(detail=_("Cannot delete running job resource."))
elif not obj.event_processing_finished:
# Prohibit deletion if job events are still coming in
if obj.finished and now() < obj.finished + dateutil.relativedelta.relativedelta(minutes=1):
# less than 1 minute has passed since job finished and events are not in
return Response({"error": _("Job has not finished processing events.")},
status=status.HTTP_400_BAD_REQUEST)
else:
# if it has been > 1 minute, events are probably lost
logger.warning('Allowing deletion of {} through the API without all events '
'processed.'.format(obj.log_format))
obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class InstanceGroupMembershipMixin(object):
'''
This mixin overloads attach/detach so that it calls InstanceGroup.save(),
triggering a background recalculation of policy-based instance group
membership.
'''
def attach(self, request, *args, **kwargs):
response = super(InstanceGroupMembershipMixin, self).attach(request, *args, **kwargs)
sub_id, res = self.attach_validate(request)
if status.is_success(response.status_code):
if self.parent_model is Instance:
                inst_name = self.get_parent_object().hostname
else:
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
with transaction.atomic():
ig_qs = InstanceGroup.objects.select_for_update()
if self.parent_model is Instance:
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
else:
# similar to get_parent_object, but selected for update
parent_filter = {
self.lookup_field: self.kwargs.get(self.lookup_field, None),
}
ig_obj = get_object_or_404(ig_qs, **parent_filter)
if inst_name not in ig_obj.policy_instance_list:
ig_obj.policy_instance_list.append(inst_name)
ig_obj.save(update_fields=['policy_instance_list'])
return response
def is_valid_relation(self, parent, sub, created=False):
if sub.is_isolated():
return {'error': _('Isolated instances may not be added or removed from instances groups via the API.')}
if self.parent_model is InstanceGroup:
ig_obj = self.get_parent_object()
if ig_obj.controller_id is not None:
return {'error': _('Isolated instance group membership may not be managed via the API.')}
return None
def unattach_validate(self, request):
(sub_id, res) = super(InstanceGroupMembershipMixin, self).unattach_validate(request)
if res:
return (sub_id, res)
sub = get_object_or_400(self.model, pk=sub_id)
attach_errors = self.is_valid_relation(None, sub)
if attach_errors:
return (sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST))
return (sub_id, res)
def unattach(self, request, *args, **kwargs):
response = super(InstanceGroupMembershipMixin, self).unattach(request, *args, **kwargs)
if status.is_success(response.status_code):
sub_id = request.data.get('id', None)
if self.parent_model is Instance:
inst_name = self.get_parent_object().hostname
else:
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
with transaction.atomic():
ig_qs = InstanceGroup.objects.select_for_update()
if self.parent_model is Instance:
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
else:
# similar to get_parent_object, but selected for update
parent_filter = {
self.lookup_field: self.kwargs.get(self.lookup_field, None),
}
ig_obj = get_object_or_404(ig_qs, **parent_filter)
if inst_name in ig_obj.policy_instance_list:
                ig_obj.policy_instance_list.remove(inst_name)
ig_obj.save(update_fields=['policy_instance_list'])
return response
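# Minimal sketch (an assumption, not code from this module) of the row-locking
# pattern InstanceGroupMembershipMixin relies on when mutating
# policy_instance_list under concurrency:
#
#   with transaction.atomic():
#       ig = InstanceGroup.objects.select_for_update().get(pk=ig_pk)
#       if hostname not in ig.policy_instance_list:
#           ig.policy_instance_list.append(hostname)
#           ig.save(update_fields=['policy_instance_list'])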
class RelatedJobsPreventDeleteMixin(object):
def perform_destroy(self, obj):
self.check_related_active_jobs(obj)
return super(RelatedJobsPreventDeleteMixin, self).perform_destroy(obj)
def check_related_active_jobs(self, obj):
        active_jobs = obj.get_active_jobs()
        if active_jobs:
            raise ActiveJobConflict(active_jobs)
        time_cutoff = now() - dateutil.relativedelta.relativedelta(minutes=1)
        recent_jobs = obj._get_related_jobs().filter(finished__gte=time_cutoff)
for unified_job in recent_jobs.get_real_instances():
if not unified_job.event_processing_finished:
raise PermissionDenied(_(
'Related job {} is still processing events.'
).format(unified_job.log_format))
class OrganizationCountsMixin(object):
def get_serializer_context(self, *args, **kwargs):
full_context = super(OrganizationCountsMixin, self).get_serializer_context(*args, **kwargs)
if self.request is None:
return full_context
db_results = {}
org_qs = self.model.accessible_objects(self.request.user, 'read_role')
org_id_list = org_qs.values('id')
if len(org_id_list) == 0:
if self.request.method == 'POST':
full_context['related_field_counts'] = {}
return full_context
inv_qs = Inventory.accessible_objects(self.request.user, 'read_role')
project_qs = Project.accessible_objects(self.request.user, 'read_role')
# Produce counts of Foreign Key relationships
db_results['inventories'] = inv_qs\
.values('organization').annotate(Count('organization')).order_by('organization')
db_results['teams'] = Team.accessible_objects(
self.request.user, 'read_role').values('organization').annotate(
Count('organization')).order_by('organization')
JT_project_reference = 'project__organization'
JT_inventory_reference = 'inventory__organization'
db_results['job_templates_project'] = JobTemplate.accessible_objects(
self.request.user, 'read_role').exclude(
project__organization=F(JT_inventory_reference)).values(JT_project_reference).annotate(
Count(JT_project_reference)).order_by(JT_project_reference)
db_results['job_templates_inventory'] = JobTemplate.accessible_objects(
self.request.user, 'read_role').values(JT_inventory_reference).annotate(
Count(JT_inventory_reference)).order_by(JT_inventory_reference)
db_results['projects'] = project_qs\
.values('organization').annotate(Count('organization')).order_by('organization')
# Other members and admins of organization are always viewable
db_results['users'] = org_qs.annotate(
users=Count('member_role__members', distinct=True),
admins=Count('admin_role__members', distinct=True)
).values('id', 'users', 'admins')
count_context = {}
for org in org_id_list:
org_id = org['id']
count_context[org_id] = {
'inventories': 0, 'teams': 0, 'users': 0, 'job_templates': 0,
'admins': 0, 'projects': 0}
for res, count_qs in db_results.items():
if res == 'job_templates_project':
org_reference = JT_project_reference
elif res == 'job_templates_inventory':
org_reference = JT_inventory_reference
elif res == 'users':
org_reference = 'id'
else:
org_reference = 'organization'
for entry in count_qs:
org_id = entry[org_reference]
if org_id in count_context:
if res == 'users':
count_context[org_id]['admins'] = entry['admins']
count_context[org_id]['users'] = entry['users']
continue
count_context[org_id][res] = entry['%s__count' % org_reference]
# Combine the counts for job templates by project and inventory
for org in org_id_list:
org_id = org['id']
count_context[org_id]['job_templates'] = 0
for related_path in ['job_templates_project', 'job_templates_inventory']:
if related_path in count_context[org_id]:
count_context[org_id]['job_templates'] += count_context[org_id].pop(related_path)
full_context['related_field_counts'] = count_context
return full_context
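    # Shape of the context produced above (illustrative values only):
    #
    #   full_context['related_field_counts'] == {
    #       1: {'inventories': 3, 'teams': 1, 'users': 10, 'admins': 2,
    #           'projects': 4, 'job_templates': 7},
    #   }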
class ControlledByScmMixin(object):
'''
    Mixin to reset the SCM inventory commit hash
    if anything that it manages changes.
'''
def _reset_inv_src_rev(self, obj):
if self.request.method in SAFE_METHODS or not obj:
return
project_following_sources = obj.inventory_sources.filter(
update_on_project_update=True, source='scm')
if project_following_sources:
# Allow inventory changes unrelated to variables
if self.model == Inventory and (
not self.request or not self.request.data or
parse_yaml_or_json(self.request.data.get('variables', '')) == parse_yaml_or_json(obj.variables)):
return
project_following_sources.update(scm_last_revision='')
def get_object(self):
obj = super(ControlledByScmMixin, self).get_object()
self._reset_inv_src_rev(obj)
return obj
def get_parent_object(self):
obj = super(ControlledByScmMixin, self).get_parent_object()
self._reset_inv_src_rev(obj)
return obj
| wwitzel3/awx | awx/api/views/mixin.py | Python | apache-2.0 | 13,324 |
#!/usr/bin/python
import OpenSSL.crypto
import argparse
import base64
import glob
import k8s
import os
import shutil
import yaml
def sn():
sn = int(open("/etc/origin/master/ca.serial.txt").read(), 16)
sntext = "%X" % (sn + 1)
if len(sntext) % 2:
sntext = "0" + sntext
open("/etc/origin/master/ca.serial.txt", "w").write(sntext)
return sn
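# Example of sn()'s behaviour: a stored serial of "0F" is read as 15, written
# back as "10", and 15 is returned; the hex text is padded to an even number
# of digits, matching openssl's serial-file convention.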
def make_cert(fn, o, cn, san, eku):
ca_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
open("/etc/origin/master/ca.crt").read())
ca_key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
open("/etc/origin/master/ca.key").read())
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
cert = OpenSSL.crypto.X509()
cert.set_version(2)
cert.set_serial_number(sn())
if o:
cert.get_subject().O = o
cert.get_subject().CN = cn
cert.gmtime_adj_notBefore(-60 * 60)
cert.gmtime_adj_notAfter((2 * 365 * 24 - 1) * 60 * 60)
cert.set_issuer(ca_cert.get_subject())
cert.set_pubkey(key)
cert.add_extensions([
OpenSSL.crypto.X509Extension("keyUsage", True, "digitalSignature, keyEncipherment"),
OpenSSL.crypto.X509Extension("extendedKeyUsage", False, eku),
OpenSSL.crypto.X509Extension("basicConstraints", True, "CA:FALSE")
])
if san:
cert.add_extensions([
OpenSSL.crypto.X509Extension("subjectAltName", False, san)
])
cert.sign(ca_key, "sha256")
with os.fdopen(os.open("%s.key" % fn, os.O_WRONLY | os.O_CREAT, 0600),
"w") as f:
f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
key))
with open("%s.crt" % fn, "w") as f:
f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
cert))
f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
ca_cert))
def do_master_config():
# update master-config.yaml
f = "/etc/origin/master/master-config.yaml"
y = yaml.load(open(f, "r").read())
y["assetConfig"]["loggingPublicURL"] = "https://kibana." + args.subdomain + "/"
y["assetConfig"]["masterPublicURL"] = "https://" + args.public_hostname + ":8443"
y["assetConfig"]["metricsPublicURL"] = "https://hawkular-metrics." + args.subdomain + "/hawkular/metrics"
y["assetConfig"]["publicURL"] = "https://" + args.public_hostname + ":8443/console/"
y["corsAllowedOrigins"] = ["127.0.0.1",
"localhost",
"172.30.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster.local",
"openshift",
"openshift.default",
"openshift.default.svc",
"openshift.default.svc.cluster.local",
args.private_ip,
args.private_hostname,
args.public_ip,
args.public_hostname
]
y["etcdClientInfo"]["urls"] = ["https://" + args.private_hostname + ":4001"]
y["etcdConfig"]["address"] = args.private_hostname + ":4001"
y["etcdConfig"]["peerAddress"] = args.private_hostname + ":7001"
y["kubernetesMasterConfig"]["masterIP"] = args.private_ip
y["masterPublicURL"] = "https://" + args.public_hostname + ":8443"
y["oauthConfig"]["assetPublicURL"] = "https://" + args.public_hostname + ":8443/console/"
y["oauthConfig"]["masterPublicURL"] = "https://" + args.public_hostname + ":8443"
y["oauthConfig"]["masterURL"] = "https://" + args.private_hostname + ":8443"
y["routingConfig"]["subdomain"] = "apps." + args.subdomain
open(f, "w").write(yaml.dump(y, default_flow_style=False))
# rebuild SSL certs
for cert in ["etcd.server", "master.server"]:
make_cert("/etc/origin/master/" + cert, None, "172.30.0.1",
", ".join(["DNS:kubernetes",
"DNS:kubernetes.default",
"DNS:kubernetes.default.svc",
"DNS:kubernetes.default.svc.cluster.local",
"DNS:openshift",
"DNS:openshift.default",
"DNS:openshift.default.svc",
"DNS:openshift.default.svc.cluster.local",
"DNS:" + args.public_hostname,
"DNS:" + args.private_hostname,
"DNS:172.30.0.1",
"DNS:" + args.public_ip,
"DNS:" + args.private_ip,
"IP:172.30.0.1",
"IP:" + args.public_ip,
"IP:" + args.private_ip]), "serverAuth")
# rebuild service kubeconfig files
ca = base64.b64encode(open("/etc/origin/master/ca.crt").read())
private_hostname_ = args.private_hostname.replace(".", "-")
public_hostname_ = args.public_hostname.replace(".", "-")
for kc in ["admin", "openshift-master", "openshift-registry", "openshift-router"]:
y = {"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": public_hostname_ + ":8443",
"cluster": {"certificate-authority-data": ca,
"server": "https://" + args.public_hostname + ":8443"}},
{"name": private_hostname_ + ":8443",
"cluster": {"certificate-authority-data": ca,
"server": "https://" + args.private_hostname + ":8443"}}],
"users": [{"name": "system:" + kc + "/" + private_hostname_ + ":8443",
"user": {"client-certificate-data": base64.b64encode(open("/etc/origin/master/" + kc + ".crt").read()),
"client-key-data": base64.b64encode(open("/etc/origin/master/" + kc + ".key").read())}}],
"contexts": [{"name": "default/" + public_hostname_ + ":8443/system:" + kc,
"context": {"cluster": public_hostname_ + ":8443",
"namespace": "default",
"user": "system:" + kc + "/" + private_hostname_ + ":8443"}},
{"name": "default/" + private_hostname_ + ":8443/system:" + kc,
"context": {"cluster": private_hostname_ + ":8443",
"namespace": "default",
"user": "system:" + kc + "/" + private_hostname_ + ":8443"}}],
"current-context": "default/" + private_hostname_ + ":8443/system:" + kc}
open("/etc/origin/master/" + kc + ".kubeconfig", "w").write(yaml.dump(y, default_flow_style=False))
# rebuild root's kubeconfig file
shutil.copy("/etc/origin/master/admin.kubeconfig", "/root/.kube/config")
def do_node_config():
# update node-config.yaml
f = "/etc/origin/node/node-config.yaml"
y = yaml.load(open(f, "r").read())
y["masterKubeConfig"] = "system:node:" + args.private_hostname + ".kubeconfig"
y["nodeIP"] = args.private_ip
y["nodeName"] = args.private_hostname
open(f, "w").write(yaml.dump(y, default_flow_style=False))
# remove old node SSL certs and kubeconfig files
for f in glob.glob("/etc/origin/node/system:node:*"):
os.unlink(f)
# rebuild node SSL certs
make_cert("/etc/origin/node/server", None, "172.30.0.1",
", ".join(["DNS:kubernetes",
"DNS:kubernetes.default",
"DNS:kubernetes.default.svc",
"DNS:kubernetes.default.svc.cluster.local",
"DNS:openshift",
"DNS:openshift.default",
"DNS:openshift.default.svc",
"DNS:openshift.default.svc.cluster.local",
"DNS:" + args.public_hostname,
"DNS:" + args.private_hostname,
"DNS:172.30.0.1",
"DNS:" + args.public_ip,
"DNS:" + args.private_ip,
"IP:172.30.0.1",
"IP:" + args.public_ip,
"IP:" + args.private_ip]), "serverAuth")
make_cert("/etc/origin/node/system:node:" + args.private_hostname, "system:nodes", "system:node:" + args.private_hostname, None, "clientAuth")
# rebuild node kubeconfig file
private_hostname_ = args.private_hostname.replace(".", "-")
y = {"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": private_hostname_ + ":8443",
"cluster": {"certificate-authority-data": base64.b64encode(open("/etc/origin/node/ca.crt").read()),
"server": "https://" + args.private_hostname + ":8443"}}],
"users": [{"name": "system:node:" + args.private_hostname + "/" + private_hostname_ + ":8443",
"user": {"client-certificate-data": base64.b64encode(open("/etc/origin/node/system:node:" + args.private_hostname + ".crt").read()),
"client-key-data": base64.b64encode(open("/etc/origin/node/system:node:" + args.private_hostname + ".key").read())}}],
"contexts": [{"name": "default/" + private_hostname_ + ":8443/system:node:" + args.private_hostname,
"context": {"cluster": private_hostname_ + ":8443",
"namespace": "default",
"user": "system:node:" + args.private_hostname + "/" + private_hostname_ + ":8443"}}],
"current-context": "default/" + private_hostname_ + ":8443/system:node:" + args.private_hostname}
open("/etc/origin/node/system:node:" + args.private_hostname + ".kubeconfig", "w").write(yaml.dump(y, default_flow_style=False))
def do_restart_services():
svcs = ["atomic-openshift-master", "atomic-openshift-node"]
for svc in svcs[::-1]:
os.system("systemctl stop " + svc)
    # forcibly remove all Docker containers (running and stopped)
os.system("docker ps -aq | xargs docker rm -f")
# trigger complete reconfiguration of OVS
os.unlink("/run/openshift-sdn/docker-network")
os.system("ovs-ofctl -O OpenFlow13 del-flows br0")
for svc in svcs:
os.system("systemctl start " + svc)
def do_cleanup(api):
for i in api.get("/api/v1/nodes")._items:
if i.metadata.name != args.private_hostname:
api.delete(i.metadata.selfLink)
for i in api.get("/oapi/v1/hostsubnets")._items:
if i.metadata.name != args.private_hostname:
api.delete(i.metadata.selfLink)
for i in api.get("/oapi/v1/oauthclients")._items:
i.redirectURIs = [uri for uri in i.redirectURIs if not ("8443" in uri and args.public_hostname not in uri)]
api.put(i.metadata.selfLink, i)
for i in api.get("/api/v1/events")._items:
api.delete(i.metadata.selfLink)
for i in api.get("/api/v1/pods")._items:
try:
api.delete(i.metadata.selfLink)
except Exception:
print "** Exception **"
def do_services_config_post(api):
# replace DCs (we replace so that latestVersion is reset)
dc = api.get("/oapi/v1/namespaces/default/deploymentconfigs/docker-registry")
delete_dc(api, dc)
set_env(dc.spec.template.spec.containers[0], "OPENSHIFT_MASTER", "https://" + args.public_hostname + ":8443")
dc.metadata = {k: dc.metadata[k] for k in dc.metadata if k in ["labels", "name"]}
del dc.status
api.post("/oapi/v1/namespaces/default/deploymentconfigs", dc)
dc = api.get("/oapi/v1/namespaces/default/deploymentconfigs/router")
delete_dc(api, dc)
set_env(dc.spec.template.spec.containers[0], "OPENSHIFT_MASTER", "https://" + args.public_hostname + ":8443")
dc.metadata = {k: dc.metadata[k] for k in dc.metadata if k in ["labels", "name"]}
del dc.status
api.post("/oapi/v1/namespaces/default/deploymentconfigs", dc)
def do_kibana_config_pre(api):
# rebuild SSL cert
make_cert("kibana", None, "kibana",
", ".join(["DNS:kibana",
"DNS:kibana." + args.subdomain,
"DNS:kibana-ops." + args.subdomain]), "serverAuth")
sec = api.get("/api/v1/namespaces/logging/secrets/logging-kibana-proxy")
sec.data["server-cert"] = base64.b64encode(open("kibana.crt").read())
sec.data["server-key"] = base64.b64encode(open("kibana.key").read())
api.put(sec.metadata.selfLink, sec)
def do_kibana_config_post(api):
# replace logging-kibana DC (we replace so that latestVersion is reset)
dc = api.get("/oapi/v1/namespaces/logging/deploymentconfigs/logging-kibana")
delete_dc(api, dc)
set_env(dc.spec.template.spec.containers[1], "OAP_PUBLIC_MASTER_URL", "https://" + args.public_hostname + ":8443")
dc.metadata = {k: dc.metadata[k] for k in dc.metadata if k in ["labels", "name"]}
del dc.status
api.post("/oapi/v1/namespaces/logging/deploymentconfigs", dc)
# fix route hostnames
r = api.get("/oapi/v1/namespaces/logging/routes/kibana")
r.spec.host = "kibana." + args.subdomain
api.put(r.metadata.selfLink, r)
r = api.get("/oapi/v1/namespaces/logging/routes/kibana-ops")
r.spec.host = "kibana-ops." + args.subdomain
api.put(r.metadata.selfLink, r)
def do_hawkular_config_pre(api):
# rebuild SSL cert
make_cert("hawkular-metrics", None, "hawkular-metrics",
", ".join(["DNS:hawkular-metrics",
"DNS:hawkular-metrics." + args.subdomain]), "serverAuth")
open("hawkular-metrics.crt", "a").write(open("/etc/origin/master/ca.crt").read())
# key and cert go into hawkular-metrics-secrets keystore
sec = api.get("/api/v1/namespaces/openshift-infra/secrets/hawkular-metrics-secrets")
pw = sec.data["hawkular-metrics.keystore.password"].decode("base64").strip()
os.system("openssl pkcs12 -export -in hawkular-metrics.crt -inkey hawkular-metrics.key -out hawkular-metrics.pkcs12 -name hawkular-metrics -password pass:" + pw)
os.system("keytool -importkeystore -srckeystore hawkular-metrics.pkcs12 -srcstoretype pkcs12 -destkeystore keystore -deststorepass " + pw + " -srcstorepass " + pw + " -noprompt")
sec.data["hawkular-metrics.keystore"] = base64.b64encode(open("keystore").read())
api.put(sec.metadata.selfLink, sec)
# cert goes into hawkular-metrics-certificate
sec = api.get("/api/v1/namespaces/openshift-infra/secrets/hawkular-metrics-certificate")
os.system("openssl x509 -in hawkular-metrics.crt -out hawkular-metrics.crt.der -outform der")
sec.data["hawkular-metrics.certificate"] = base64.b64encode(open("hawkular-metrics.crt.der").read())
api.put(sec.metadata.selfLink, sec)
# cert also goes into hawkular-cassandra-secrets truststore
sec = api.get("/api/v1/namespaces/openshift-infra/secrets/hawkular-cassandra-secrets")
pw = sec.data["cassandra.truststore.password"].decode("base64").strip()
open("truststore", "w").write(sec.data["cassandra.truststore"].decode("base64"))
os.system("keytool -delete -alias hawkular-metrics -keystore truststore -storepass " + pw + " -noprompt")
os.system("keytool -import -trustcacerts -alias hawkular-metrics -file hawkular-metrics.crt.der -keystore truststore -storepass " + pw + " -noprompt")
sec.data["cassandra.truststore"] = base64.b64encode(open("truststore").read())
api.put(sec.metadata.selfLink, sec)
def do_hawkular_config_post(api):
# fix route hostname
r = api.get("/oapi/v1/namespaces/openshift-infra/routes/hawkular-metrics")
r.spec.host = "hawkular-metrics." + args.subdomain
api.put(r.metadata.selfLink, r)
def connect_api():
return k8s.API("https://" + args.private_hostname + ":8443",
("/etc/origin/master/openshift-master.crt",
"/etc/origin/master/openshift-master.key"))
def delete_dc(api, dc):
# delete DC and cascade to appropriate RCs
api.delete(dc.metadata.selfLink)
for rc in api.get("/api/v1/namespaces/" + dc.metadata.namespace + "/replicationcontrollers")._items:
if rc.metadata.name.startswith(dc.metadata.name + "-"):
delete_rc(api, rc)
def delete_rc(api, rc):
# delete RC and cascade to appropriate pods
api.delete(rc.metadata.selfLink)
for pod in api.get("/api/v1/namespaces/" + rc.metadata.namespace + "/pods")._items:
if pod.metadata.name.startswith(rc.metadata.name + "-"):
api.delete(pod.metadata.selfLink)
def set_env(c, k, v):
c.env = [e for e in c.env if e.name != k]
c.env.append(k8s.AttrDict({"name": k, "value": v}))
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument("private_hostname")
ap.add_argument("public_hostname")
ap.add_argument("private_ip")
ap.add_argument("public_ip")
ap.add_argument("subdomain")
return ap.parse_args()
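# Example invocation (hypothetical hostnames and addresses):
#   ./reip.py ip-10-0-0-12.ec2.internal master.example.com 10.0.0.12 203.0.113.7 example.com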
def main():
# 1. Update daemon configs and certs
do_master_config()
do_node_config()
do_restart_services()
api = connect_api()
# 2. Make necessary changes via API before bulk object delete
do_kibana_config_pre(api)
do_hawkular_config_pre(api)
    # 3. Bulk object delete
do_cleanup(api)
# 4. Post bulk object delete changes
do_services_config_post(api)
do_kibana_config_post(api)
do_hawkular_config_post(api)
if __name__ == "__main__":
args = parse_args()
main()
| RedHatEMEA/aws-ose3 | target/reip.py | Python | apache-2.0 | 18,095 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_groups', '0001_initial'),
('bulk_email', '0003_config_model_feature_flag'),
]
operations = [
migrations.CreateModel(
name='Target',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('target_type', models.CharField(max_length=64, choices=[('myself', 'Myself'), ('staff', 'Staff and instructors'), ('learners', 'All students'), ('cohort', 'Specific cohort')])),
],
),
migrations.AlterField(
model_name='courseemail',
name='to_option',
field=models.CharField(max_length=64, choices=[('deprecated', 'deprecated')]),
),
migrations.CreateModel(
name='CohortTarget',
fields=[
('target_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='bulk_email.Target', on_delete=models.CASCADE)),
('cohort', models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)),
],
bases=('bulk_email.target',),
),
migrations.AddField(
model_name='courseemail',
name='targets',
field=models.ManyToManyField(to='bulk_email.Target'),
),
]
| eduNEXT/edx-platform | lms/djangoapps/bulk_email/migrations/0004_add_email_targets.py | Python | agpl-3.0 | 1,465 |
# Copyright (C) 2016 Ross D Milligan
# GNU GENERAL PUBLIC LICENSE Version 3 (full notice can be found at https://github.com/rdmilligan/PyMovieStudio)
from constants import *
from pygame import mixer
from time import sleep
class Screen:
# initialise
def __init__(self, config_provider, disk, display, replay, graphics):
self.config_provider = config_provider
self.disk = disk
self.display = display
self.replay = replay
self.graphics = graphics
# pygame mixer
mixer.init()
# screen frame
def frame(self, frame_number):
# apply frame delay
sleep(self.config_provider.frame_delay)
# load frame from disk
frame = self.disk.load_frame(self.config_provider.screen_load_from, None, frame_number, self.config_provider.frame_format)
# ensure frame loaded from disk
if frame is None:
return False
# replay effects
self.replay.effects(frame_number, self.disk, self.graphics, self.config_provider.screen_load_from, None)
# replay audio
self.replay.audio(frame_number, self.disk, mixer, self.config_provider.screen_load_from)
# display frame
self.display.frame(frame)
return True
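# Illustrative driver loop (an assumption -- the real entry point lives
# elsewhere in PyMovieStudio):
#
#   screen = Screen(config_provider, disk, display, replay, graphics)
#   frame_number = 0
#   while screen.frame(frame_number):   # returns False once no frame loads
#       frame_number += 1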
| rdmilligan/PyMovieStudio | scripts/screen/screen.py | Python | gpl-3.0 | 1,267 |
from django import http
from django.utils.translation import ugettext
from olympia.access import acl
from olympia.addons.models import Addon
from olympia.amo.feeds import NonAtomicFeed
from olympia.amo.templatetags.jinja_helpers import absolutify, page_name
from olympia.amo.urlresolvers import reverse
from olympia.browse.feeds import AddonFeedMixin
from . import views
class CollectionFeedMixin(NonAtomicFeed):
"""Common pieces for collections in a feed."""
def item_link(self, c):
return absolutify(c.get_url_path())
def item_title(self, c):
return unicode(c.name or '')
def item_description(self, c):
return unicode(c.description or '')
def item_author_name(self, c):
return c.author_username
def item_pubdate(self, c):
sort = self.request.GET.get('sort')
return c.created if sort == 'created' else c.modified
class CollectionFeed(CollectionFeedMixin, NonAtomicFeed):
request = None
def get_object(self, request):
self.request = request
def title(self, c):
app = page_name(self.request.APP)
# L10n: {0} is 'Add-ons for <app>'.
return ugettext(u'Collections :: %s') % app
def link(self):
return absolutify(reverse('collections.list'))
def description(self):
return ugettext(
'Collections are groups of related add-ons that anyone can '
'create and share.')
def items(self):
return views.get_filter(self.request).qs[:20]
class CollectionDetailFeed(AddonFeedMixin, NonAtomicFeed):
def get_object(self, request, username, slug):
self.request = request
c = views.get_collection(request, username, slug)
if not (c.listed or acl.check_collection_ownership(request, c)):
# 403 can't be raised as an exception.
raise http.Http404()
return c
def title(self, c):
app = page_name(self.request.APP)
# L10n: {0} is a collection name, {1} is 'Add-ons for <app>'.
return ugettext(u'{0} :: Collections :: {1}').format(c.name, app)
def link(self, c):
return absolutify(c.feed_url())
def description(self, c):
return c.description
def items(self, c):
addons = Addon.objects.valid() & c.addons.all()
return addons.order_by('-collectionaddon__created')[:20]
| harry-7/addons-server | src/olympia/bandwagon/feeds.py | Python | bsd-3-clause | 2,373 |
#
# try:
# from numpy import *
# a=zeros((rows,cols),float)
# except ImportError:
# #use a list if we have to
# a=[]
# for i in range(rows):
# a+=[[0.]*cols]
# #use a list if we have to
try:
from numpy import *
def matrix(rows, cols):
return zeros((rows,cols),float)
except ImportError:
    def matrix(rows, cols):
        # pure-Python fallback: build a rows x cols list of lists of zeros
        m = []
        for i in range(rows):
            m += [[0.] * cols]
        return m
class Error(Exception):
"""
Base class for exceptions in this module.
"""
pass
class AlignmentError(Error):
"""
Exception raised for errors in interaction with formatting.
Attributes:
expression: input expression in which
the error occurred
message: explanation of the error
"""
def __init__(self, expression, message):
        self.expression = expression
self.message = message
print self.expression, ": ", self.message
class Needle(object):
def __init__(self, series):
self.series = series
def align(self, series, match, gap, flankGap=None):
if flankGap is None:
flankGap = gap
# faster access to local variables
s1, s2 = self.series, series
# init matrix
rows=len(s1)+1
cols=len(s2)+1
a = matrix(rows, cols)
# fill matrix
for i in range(rows):
a[i][0] = i * flankGap
for j in range(cols):
a[0][j] = j * flankGap
for i in range(1,rows):
for j in range(1,cols):
choice1 = a[i-1][j-1] + match(s1[i-1], s2[j-1])
choice2 = a[i-1][j] + gap
choice3 = a[i][j-1] + gap
a[i][j] = min(choice1, choice2, choice3)
# print a[i][j]
# for i in range(0,rows):
# print "\t".join(map(str, a[i]))
# print
# traceback: reconstruct the alignment indeces into aIdx1 and aIdx2
aIdx1 = []
aIdx2 = []
i = len(s1)
j = len(s2)
while i>0 and j>0:
score = a[i][j]
score_diag = a[i-1][j-1]
score_up = a[i][j-1]
score_left = a[i-1][j]
if score == score_diag + match(s1[i-1], s2[j-1]):
aIdx1.insert(0, i-1)
aIdx2.insert(0, j-1)
i -= 1
j -= 1
elif score == score_left + gap:
aIdx1.insert(0, i-1)
aIdx2.insert(0, None)
i -= 1
elif score == score_up + gap:
aIdx1.insert(0, None)
aIdx2.insert(0, j-1)
j -= 1
else:
                raise AlignmentError(score, "no valid traceback step found")
while i>0:
#If we hit j==0 before i==0 we keep going in i.
aIdx1.insert(0, i-1)
aIdx2.insert(0, None)
i -= 1
while j>0:
            #If we hit i==0 before j==0 we keep going in j.
aIdx1.insert(0, None)
aIdx2.insert(0, j-1)
j -= 1
        # copy out the aligned index lists (None marks a gap)
        a1 = list(aIdx1)
        a2 = list(aIdx2)
# return aligned index pairs
return zip(a1, a2)
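# Usage note: align() minimises total cost, so match(x, y) should return a
# distance (0 for identical items) and gap a positive penalty; e.g. in the
# __main__ block below, match=abs(x - y) with gap=101 makes a gap dearer than
# any substitution, so gaps appear only where the length difference forces them.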
if __name__ == "__main__":
l1 = [10,20,30,40,50]
l2 = [20,30,50,60,70,80,90]
nl = Needle(l1)
for x, y in nl.align(l2, lambda x, y: abs(x-y), 101):
print x, y
# #!/usr/bin/env python
# from sys import *
# seq1='GAGACCGCCATGGCGACCCTGGAAAAGCTGATGAAGGCCCT'
# seq2='AGACCCAATGCGACCCTGAAAAAGCTGATGAAGGCCTTTTT'
#
# print'''
# # Both sequences are similar to the human protein huntingtin.
# # Spurious expanded trinucleotide (CAG) repeats in this protein
# # cause it to aggregate in neurons, leading to Huntington's disease.
#
# # The Needleman-Wunsch algorithm performs a global alignment
# # of two sequences (of length n and m)
# # For a given similarity matrix s
# # (containing the penalties for character match-mismatch)
# # and a LINEAR gap penalty the algorithm is guaranteed
# # to find the alignment with highest score (in O(nm) time).
# # The algorithm is outlined through comments to the source.
# '''
# stderr.write('Calculating')
# rows=len(seq1)+1
# cols=len(seq2)+1
# try:
# #use fast numerical arrays if we can
# from numpy import *
# a=zeros((rows,cols),float)
# except ImportError:
# #use a list if we have to
# a=[]
# for i in range(rows):
# a+=[[0.]*cols]
#
# #################################################
# ## Needleman-Wunsch ##
# #################################################
#
# match=1.
# mismatch=-1.
# gap=-1.
# s={
# 'AA': match,'AG':mismatch,'AC':mismatch,'AT':mismatch,\
# 'GA':mismatch,'GG': match,'GC':mismatch,'GT':mismatch,\
# 'CA':mismatch,'CG':mismatch,'CC': match,'CT':mismatch,\
# 'TA':mismatch,'TG':mismatch,'TC':mismatch,'TT': match,\
# }
# for i in range(rows):
# a[i][0] = 0
# for j in range(cols):
# a[0][j] = 0
# for i in range(1,rows):
# for j in range(1,cols):
# # Dynamic programming -- aka. divide and conquer:
# # Since gap penalties are linear in gap size
# # the score of an alignment of length l only depends on
# # the l-th characters in the alignment (match - mismatch - gap)
# # and the score of the one shorter (l-1) alignment,
# # i.e. we can calculate how to extend an arbitrary alignment
# # solely based on the previous score value.
# choice1 = a[i-1][j-1] + s[(seq1[i-1] + seq2[j-1])]
# choice2 = a[i-1][j] + gap
# choice3 = a[i][j-1] + gap
# a[i][j] = max(choice1, choice2, choice3)
#
#
# aseq1 = ''
# aseq2 = ''
# #We reconstruct the alignment into aseq1 and aseq2,
# i = len(seq1)
# j = len(seq2)
# while i>0 and j>0:
# if i%10==0:
# stderr.write('.')
#
# #by performing a traceback of how the matrix was filled out above,
# #i.e. we find a shortest path from a[n,m] to a[0,0]
# score = a[i][j]
# score_diag = a[i-1][j-1]
# score_up = a[i][j-1]
# score_left = a[i-1][j]
# if score == score_diag + s[seq1[i-1] + seq2[j-1]]:
# aseq1 = seq1[i-1] + aseq1
# aseq2 = seq2[j-1] + aseq2
# i -= 1
# j -= 1
# elif score == score_left + gap:
# aseq1 = seq1[i-1] + aseq1
# aseq2 = '_' + aseq2
# i -= 1
# elif score == score_up + gap:
# aseq1 = '_' + aseq1
# aseq2 = seq2[j-1] + aseq2
# j -= 1
# else:
# #should never get here..
# print 'ERROR'
# i=0
# j=0
# aseq1='ERROR';aseq2='ERROR';seq1='ERROR';seq2='ERROR'
# while i>0:
# #If we hit j==0 before i==0 we keep going in i.
# aseq1 = seq1[i-1] + aseq1
# aseq2 = '_' + aseq2
# i -= 1
#
# while j>0:
# #If we hit i==0 before j==0 we keep going in j.
# aseq1 = '_' + aseq1
# aseq2 = seq2[j-1] + aseq2
# j -= 1
#
# #################################################
# #################################################
# ## Full backtrack ##
# #################################################
#
# #To reconstruct all alignments is somewhat tedious..
# def make_graph():
# #the simplest way is to make a graph of the possible constructions of the values in a
# graph={}
# for i in range(1,cols)[::-1]:
# graph[(i,0)] = [(i-1,0)]
# graph[(0,i)] = [(0,i-1)]
# for j in range(1,cols)[::-1]:
# graph[(i,j)]=[]
# score = a[i][j]
# score_diag = a[i-1][j-1]
# score_up = a[i][j-1]
# score_left = a[i-1][j]
# if score == score_diag + s[seq1[i-1] + seq2[j-1]]:
# graph[(i,j)] += [(i-1,j-1)]
# if score == score_left + gap:
# graph[(i,j)] += [(i-1,j)]
# if score == score_up + gap:
# graph[(i,j)] += [(i,j-1)]
# return graph
#
# def find_all_paths(graph, start, end, path=[]):
# #and then to recursively find all paths
# #from bottom right to top left..
# path = path + [start]
# # print start
# if start == end:
# return [path]
# if not graph.has_key(start):
# return []
# paths = []
# for node in graph[start]:
# if node not in path:
# newpaths = find_all_paths(graph, node, end, path)
# for newpath in newpaths:
# paths.append(newpath)
# return paths
#
# graph=make_graph()
# tracks=find_all_paths(graph,(cols-1,rows-1),(0,0))
# baseqs1=[]
# baseqs2=[]
# for track in tracks:
# #using these we can reconstruct all optimal alignments
# baseq1 = ''
# baseq2 = ''
# last_step=(cols-1,rows-1)
# for step in track:
# i,j=last_step
# if i==step[0]:
# baseq1 = '_' + baseq1
# baseq2 = seq2[j-1] + baseq2
# elif j==step[1]:
# baseq1 = seq1[i-1] + baseq1
# baseq2 = '_' + baseq2
# else:
# baseq1 = seq1[i-1] + baseq1
# baseq2 = seq2[j-1] + baseq2
#
# last_step=step
# baseqs1+=[baseq1]
# baseqs2+=[baseq2]
# #################################################
#
# print ''
# print '# Using: match='+repr(match)+'; mismatch='+repr(mismatch)+'; gap='+repr(gap)
# print seq1
# print seq2
# print '# We get e.g.:'
# print aseq1
# print aseq2
# print ''
# gaps=0
# mms=0
# ms=0
# for i in range(len(aseq1)):
# if aseq1[i]==aseq2[i]:
# aseq1=aseq1[:i]+'='+aseq1[i+1:]
# aseq2=aseq2[:i]+'='+aseq2[i+1:]
# ms+=1
# else:
# if aseq1[i]=='_' or aseq2[i]=='_':
# gaps+=1
# else:
# mms+=1
#
# print aseq1
# print aseq2
# print ''
# print ms,' matches; ',mms,' mismatches; ',gaps,' gaps.'
# print '# With a score of'
# print a[rows-2][cols-2],'/',min(len(seq1),len(seq2))
#
# print 'Optimal alignment is ',len(tracks),' times degenerate:'
# print ''
# for i in range(len(tracks)):
# print i+1,'.'
# print baseqs1[i]
# print baseqs2[i]
| kaspermunch/MultiPurpose | MultiPurpose/Needle.py | Python | gpl-2.0 | 10,265 |
import numpy as np
from bokeh.document import Document
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid
from bokeh.models.glyphs import Rect
from bokeh.plotting import show
N = 9
x = np.linspace(-2, 2, N)
y = x**2
w = x/15.0 + 0.3
h = y/20.0 + 0.3
source = ColumnDataSource(dict(x=x, y=y, w=w, h=h))
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(
title=None, x_range=xdr, y_range=ydr, plot_width=300, plot_height=300,
h_symmetry=False, v_symmetry=False, min_border=0, toolbar_location=None)
glyph = Rect(x="x", y="y", width="w", height="h", angle=-0.7, fill_color="#CAB2D6")
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
doc = Document()
doc.add_root(plot)
show(plot)
| phobson/bokeh | tests/glyphs/Rect.py | Python | bsd-3-clause | 924 |
import Rohwabot
import random
import glob
import serverPackets
import exceptions
import userHelper
import time
import systemHelper
"""
Commands callbacks
Must have fro, chan and messages as arguments
fro -- name of who triggered the command
chan -- channel where the message was sent
message -- list containing arguments passed from the message
[0] = first argument
[1] = second argument
. . .
return the message or **False** if there's no response by the bot
"""
def faq(fro, chan, message):
if message[0] == "rules":
return "Please make sure to check (Ripple's rules)[http://ripple.moe/?p=23]."
elif message[0] == "rules":
return "Please make sure to check (Ripple's rules)[http://ripple.moe/?p=23]."
elif message[0] == "swearing":
return "Please don't abuse swearing"
elif message[0] == "spam":
return "Please don't spam"
elif message[0] == "offend":
return "Please don't offend other players"
elif message[0] == "github":
return "(Ripple's Github page!)[https://github.com/osuripple/ripple]"
elif message[0] == "discord":
return "(Join Ripple's Discord!)[https://discord.gg/0rJcZruIsA6rXuIx]"
elif message[0] == "blog":
return "You can find the latest Ripple news on the (blog)[https://ripple.moe/blog/]!"
elif message[0] == "changelog":
return "Check the (changelog)[https://ripple.moe/index.php?p=17] !"
elif message[0] == "status":
return "Check the server status (here!)[https://ripple.moe/index.php?p=27]"
def roll(fro, chan, message):
maxPoints = 100
if len(message) >= 1:
		if message[0].isdigit() and int(message[0]) > 0:
maxPoints = int(message[0])
points = random.randrange(0,maxPoints)
return "{} rolls {} points!".format(fro, str(points))
def ask(fro, chan, message):
return random.choice(["yes", "no", "maybe"])
def alert(fro, chan, message):
glob.tokens.enqueueAll(serverPackets.notification(' '.join(message[:])))
return False
def moderated(fro, chan, message):
try:
# Make sure we are in a channel and not PM
if chan.startswith("#") == False:
raise exceptions.moderatedPMException
# Get on/off
enable = True
if len(message) >= 1:
if message[0] == "off":
enable = False
# Turn on/off moderated mode
glob.channels.channels[chan].moderated = enable
return "This channel is {} in moderated mode!".format("now" if enable else "no longer")
except exceptions.moderatedPMException:
return "You are trying to put a private chat in moderated mode. Are you serious?!? You're fired."
def kickAll(fro, chan, message):
# Kick everyone but mods/admins
toKick = []
for key, value in glob.tokens.tokens.items():
if value.rank < 3:
toKick.append(key)
# Loop though users to kick (we can't change dictionary size while iterating)
for i in toKick:
if i in glob.tokens.tokens:
glob.tokens.tokens[i].kick()
return "Whoops! Rip everyone."
def kick(fro, chan, message):
# Get parameters
target = message[0].replace("_", " ")
# Get target token and make sure is connected
targetToken = glob.tokens.getTokenFromUsername(target)
if targetToken == None:
return "{} is not online".format(target)
# Kick user
targetToken.kick()
# Bot response
return "{} has been kicked from the server.".format(target)
def RohwaBotReconnect(fro, chan, message):
# Check if RohwaBot is already connected
if glob.tokens.getTokenFromUserID(999) != None:
return"RohwaBot is already connected to Bancho"
# RohwaBot is not connected, connect it
RohwaBot.connect()
return False
def silence(fro, chan, message):
	# normalise arguments to lower case
	message = [m.lower() for m in message]
target = message[0].replace("_", " ")
amount = message[1]
unit = message[2]
reason = ' '.join(message[3:])
# Get target user ID
targetUserID = userHelper.getID(target)
# Make sure the user exists
if targetUserID == False:
return "{}: user not found".format(target)
# Calculate silence seconds
if unit == 's':
silenceTime = int(amount)
elif unit == 'm':
silenceTime = int(amount)*60
elif unit == 'h':
silenceTime = int(amount)*3600
elif unit == 'd':
silenceTime = int(amount)*86400
else:
return "Invalid time unit (s/m/h/d)."
# Max silence time is 7 days
if silenceTime > 604800:
return "Invalid silence time. Max silence time is 7 days."
# Calculate silence end time
endTime = int(time.time())+silenceTime
# Update silence end in db
userHelper.silence(targetUserID, endTime, reason)
# Send silence packet to target if he's connected
targetToken = glob.tokens.getTokenFromUsername(target)
if targetToken != None:
targetToken.enqueue(serverPackets.silenceEndTime(silenceTime))
return "{} has been silenced for the following reason: {}".format(target, reason)
def removeSilence(fro, chan, message):
# Get parameters
	# normalise arguments to lower case
	message = [m.lower() for m in message]
target = message[0].replace("_", " ")
# Make sure the user exists
targetUserID = userHelper.getID(target)
if targetUserID == False:
return "{}: user not found".format(target)
# Reset user silence time and reason in db
userHelper.silence(targetUserID, 0, "")
# Send new silence end packet to user if he's online
targetToken = glob.tokens.getTokenFromUsername(target)
if targetToken != None:
targetToken.enqueue(serverPackets.silenceEndTime(0))
return "{}'s silence reset".format(target)
def restartShutdown(restart):
"""Restart (if restart = True) or shutdown (if restart = False) pep.py safely"""
msg = "We are performing some maintenance. Bancho will {} in 5 seconds. Thank you for your patience.".format("restart" if restart else "shutdown")
systemHelper.scheduleShutdown(5, restart, msg)
return msg
def systemRestart(fro, chan, message):
return restartShutdown(True)
def systemShutdown(fro, chan, message):
return restartShutdown(False)
def systemReload(fro, chan, message):
#Reload settings from bancho_settings
glob.banchoConf.loadSettings()
# Reload channels too
glob.channels.loadChannels()
# Send new channels and new bottom icon to everyone
glob.tokens.enqueueAll(serverPackets.mainMenuIcon(glob.banchoConf.config["menuIcon"]))
glob.tokens.enqueueAll(serverPackets.channelInfoEnd())
for key, _ in glob.channels.channels.items():
glob.tokens.enqueueAll(serverPackets.channelInfo(key))
return "Bancho settings reloaded!"
def systemMaintenance(fro, chan, message):
# Turn on/off bancho maintenance
maintenance = True
# Get on/off
if len(message) >= 2:
if message[1] == "off":
maintenance = False
# Set new maintenance value in bancho_settings table
glob.banchoConf.setMaintenance(maintenance)
if maintenance == True:
# We have turned on maintenance mode
# Users that will be disconnected
who = []
# Disconnect everyone but mod/admins
for _, value in glob.tokens.tokens.items():
if value.rank < 3:
who.append(value.userID)
glob.tokens.enqueueAll(serverPackets.notification("Our bancho server is in maintenance mode. Please try to login again later."))
glob.tokens.multipleEnqueue(serverPackets.loginError(), who)
msg = "The server is now in maintenance mode!"
else:
# We have turned off maintenance mode
# Send message if we have turned off maintenance mode
msg = "The server is no longer in maintenance mode!"
# Chat output
return msg
def systemStatus(fro, chan, message):
# Print some server info
data = systemHelper.getSystemInfo()
# Final message
msg = "=== PEP.PY STATS ===\n"
msg += "Running pep.py server\n"
msg += "Webserver: {}\n".format(data["webServer"])
msg += "\n"
msg += "=== BANCHO STATS ===\n"
msg += "Connected users: {}\n".format(str(data["connectedUsers"]))
msg += "\n"
msg += "=== SYSTEM STATS ===\n"
msg += "CPU: {}%\n".format(str(data["cpuUsage"]))
msg += "RAM: {}GB/{}GB\n".format(str(data["usedMemory"]), str(data["totalMemory"]))
if data["unix"] == True:
msg += "Load average: {}/{}/{}\n".format(str(data["loadAverage"][0]), str(data["loadAverage"][1]), str(data["loadAverage"][2]))
return msg
"""
Commands list
trigger: message that triggers the command
callback: function to call when the command is triggered. Optional.
response: text to return when the command is triggered. Optional.
syntax: command syntax. Arguments must be separated by spaces (eg: <arg1> <arg2>)
minRank: minimum rank to execute that command. Optional (default = 1)
You MUST set trigger and callback/response, or the command won't work.
"""
commands = [
{
"trigger": "!roll",
"callback": roll
}, {
"trigger": "!faq",
"syntax": "<name>",
"callback": faq
}, {
"trigger": "!report",
"response": "Report command isn't here yet :c"
}, {
"trigger": "!help",
"response": "Click (here)[https://ripple.moe/index.php?p=16&id=4] for RohwaBot's full command list"
}, {
"trigger": "!ask",
"syntax": "<question>",
"callback": ask
}, {
"trigger": "!mm00",
"response": random.choice(["meme", "MA MAURO ESISTE?"])
}, {
"trigger": "!alert",
"syntax": "<message>",
"minRank": 4,
"callback": alert
}, {
"trigger": "!moderated",
"minRank": 3,
"callback": moderated
}, {
"trigger": "!kickall",
"minRank": 4,
"callback": kickAll
}, {
"trigger": "!kick",
"syntax": "<target>",
"minRank": 3,
"callback": kick
}, {
"trigger": "!RohwaBot reconnect",
"minRank": 3,
"callback": RohwaBotReconnect
}, {
"trigger": "!silence",
"syntax": "<target> <amount> <unit(s/m/h/d)> <reason>",
"minRank": 3,
"callback": silence
}, {
"trigger": "!removesilence",
"syntax": "<target>",
"minRank": 3,
"callback": removeSilence
}, {
"trigger": "!system restart",
"minRank": 4,
"callback": systemRestart
}, {
"trigger": "!system shutdown",
"minRank": 4,
"callback": systemShutdown
}, {
"trigger": "!system reload",
"minRank": 3,
"callback": systemReload
}, {
"trigger": "!system maintenance",
"minRank": 3,
"callback": systemMaintenance
}, {
"trigger": "!system status",
"minRank": 3,
"callback": systemStatus
}
]
# Commands list default values
for cmd in commands:
cmd.setdefault("syntax", "")
cmd.setdefault("minRank", 1)
cmd.setdefault("callback", None)
cmd.setdefault("response", "u w0t m8?")
| RlSEN/bannedcho | c.ppy.sh/RohwabotCommands.py | Python | gpl-3.0 | 10,057 |
# -*- coding: utf-8 -*-
"""
logbook
~~~~~~~
Simple logging library that aims to support desktop, command line
and web applications alike.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import os
from logbook.base import LogRecord, Logger, LoggerGroup, NestedSetup, \
Processor, Flags, get_level_name, lookup_level, dispatch_record, \
CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG, NOTSET, \
set_datetime_format
from logbook.handlers import Handler, StreamHandler, FileHandler, \
MonitoringFileHandler, StderrHandler, RotatingFileHandler, \
TimedRotatingFileHandler, TestHandler, MailHandler, GMailHandler, SyslogHandler, \
NullHandler, NTEventLogHandler, create_syshandler, StringFormatter, \
StringFormatterHandlerMixin, HashingHandlerMixin, \
LimitingHandlerMixin, WrapperHandler, FingersCrossedHandler, \
GroupHandler
__version__ = '0.10.0'
# create an anonymous default logger and provide all important
# methods of that logger as global functions
_default_logger = Logger('Generic')
_default_logger.suppress_dispatcher = True
debug = _default_logger.debug
info = _default_logger.info
warn = _default_logger.warn
warning = _default_logger.warning
notice = _default_logger.notice
error = _default_logger.error
exception = _default_logger.exception
catch_exceptions = _default_logger.catch_exceptions
critical = _default_logger.critical
log = _default_logger.log
del _default_logger
# install a default global handler
if os.environ.get('LOGBOOK_INSTALL_DEFAULT_HANDLER'):
default_handler = StderrHandler()
default_handler.push_application()
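# Illustrative use of the module-level helpers defined above (a hedged
# example; real applications usually configure handlers more elaborately):
#
#   import logbook
#   logbook.StderrHandler().push_application()
#   logbook.warn('disk usage at {}%', 91)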
| agustinhenze/logbook.debian | logbook/__init__.py | Python | bsd-3-clause | 1,683 |
import sys
from os import path, getpid
from dsat.state import state_wrapper
from dsat.linux_mtime import m_time as time
from random import randint
from time import sleep
from dsat.state import get_connection, construct_info
import sched
from time import sleep
import logging
cpu_f = open("/proc/loadavg")
def every(x):
    y = x
while True:
yield not y
y = y - 1 if y>=0 else x-1
def cpu(cnx, arg):
cpu_f.seek(0)
_5,_10,_15 = cpu_f.read().split(" ")[:3]
return { "data" : [ _5,_10,_15], "load" : _5, "5min" : _5, "10min" : _10, "15min" : _15 }
cntproc = every(60)
cntping = every(30)
cntcsv = every(30)
cntrrd = every(30)
def cpu_clock(ev):
#try:
sleep(float(ev['arg']['load']) * .05)
#except:
# pass
return True
state_wrapper(sys.argv, cpu, bounce_to=["cpu", "proc", "ping"],
cond_for=dict(
proc=lambda ev: cntproc.next(),
csvw = lambda ev: cntcsv.next(),
rrd = lambda ev: cntrrd.next(),
ping = lambda ev: cntping.next(),
cpu = cpu_clock,
)
)
| jul/dsat | example/cpu.py | Python | bsd-2-clause | 1,061 |
import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
from dynamic_graph import plug
from dynamic_graph.sot.application.stabilizer import LinearizedTableCartDevice, ZMPtoCoMAlgerbraicController
import math
cart = LinearizedTableCartDevice("cart")
controller = ZMPtoCoMAlgerbraicController("controller")
hcom=0.80771
dt = 0.005
cart.setCartMass(59.8)
cart.setCartHeight(hcom)
cart.setStiffness(53200.0*0.2)
cart.setViscosity(100.0)
openloop = False
if openloop:
plug(cart.comHeight,controller.comHeight)
plug(cart.state,controller.comIn)
controller.comddotIN.value = (0.0,)
plug(controller.comddot,cart.control)
else:
plug(cart.comHeight,controller.comHeight)
plug(cart.comreal,controller.comIn)
plug(cart.flexcomddot,controller.comddotIN)
plug(controller.comddot,cart.control)
controller.zmpref.value=(0.0,)
stepTime = 0.05
simuTime = 9
logZMP = np.array([])
logZMP.resize(simuTime/dt,2)
logZMPRef = np.array([])
logZMPRef.resize(simuTime/dt,2)
logComddot = np.array([])
logComddot.resize(simuTime/dt,2)
for i in range(1,int(stepTime/dt)):
print(i)
cart.incr(dt)
logZMP[i,0] = i
logZMP[i,1] = cart.zmp.value[0]
logZMPRef[i,0] = i
logZMPRef[i,1] = controller.zmpref.value[0]
logComddot[i,0]= i
logComddot[i,1]= controller.comddot.value[0]
controller.zmpref.value=(0.01,)
for i in range(int(stepTime/dt),int(simuTime/dt)):
print(i)
cart.incr(dt)
logZMP[i,0] = i
logZMP[i,1] = cart.zmp.value[0]
logZMPRef[i,0] = i
logZMPRef[i,1] = controller.zmpref.value[0]
logComddot[i,0]= i
logComddot[i,1]= controller.comddot.value[0]
fig = plt.figure(); axfig = fig.add_subplot(111)
axfig.plot(logZMP[:,0], logZMP[:,1], label='zmp X')
axfig.plot(logZMPRef[:,0], logZMPRef[:,1], label='zmpRef X')
#axfig.plot(logComddot[:,0], logComddot[:,1], label='comddot')
handles, labels = axfig.get_legend_handles_labels()
axfig.legend(handles, labels)
plt.show()
| amifsud/sot-stabilizer | src/prototyping/simu.py | Python | lgpl-3.0 | 1,980 |
# -*- coding: utf-8; -*-
# This file is a part of sensbiotk
# Contact : sensbio@inria.fr
# Copyright (C) 2014 INRIA (Contact: sensbiotk@inria.fr)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Example of a pedometer
"""
import numpy as np
from numpy.ctypeslib import ndpointer
import ctypes as ct
_LIBPED = ct.cdll.LoadLibrary('obj/libPedometer.so')
_LIBPED.pedometer.restype = None
_LIBPED.pedometer.argtypes = \
[ct.c_int, ndpointer(ndim=1, shape=(3)),
ct.POINTER(ct.c_int), ct.POINTER(ct.c_int), ct.POINTER(ct.c_float)]
def compute_ped(k, sig):
""" compute pedometer
"""
nbstep = ct.c_int()
state = ct.c_int()
debug = ct.c_float()
_LIBPED.pedometer(k, sig, ct.byref(nbstep), ct.byref(state),
ct.byref(debug))
return nbstep.value, state.value, debug.value
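# Example call (illustrative): the binding expects a length-3 numpy array,
# matching the ndpointer signature above; the tests below pass dtype='f':
#   nbstep, state, debug = compute_ped(0, np.array([0.0, 0.0, 9.8], dtype='f'))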
def test_cpedometer():
""" Test pedometer implemented in C
Returns
-------
status: str
"OK" or "ERROR"
"""
from sensbiotk.io import iofox
import pylab as py
[timea, acc] = \
iofox.load_foxacc_csvfile("./data/walk4_acc.csv")
# [timea, acc] = cut_signal(timea, acc, 12.0, 29.0)
peak_detected = np.zeros(len(acc))
for k in range(0, len(acc)):
tab = np.array(acc[k, 0:3], dtype='f')
[step, state, peak_detected[k]] = compute_ped(k, tab)
print "Step numbers=", step
py.figure()
py.plot(timea[:, 0], acc)
py.figure()
#py.plot(peak_detected, "o")
py.show()
def test_pedometer():
""" Test pedometer implemented in Python
Returns
-------
status: str
"OK" or "ERROR"
"""
from sensbiotk.io import iofox
from sensbiotk.algorithms import basic as algo
import pylab as py
[timea, acc] = iofox.load_foxacc_csvfile("./data/walk1_acc.csv")
[timea, acc] = algo.cut_signal(timea, acc, 12.0, 29.0)
acc_norm = algo.compute_norm(acc)
#acc_filt = algo.lowpass_filter(acc_norm, 2.0, 100.0)
#acc_filt = algo.lowpass_filter2(acc_norm, 2.0, 200.0)
acc_filt = algo.moving_average(acc_norm, 30)
#acc_filt = algo.moving_average2(acc_norm, 50)
index_peak = algo.search_maxpeak(acc_filt)
[time_peak, acc_peak] = algo.threshold_signal(timea[index_peak],
acc_filt[index_peak], 11.0)
print "Step numbers=", len(acc_peak)
py.figure()
py.plot(timea[:, 0], acc[:, 0:3])
py.title("Walking 1 Accelerations")
py.legend(('x', 'y', 'z'), bbox_to_anchor=(0, 1, 1, 0),
ncol=2, loc=3, borderaxespad=0.)
py.figure()
py.title("Walking 1 Results")
py.plot(timea[:, 0], acc_norm)
py.plot(timea[:, 0], acc_filt)
py.plot(time_peak, acc_peak, "o")
py.figure()
py.plot(np.diff(acc_filt))
py.plot(np.diff(np.sign(np.diff(acc_filt)) < 0))
py.show()
return "OK"
if __name__ == '__main__':
#test_cpedometer()
test_pedometer()
| sensbio/sensbiotk | examples/pedometer/pedometer.py | Python | gpl-3.0 | 3,521 |
from south.db import db
from django.db import models
from mypage.pages.models import *
class Migration:
def forwards(self, orm):
# Deleting field 'WidgetInPage.rendered_widget'
db.delete_column('pages_widgetinpage', 'rendered_widget_id')
def backwards(self, orm):
# Adding field 'WidgetInPage.rendered_widget'
db.add_column('pages_widgetinpage', 'rendered_widget', models.ForeignKey(orm['widgets.RenderedWidget'], null=False))
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'db_table': "'django_site'"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.widgetinpage': {
'Meta': {'unique_together': "(('page','widget',),)"},
'config_json': ('models.TextField', [], {}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'page': ('models.ForeignKey', ["orm['pages.Page']"], {'verbose_name': "_('Page')"}),
'state': ('models.SmallIntegerField', [], {'default': '2'}),
'widget': ('models.ForeignKey', ["orm['widgets.Widget']"], {'verbose_name': "_('Widget')"})
},
'auth.user': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'widgets.widget': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.page': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'layout_json': ('models.TextField', [], {}),
'site': ('models.ForeignKey', ["orm['sites.Site']"], {'default': ' lambda :settings.SITE_ID'}),
'skin': ('models.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'template': ('models.CharField', [], {'default': "'page.html'", 'max_length': '100'}),
'widgets': ('models.ManyToManyField', ["orm['widgets.Widget']"], {'through': "'WidgetInPage'"})
},
'widgets.renderedwidget': {
'Meta': {'unique_together': "(('widget','state','site',),)"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.userpage': {
'Meta': {'_bases': ['mypage.pages.models.Page']},
'page_ptr': ('models.OneToOneField', ["orm['pages.Page']"], {}),
'user': ('models.ForeignKey', ["orm['auth.User']"], {'unique': 'True'})
},
'pages.sessionpage': {
'Meta': {'_bases': ['mypage.pages.models.Page']},
'page_ptr': ('models.OneToOneField', ["orm['pages.Page']"], {}),
'session_key': ('models.CharField', ["_('session key')"], {'unique': 'True', 'max_length': '40'}),
'updated': ('models.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'False'})
}
}
complete_apps = ['pages']
| ella/mypage | mypage/pages/migrations/0004_remove_rendered_widget_fk.py | Python | bsd-3-clause | 3,021 |
from __future__ import absolute_import
import pygame
import sappho.layers
from .common import compare_surfaces
class TestSurfaceLayers(object):
"""
    create_surface_layers() does not need a dedicated test, because
    it is exercised by SurfaceLayers' initialization and by
    everything else tested here.
"""
NUMBER_OF_LAYERS = 100
TARGET_SURFACE_SIZE = (800, 600)
def setup(self):
self.target_surface = pygame.surface.Surface(self.TARGET_SURFACE_SIZE)
self.surface_layers = (sappho.layers.
SurfaceLayers(self.target_surface,
self.NUMBER_OF_LAYERS))
def test_getitem(self):
for i in range(self.NUMBER_OF_LAYERS):
self.surface_layers[i]
def test_len(self):
assert len(self.surface_layers) == self.NUMBER_OF_LAYERS
def test_iter(self):
for i, surface in enumerate(self.surface_layers):
assert surface is self.surface_layers[i]
assert i == (self.NUMBER_OF_LAYERS - 1)
def test_sizes(self):
for surface in self.surface_layers:
assert surface.get_size() == self.TARGET_SURFACE_SIZE
def test_render(self):
subsurface_size = (150, 150)
# Create our test surfaces
background = pygame.surface.Surface(self.TARGET_SURFACE_SIZE)
rect1 = pygame.surface.Surface(subsurface_size)
rect1pos = (100, 100)
rect2 = pygame.surface.Surface(subsurface_size)
rect2pos = (200, 200)
rect3 = pygame.surface.Surface(subsurface_size)
rect3pos = (300, 300)
# Fill the surfaces
background.fill((255, 255, 255))
rect1.fill((255, 0, 0))
rect2.fill((0, 255, 0))
rect3.fill((0, 0, 255))
# Create a surface to compare with and blit our test surfaces
test_surface = pygame.surface.Surface(self.TARGET_SURFACE_SIZE)
test_surface.blit(background, (0, 0))
test_surface.blit(rect1, rect1pos)
test_surface.blit(rect2, rect2pos)
test_surface.blit(rect3, rect3pos)
# Create the SurfaceLayers object and fill it with our layers
surface_layers = sappho.layers.SurfaceLayers(self.target_surface, 4)
surface_layers[0].blit(background, (0, 0))
surface_layers[1].blit(rect1, rect1pos)
surface_layers[2].blit(rect2, rect2pos)
surface_layers[3].blit(rect3, rect3pos)
# Render to the target surface
surface_layers.render()
# Compare the two surfaces
assert compare_surfaces(self.target_surface, test_surface)
| lillian-gardenia-seabreeze/sappho | tests/test_layers.py | Python | mit | 2,612 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './ConfigurationPages/Form_HubPage.ui'
#
# Created: Mon Sep 9 21:29:22 2013
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_HubPage(object):
def setupUi(self, HubPage):
HubPage.setObjectName(_fromUtf8("HubPage"))
HubPage.resize(381, 270)
HubPage.setWindowTitle(QtGui.QApplication.translate("HubPage", "Ethernet hub", None, QtGui.QApplication.UnicodeUTF8))
self.gridlayout = QtGui.QGridLayout(HubPage)
self.gridlayout.setObjectName(_fromUtf8("gridlayout"))
self.groupBox = QtGui.QGroupBox(HubPage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setTitle(QtGui.QApplication.translate("HubPage", "Settings", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridlayout1 = QtGui.QGridLayout(self.groupBox)
self.gridlayout1.setObjectName(_fromUtf8("gridlayout1"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setText(QtGui.QApplication.translate("HubPage", "Number of ports:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.gridlayout1.addWidget(self.label, 0, 0, 1, 1)
self.spinBoxNumberOfPorts = QtGui.QSpinBox(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinBoxNumberOfPorts.sizePolicy().hasHeightForWidth())
self.spinBoxNumberOfPorts.setSizePolicy(sizePolicy)
self.spinBoxNumberOfPorts.setMinimum(0)
self.spinBoxNumberOfPorts.setMaximum(65535)
self.spinBoxNumberOfPorts.setProperty("value", 1)
self.spinBoxNumberOfPorts.setObjectName(_fromUtf8("spinBoxNumberOfPorts"))
self.gridlayout1.addWidget(self.spinBoxNumberOfPorts, 0, 1, 1, 1)
spacerItem = QtGui.QSpacerItem(20, 71, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridlayout1.addItem(spacerItem, 1, 1, 1, 1)
self.gridlayout.addWidget(self.groupBox, 0, 0, 1, 2)
self.retranslateUi(HubPage)
QtCore.QMetaObject.connectSlotsByName(HubPage)
def retranslateUi(self, HubPage):
pass
| dlintott/gns3 | src/GNS3/Ui/ConfigurationPages/Form_HubPage.py | Python | gpl-2.0 | 2,806 |
# -*- test-case-name: twisted.words.test.test_irc -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Internet Relay Chat Protocol for client and server.
Future Plans
============
The way the IRCClient class works here encourages people to implement
IRC clients by subclassing the ephemeral protocol class, and it tends
to end up with way more state than it should for an object which will
be destroyed as soon as the TCP transport drops. Someone oughta do
something about that, ya know?
The DCC support needs to have more hooks for the client for it to be
able to ask the user things like \"Do you want to accept this session?\"
and \"Transfer #2 is 67% done.\" and otherwise manage the DCC sessions.
Test coverage needs to be better.
@author: Kevin Turner
@see: RFC 1459: Internet Relay Chat Protocol
@see: RFC 2812: Internet Relay Chat: Client Protocol
@see: U{The Client-To-Client-Protocol
<http://www.irchelp.org/irchelp/rfc/ctcpspec.html>}
"""
import errno, os, random, re, stat, struct, sys, time, types, traceback
import string, socket
from os import path
from twisted.internet import reactor, protocol
from twisted.persisted import styles
from twisted.protocols import basic
from twisted.python import log, reflect, text
NUL = chr(0)
CR = chr(015)
NL = chr(012)
LF = NL
SPC = chr(040)
CHANNEL_PREFIXES = '&#!+'
class IRCBadMessage(Exception):
pass
class IRCPasswordMismatch(Exception):
pass
def parsemsg(s):
"""Breaks a message from an IRC server into its prefix, command, and arguments.
"""
prefix = ''
trailing = []
if not s:
raise IRCBadMessage("Empty line.")
if s[0] == ':':
prefix, s = s[1:].split(' ', 1)
if s.find(' :') != -1:
s, trailing = s.split(' :', 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
command = args.pop(0)
return prefix, command, args
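# Illustrative sketch (not part of the original module): what parsemsg
# returns for typical lines (the sample lines are hypothetical).
#
#   parsemsg(':nick!user@host PRIVMSG #chan :hello there')
#   => ('nick!user@host', 'PRIVMSG', ['#chan', 'hello there'])
#
#   parsemsg('PING :irc.example.com')
#   => ('', 'PING', ['irc.example.com'])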
def split(str, length = 80):
"""I break a message into multiple lines.
I prefer to break at whitespace near str[length]. I also break at \\n.
@returns: list of strings
"""
if length <= 0:
raise ValueError("Length must be a number greater than zero")
r = []
while len(str) > length:
w, n = str[:length].rfind(' '), str[:length].find('\n')
if w == -1 and n == -1:
line, str = str[:length], str[length:]
else:
if n == -1:
i = w
else:
i = n
if i == 0: # just skip the space or newline. don't append any output.
str = str[1:]
continue
line, str = str[:i], str[i+1:]
r.append(line)
if len(str):
r.extend(str.split('\n'))
return r
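# Illustrative sketch (not part of the original module): split() prefers
# to break at whitespace near the length limit and always breaks at '\n'.
#
#   split('one two three', length=10)  =>  ['one two', 'three']
#   split('ab\ncd')                    =>  ['ab', 'cd']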
class IRC(protocol.Protocol):
"""Internet Relay Chat server protocol.
"""
buffer = ""
hostname = None
encoding = None
def connectionMade(self):
self.channels = []
if self.hostname is None:
self.hostname = socket.getfqdn()
def sendLine(self, line):
if self.encoding is not None:
if isinstance(line, unicode):
line = line.encode(self.encoding)
self.transport.write("%s%s%s" % (line, CR, LF))
def sendMessage(self, command, *parameter_list, **prefix):
"""Send a line formatted as an IRC message.
First argument is the command, all subsequent arguments
are parameters to that command. If a prefix is desired,
it may be specified with the keyword argument 'prefix'.
"""
if not command:
raise ValueError, "IRC message requires a command."
if ' ' in command or command[0] == ':':
# Not the ONLY way to screw up, but provides a little
# sanity checking to catch likely dumb mistakes.
raise ValueError, "Somebody screwed up, 'cuz this doesn't" \
" look like a command to me: %s" % command
line = string.join([command] + list(parameter_list))
if prefix.has_key('prefix'):
line = ":%s %s" % (prefix['prefix'], line)
self.sendLine(line)
if len(parameter_list) > 15:
log.msg("Message has %d parameters (RFC allows 15):\n%s" %
(len(parameter_list), line))
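    # Illustrative note (not part of the original class): how a call maps
    # onto the wire (names below are hypothetical).  Note the caller
    # supplies the leading ':' on a trailing parameter.
    #
    #   self.sendMessage('PRIVMSG', '#chan', ':hello', prefix='svr.example')
    #   sends the line ':svr.example PRIVMSG #chan :hello'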
def dataReceived(self, data):
"""This hack is to support mIRC, which sends LF only,
even though the RFC says CRLF. (Also, the flexibility
of LineReceiver to turn "line mode" on and off was not
required.)
"""
lines = (self.buffer + data).split(LF)
# Put the (possibly empty) element after the last LF back in the
# buffer
self.buffer = lines.pop()
for line in lines:
if len(line) <= 2:
# This is a blank line, at best.
continue
if line[-1] == CR:
line = line[:-1]
prefix, command, params = parsemsg(line)
# mIRC is a big pile of doo-doo
command = command.upper()
# DEBUG: log.msg( "%s %s %s" % (prefix, command, params))
self.handleCommand(command, prefix, params)
def handleCommand(self, command, prefix, params):
"""Determine the function to call for the given command and call
it with the given arguments.
"""
method = getattr(self, "irc_%s" % command, None)
try:
if method is not None:
method(prefix, params)
else:
self.irc_unknown(prefix, command, params)
except:
log.deferr()
def irc_unknown(self, prefix, command, params):
"""Implement me!"""
raise NotImplementedError(command, prefix, params)
# Helper methods
def privmsg(self, sender, recip, message):
"""Send a message to a channel or user
@type sender: C{str} or C{unicode}
@param sender: Who is sending this message. Should be of the form
username!ident@hostmask (unless you know better!).
@type recip: C{str} or C{unicode}
@param recip: The recipient of this message. If a channel, it
must start with a channel prefix.
@type message: C{str} or C{unicode}
@param message: The message being sent.
"""
self.sendLine(":%s PRIVMSG %s :%s" % (sender, recip, lowQuote(message)))
def notice(self, sender, recip, message):
"""Send a \"notice\" to a channel or user.
Notices differ from privmsgs in that the RFC claims they are different.
Robots are supposed to send notices and not respond to them. Clients
typically display notices differently from privmsgs.
@type sender: C{str} or C{unicode}
@param sender: Who is sending this message. Should be of the form
username!ident@hostmask (unless you know better!).
@type recip: C{str} or C{unicode}
@param recip: The recipient of this message. If a channel, it
must start with a channel prefix.
@type message: C{str} or C{unicode}
@param message: The message being sent.
"""
self.sendLine(":%s NOTICE %s :%s" % (sender, recip, message))
def action(self, sender, recip, message):
"""Send an action to a channel or user.
@type sender: C{str} or C{unicode}
@param sender: Who is sending this message. Should be of the form
username!ident@hostmask (unless you know better!).
@type recip: C{str} or C{unicode}
@param recip: The recipient of this message. If a channel, it
must start with a channel prefix.
@type message: C{str} or C{unicode}
@param message: The action being sent.
"""
self.sendLine(":%s ACTION %s :%s" % (sender, recip, message))
def topic(self, user, channel, topic, author=None):
"""Send the topic to a user.
@type user: C{str} or C{unicode}
@param user: The user receiving the topic. Only their nick name, not
the full hostmask.
@type channel: C{str} or C{unicode}
@param channel: The channel for which this is the topic.
@type topic: C{str} or C{unicode} or C{None}
@param topic: The topic string, unquoted, or None if there is
no topic.
@type author: C{str} or C{unicode}
@param author: If the topic is being changed, the full username and hostmask
of the person changing it.
"""
if author is None:
if topic is None:
self.sendLine(':%s %s %s %s :%s' % (
self.hostname, RPL_NOTOPIC, user, channel, 'No topic is set.'))
else:
self.sendLine(":%s %s %s %s :%s" % (
self.hostname, RPL_TOPIC, user, channel, lowQuote(topic)))
else:
self.sendLine(":%s TOPIC %s :%s" % (author, channel, lowQuote(topic)))
def topicAuthor(self, user, channel, author, date):
"""
Send the author of and time at which a topic was set for the given
channel.
This sends a 333 reply message, which is not part of the IRC RFC.
@type user: C{str} or C{unicode}
@param user: The user receiving the topic. Only their nick name, not
the full hostmask.
@type channel: C{str} or C{unicode}
@param channel: The channel for which this information is relevant.
@type author: C{str} or C{unicode}
@param author: The nickname (without hostmask) of the user who last
set the topic.
@type date: C{int}
@param date: A POSIX timestamp (number of seconds since the epoch)
at which the topic was last set.
"""
self.sendLine(':%s %d %s %s %s %d' % (
self.hostname, 333, user, channel, author, date))
def names(self, user, channel, names):
"""Send the names of a channel's participants to a user.
@type user: C{str} or C{unicode}
@param user: The user receiving the name list. Only their nick
name, not the full hostmask.
@type channel: C{str} or C{unicode}
@param channel: The channel for which this is the namelist.
@type names: C{list} of C{str} or C{unicode}
@param names: The names to send.
"""
# XXX If unicode is given, these limits are not quite correct
prefixLength = len(channel) + len(user) + 10
namesLength = 512 - prefixLength
L = []
count = 0
for n in names:
if count + len(n) + 1 > namesLength:
self.sendLine(":%s %s %s = %s :%s" % (
self.hostname, RPL_NAMREPLY, user, channel, ' '.join(L)))
L = [n]
count = len(n)
else:
L.append(n)
count += len(n) + 1
if L:
self.sendLine(":%s %s %s = %s :%s" % (
self.hostname, RPL_NAMREPLY, user, channel, ' '.join(L)))
self.sendLine(":%s %s %s %s :End of /NAMES list" % (
self.hostname, RPL_ENDOFNAMES, user, channel))
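    # Illustrative note (not part of the original class): e.g.
    # names('alice', '#chan', ['bob', 'carol']) emits one RPL_NAMREPLY
    # line listing bob and carol, then the RPL_ENDOFNAMES terminator;
    # longer lists are batched to stay under the 512-octet line limit.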
def who(self, user, channel, memberInfo):
"""
Send a list of users participating in a channel.
@type user: C{str} or C{unicode}
@param user: The user receiving this member information. Only their
nick name, not the full hostmask.
@type channel: C{str} or C{unicode}
@param channel: The channel for which this is the member
information.
@type memberInfo: C{list} of C{tuples}
@param memberInfo: For each member of the given channel, a 7-tuple
containing their username, their hostmask, the server to which they
        are connected, their nickname, the letter "H" (here) or "G" (gone,
        i.e. away), the hopcount from C{user} to this member, and this member's
real name.
"""
for info in memberInfo:
(username, hostmask, server, nickname, flag, hops, realName) = info
assert flag in ("H", "G")
self.sendLine(":%s %s %s %s %s %s %s %s %s :%d %s" % (
self.hostname, RPL_WHOREPLY, user, channel,
username, hostmask, server, nickname, flag, hops, realName))
self.sendLine(":%s %s %s %s :End of /WHO list." % (
self.hostname, RPL_ENDOFWHO, user, channel))
def whois(self, user, nick, username, hostname, realName, server, serverInfo, oper, idle, signOn, channels):
"""
Send information about the state of a particular user.
@type user: C{str} or C{unicode}
@param user: The user receiving this information. Only their nick
name, not the full hostmask.
@type nick: C{str} or C{unicode}
@param nick: The nickname of the user this information describes.
@type username: C{str} or C{unicode}
@param username: The user's username (eg, ident response)
@type hostname: C{str}
@param hostname: The user's hostmask
@type realName: C{str} or C{unicode}
@param realName: The user's real name
@type server: C{str} or C{unicode}
@param server: The name of the server to which the user is connected
@type serverInfo: C{str} or C{unicode}
@param serverInfo: A descriptive string about that server
@type oper: C{bool}
@param oper: Indicates whether the user is an IRC operator
@type idle: C{int}
@param idle: The number of seconds since the user last sent a message
@type signOn: C{int}
@param signOn: A POSIX timestamp (number of seconds since the epoch)
indicating the time the user signed on
@type channels: C{list} of C{str} or C{unicode}
@param channels: A list of the channels which the user is participating in
"""
self.sendLine(":%s %s %s %s %s %s * :%s" % (
self.hostname, RPL_WHOISUSER, user, nick, username, hostname, realName))
self.sendLine(":%s %s %s %s %s :%s" % (
self.hostname, RPL_WHOISSERVER, user, nick, server, serverInfo))
if oper:
self.sendLine(":%s %s %s %s :is an IRC operator" % (
self.hostname, RPL_WHOISOPERATOR, user, nick))
self.sendLine(":%s %s %s %s %d %d :seconds idle, signon time" % (
self.hostname, RPL_WHOISIDLE, user, nick, idle, signOn))
self.sendLine(":%s %s %s %s :%s" % (
self.hostname, RPL_WHOISCHANNELS, user, nick, ' '.join(channels)))
self.sendLine(":%s %s %s %s :End of WHOIS list." % (
self.hostname, RPL_ENDOFWHOIS, user, nick))
def join(self, who, where):
"""Send a join message.
@type who: C{str} or C{unicode}
@param who: The name of the user joining. Should be of the form
username!ident@hostmask (unless you know better!).
@type where: C{str} or C{unicode}
@param where: The channel the user is joining.
"""
self.sendLine(":%s JOIN %s" % (who, where))
def part(self, who, where, reason=None):
"""Send a part message.
@type who: C{str} or C{unicode}
        @param who: The name of the user leaving. Should be of the form
username!ident@hostmask (unless you know better!).
@type where: C{str} or C{unicode}
        @param where: The channel the user is leaving.
@type reason: C{str} or C{unicode}
@param reason: A string describing the misery which caused
this poor soul to depart.
"""
if reason:
self.sendLine(":%s PART %s :%s" % (who, where, reason))
else:
self.sendLine(":%s PART %s" % (who, where))
def channelMode(self, user, channel, mode, *args):
"""
Send information about the mode of a channel.
@type user: C{str} or C{unicode}
@param user: The user receiving the name list. Only their nick
name, not the full hostmask.
@type channel: C{str} or C{unicode}
@param channel: The channel for which this is the namelist.
@type mode: C{str}
@param mode: A string describing this channel's modes.
@param args: Any additional arguments required by the modes.
"""
self.sendLine(":%s %s %s %s %s %s" % (
self.hostname, RPL_CHANNELMODEIS, user, channel, mode, ' '.join(args)))
class IRCClient(basic.LineReceiver):
"""Internet Relay Chat client protocol, with sprinkles.
In addition to providing an interface for an IRC client protocol,
this class also contains reasonable implementations of many common
CTCP methods.
TODO
====
- Limit the length of messages sent (because the IRC server probably
does).
- Add flood protection/rate limiting for my CTCP replies.
- NickServ cooperation. (a mix-in?)
- Heartbeat. The transport may die in such a way that it does not realize
it is dead until it is written to. Sending something (like \"PING
this.irc-host.net\") during idle peroids would alleviate that. If
you're concerned with the stability of the host as well as that of the
transport, you might care to watch for the corresponding PONG.
@ivar nickname: Nickname the client will use.
@ivar password: Password used to log on to the server. May be C{None}.
@ivar realname: Supplied to the server during login as the \"Real name\"
or \"ircname\". May be C{None}.
@ivar username: Supplied to the server during login as the \"User name\".
May be C{None}
@ivar userinfo: Sent in reply to a C{USERINFO} CTCP query. If C{None}, no
USERINFO reply will be sent.
\"This is used to transmit a string which is settable by
the user (and never should be set by the client).\"
@ivar fingerReply: Sent in reply to a C{FINGER} CTCP query. If C{None}, no
FINGER reply will be sent.
@type fingerReply: Callable or String
@ivar versionName: CTCP VERSION reply, client name. If C{None}, no VERSION
reply will be sent.
    @ivar versionNum: CTCP VERSION reply, client version.
@ivar versionEnv: CTCP VERSION reply, environment the client is running in.
@ivar sourceURL: CTCP SOURCE reply, a URL where the source code of this
client may be found. If C{None}, no SOURCE reply will be sent.
@ivar lineRate: Minimum delay between lines sent to the server. If
C{None}, no delay will be imposed.
@type lineRate: Number of Seconds.
"""
motd = ""
nickname = 'irc'
password = None
realname = None
username = None
### Responses to various CTCP queries.
userinfo = None
# fingerReply is a callable returning a string, or a str()able object.
fingerReply = None
versionName = None
versionNum = None
versionEnv = None
sourceURL = "http://twistedmatrix.com/downloads/"
dcc_destdir = '.'
dcc_sessions = None
# 'mode': (added, removed) i.e.:
# 'l': (True, False) accepts an arg when added and no arg when removed
# from http://www.faqs.org/rfcs/rfc1459.html - 4.2.3.1 Channel modes
# if you want other modes to accept args, add them here, by default unknown
# modes won't accept any arg
_modeAcceptsArg = {
'o': (True, True), # op/deop a user
'h': (True, True), # hop/dehop (halfop) a user (not defined in RFC)
'v': (True, True), # voice/devoice a user
'b': (True, True), # ban/unban a user/mask
'l': (True, False), # set the user limit to channel
'k': (True, False), # set a channel key (password)
't': (False, False), # only ops set topic
's': (False, False), # secret channel
'p': (False, False), # private channel
'i': (False, False), # invite-only channel
'm': (False, False), # moderated channel
'n': (False, False), # no external messages
}
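    # Illustrative note (not part of the original class): with the table
    # above, a server line 'MODE #chan +ol nick 20' reaches modeChanged as
    #   modeChanged(user, '#chan', True, 'ol', ('nick', '20'))
    # while a mixed change such as 'MODE #chan +o-v alice bob' triggers two
    # calls: one for the added modes, then one for the removed ones.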
# If this is false, no attempt will be made to identify
# ourself to the server.
performLogin = 1
lineRate = None
_queue = None
_queueEmptying = None
delimiter = '\n' # '\r\n' will also work (see dataReceived)
__pychecker__ = 'unusednames=params,prefix,channel'
def _reallySendLine(self, line):
return basic.LineReceiver.sendLine(self, lowQuote(line) + '\r')
def sendLine(self, line):
if self.lineRate is None:
self._reallySendLine(line)
else:
self._queue.append(line)
if not self._queueEmptying:
self._sendLine()
def _sendLine(self):
if self._queue:
self._reallySendLine(self._queue.pop(0))
self._queueEmptying = reactor.callLater(self.lineRate,
self._sendLine)
else:
self._queueEmptying = None
### Interface level client->user output methods
###
### You'll want to override these.
### Methods relating to the server itself
def created(self, when):
"""Called with creation date information about the server, usually at logon.
@type when: C{str}
@param when: A string describing when the server was created, probably.
"""
def yourHost(self, info):
"""Called with daemon information about the server, usually at logon.
@type info: C{str}
        @param info: A string describing what software the server is running, probably.
"""
def myInfo(self, servername, version, umodes, cmodes):
"""Called with information about the server, usually at logon.
@type servername: C{str}
@param servername: The hostname of this server.
@type version: C{str}
@param version: A description of what software this server runs.
@type umodes: C{str}
@param umodes: All the available user modes.
@type cmodes: C{str}
@param cmodes: All the available channel modes.
"""
def luserClient(self, info):
"""Called with information about the number of connections, usually at logon.
@type info: C{str}
@param info: A description of the number of clients and servers
connected to the network, probably.
"""
def bounce(self, info):
"""Called with information about where the client should reconnect.
@type info: C{str}
@param info: A plaintext description of the address that should be
connected to.
"""
def isupport(self, options):
"""Called with various information about what the server supports.
@type options: C{list} of C{str}
@param options: Descriptions of features or limits of the server, possibly
in the form "NAME=VALUE".
"""
def luserChannels(self, channels):
"""Called with the number of channels existant on the server.
@type channels: C{int}
"""
def luserOp(self, ops):
"""Called with the number of ops logged on to the server.
@type ops: C{int}
"""
def luserMe(self, info):
"""Called with information about the server connected to.
@type info: C{str}
@param info: A plaintext string describing the number of users and servers
connected to this server.
"""
### Methods involving me directly
def privmsg(self, user, channel, message):
"""Called when I have a message from a user to me or a channel.
"""
pass
def joined(self, channel):
"""Called when I finish joining a channel.
channel has the starting character (# or &) intact.
"""
pass
def left(self, channel):
"""Called when I have left a channel.
channel has the starting character (# or &) intact.
"""
pass
def noticed(self, user, channel, message):
"""Called when I have a notice from a user to me or a channel.
By default, this is equivalent to IRCClient.privmsg, but if your
client makes any automated replies, you must override this!
From the RFC::
The difference between NOTICE and PRIVMSG is that
automatic replies MUST NEVER be sent in response to a
NOTICE message. [...] The object of this rule is to avoid
loops between clients automatically sending something in
response to something it received.
"""
self.privmsg(user, channel, message)
def modeChanged(self, user, channel, set, modes, args):
"""Called when users or channel's modes are changed.
@type user: C{str}
@param user: The user and hostmask which instigated this change.
@type channel: C{str}
@param channel: The channel where the modes are changed. If args is
empty the channel for which the modes are changing. If the changes are
at server level it could be equal to C{user}.
@type set: C{bool} or C{int}
@param set: True if the mode(s) is being added, False if it is being
removed. If some modes are added and others removed at the same time
this function will be called twice, the first time with all the added
modes, the second with the removed ones. (To change this behaviour
override the irc_MODE method)
@type modes: C{str}
@param modes: The mode or modes which are being changed.
@type args: C{tuple}
@param args: Any additional information required for the mode
change.
"""
def pong(self, user, secs):
"""Called with the results of a CTCP PING query.
"""
pass
def signedOn(self):
"""Called after sucessfully signing on to the server.
"""
pass
def kickedFrom(self, channel, kicker, message):
"""Called when I am kicked from a channel.
"""
pass
def nickChanged(self, nick):
"""Called when my nick has been changed.
"""
self.nickname = nick
### Things I observe other people doing in a channel.
def userJoined(self, user, channel):
"""Called when I see another user joining a channel.
"""
pass
def userLeft(self, user, channel):
"""Called when I see another user leaving a channel.
"""
pass
def userQuit(self, user, quitMessage):
"""Called when I see another user disconnect from the network.
"""
pass
def userKicked(self, kickee, channel, kicker, message):
"""Called when I observe someone else being kicked from a channel.
"""
pass
def action(self, user, channel, data):
"""Called when I see a user perform an ACTION on a channel.
"""
pass
def topicUpdated(self, user, channel, newTopic):
"""In channel, user changed the topic to newTopic.
Also called when first joining a channel.
"""
pass
def userRenamed(self, oldname, newname):
"""A user changed their name from oldname to newname.
"""
pass
### Information from the server.
def receivedMOTD(self, motd):
"""I received a message-of-the-day banner from the server.
        motd is a list of strings, where each string was sent as a separate
message from the server. To display, you might want to use::
'\\n'.join(motd)
to get a nicely formatted string.
"""
pass
### user input commands, client->server
### Your client will want to invoke these.
def join(self, channel, key=None):
"""
Join a channel.
@type channel: C{str}
@param channel: The name of the channel to join. If it has no
        prefix, C{'#'} will be prepended to it.
@type key: C{str}
@param key: If specified, the key used to join the channel.
"""
if channel[0] not in '&#!+': channel = '#' + channel
if key:
self.sendLine("JOIN %s %s" % (channel, key))
else:
self.sendLine("JOIN %s" % (channel,))
def leave(self, channel, reason=None):
"""
Leave a channel.
@type channel: C{str}
@param channel: The name of the channel to leave. If it has no
        prefix, C{'#'} will be prepended to it.
@type reason: C{str}
@param reason: If given, the reason for leaving.
"""
if channel[0] not in '&#!+': channel = '#' + channel
if reason:
self.sendLine("PART %s :%s" % (channel, reason))
else:
self.sendLine("PART %s" % (channel,))
def kick(self, channel, user, reason=None):
"""
Attempt to kick a user from a channel.
@type channel: C{str}
@param channel: The name of the channel to kick the user from. If it
        has no prefix, C{'#'} will be prepended to it.
@type user: C{str}
@param user: The nick of the user to kick.
@type reason: C{str}
@param reason: If given, the reason for kicking the user.
"""
if channel[0] not in '&#!+': channel = '#' + channel
if reason:
self.sendLine("KICK %s %s :%s" % (channel, user, reason))
else:
self.sendLine("KICK %s %s" % (channel, user))
part = leave
def topic(self, channel, topic=None):
"""Attempt to set the topic of the given channel, or ask what it is.
If topic is None, then I sent a topic query instead of trying to set
the topic. The server should respond with a TOPIC message containing
the current topic of the given channel.
@type channel: C{str}
@param channel: The name of the channel to change the topic on. If it
        has no prefix, C{'#'} will be prepended to it.
@type topic: C{str}
@param topic: If specified, what to set the topic to.
"""
# << TOPIC #xtestx :fff
if channel[0] not in '&#!+': channel = '#' + channel
if topic != None:
self.sendLine("TOPIC %s :%s" % (channel, topic))
else:
self.sendLine("TOPIC %s" % (channel,))
def mode(self, chan, set, modes, limit = None, user = None, mask = None):
"""
Change the modes on a user or channel.
The C{limit}, C{user}, and C{mask} parameters are mutually exclusive.
@type chan: C{str}
@param chan: The name of the channel to operate on. If it has no
        prefix, C{'#'} will be prepended to it.
@type set: C{bool}
@param set: True to give the user or channel permissions and False to
remove them.
@type modes: C{str}
@param modes: The mode flags to set on the user or channel.
@type limit: C{int}
        @param limit: In conjunction with the C{'l'} mode flag, limits the
number of users on the channel.
@type user: C{str}
@param user: The user to change the mode on.
@type mask: C{str}
        @param mask: In conjunction with the C{'b'} mode flag, sets a mask of
users to be banned from the channel.
"""
if set:
line = 'MODE %s +%s' % (chan, modes)
else:
line = 'MODE %s -%s' % (chan, modes)
if limit is not None:
line = '%s %d' % (line, limit)
elif user is not None:
line = '%s %s' % (line, user)
elif mask is not None:
line = '%s %s' % (line, mask)
self.sendLine(line)
def say(self, channel, message, length = None):
"""
Send a message to a channel
@type channel: C{str}
@param channel: The channel to say the message on.
@type message: C{str}
@param message: The message to say.
@type length: C{int}
@param length: The maximum number of octets to send at a time. This
has the effect of turning a single call to C{msg()} into multiple
commands to the server. This is useful when long messages may be
sent that would otherwise cause the server to kick us off or
silently truncate the text we are sending. If None is passed, the
        entire message is always sent in one command.
"""
if channel[0] not in '&#!+': channel = '#' + channel
self.msg(channel, message, length)
def msg(self, user, message, length = None):
"""Send a message to a user or channel.
@type user: C{str}
@param user: The username or channel name to which to direct the
message.
@type message: C{str}
@param message: The text to send
@type length: C{int}
@param length: The maximum number of octets to send at a time. This
has the effect of turning a single call to msg() into multiple
commands to the server. This is useful when long messages may be
sent that would otherwise cause the server to kick us off or silently
truncate the text we are sending. If None is passed, the entire
        message is always sent in one command.
"""
fmt = "PRIVMSG %s :%%s" % (user,)
if length is None:
self.sendLine(fmt % (message,))
else:
# NOTE: minimumLength really equals len(fmt) - 2 (for '%s') + n
# where n is how many bytes sendLine sends to end the line.
# n was magic numbered to 2, I think incorrectly
minimumLength = len(fmt)
if length <= minimumLength:
raise ValueError("Maximum length must exceed %d for message "
"to %s" % (minimumLength, user))
lines = split(message, length - minimumLength)
map(lambda line, self=self, fmt=fmt: self.sendLine(fmt % line),
lines)
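    # Illustrative note (not part of the original class): with a length
    # limit the text is split on whitespace and sent as several PRIVMSGs.
    #
    #   self.msg('alice', 'one two three', length=25)
    #   sends 'PRIVMSG alice :one two' then 'PRIVMSG alice :three'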
def notice(self, user, message):
"""
Send a notice to a user.
Notices are like normal message, but should never get automated
replies.
@type user: C{str}
@param user: The user to send a notice to.
@type message: C{str}
@param message: The contents of the notice to send.
"""
self.sendLine("NOTICE %s :%s" % (user, message))
def away(self, message=''):
"""
Mark this client as away.
@type message: C{str}
@param message: If specified, the away message.
"""
self.sendLine("AWAY :%s" % message)
def back(self):
"""
Clear the away status.
"""
# An empty away marks us as back
self.away()
def whois(self, nickname, server=None):
"""
Retrieve user information about the given nick name.
@type nickname: C{str}
@param nickname: The nick name about which to retrieve information.
@since: 8.2
"""
if server is None:
self.sendLine('WHOIS ' + nickname)
else:
self.sendLine('WHOIS %s %s' % (server, nickname))
def register(self, nickname, hostname='foo', servername='bar'):
"""
Login to the server.
@type nickname: C{str}
@param nickname: The nickname to register.
@type hostname: C{str}
@param hostname: If specified, the hostname to logon as.
@type servername: C{str}
@param servername: If specified, the servername to logon as.
"""
if self.password is not None:
self.sendLine("PASS %s" % self.password)
self.setNick(nickname)
if self.username is None:
self.username = nickname
self.sendLine("USER %s %s %s :%s" % (self.username, hostname, servername, self.realname))
def setNick(self, nickname):
"""
Set this client's nickname.
@type nickname: C{str}
@param nickname: The nickname to change to.
"""
self.nickname = nickname
self.sendLine("NICK %s" % nickname)
def quit(self, message = ''):
"""
Disconnect from the server
@type message: C{str}
@param message: If specified, the message to give when quitting the
server.
"""
self.sendLine("QUIT :%s" % message)
### user input commands, client->client
def me(self, channel, action):
"""
Strike a pose.
@type channel: C{str}
@param channel: The name of the channel to have an action on. If it
        has no prefix, C{'#'} will be prepended to it.
@type action: C{str}
        @param action: The action to perform.
"""
if channel[0] not in '&#!+': channel = '#' + channel
self.ctcpMakeQuery(channel, [('ACTION', action)])
_pings = None
_MAX_PINGRING = 12
def ping(self, user, text = None):
"""
Measure round-trip delay to another IRC client.
"""
if self._pings is None:
self._pings = {}
if text is None:
chars = string.letters + string.digits + string.punctuation
key = ''.join([random.choice(chars) for i in range(12)])
else:
key = str(text)
self._pings[(user, key)] = time.time()
self.ctcpMakeQuery(user, [('PING', key)])
if len(self._pings) > self._MAX_PINGRING:
# Remove some of the oldest entries.
byValue = [(v, k) for (k, v) in self._pings.items()]
byValue.sort()
            excess = len(self._pings) - self._MAX_PINGRING
for i in xrange(excess):
del self._pings[byValue[i][1]]
def dccSend(self, user, file):
if type(file) == types.StringType:
file = open(file, 'r')
size = fileSize(file)
name = getattr(file, "name", "file@%s" % (id(file),))
factory = DccSendFactory(file)
port = reactor.listenTCP(0, factory, 1)
raise NotImplementedError,(
"XXX!!! Help! I need to bind a socket, have it listen, and tell me its address. "
"(and stop accepting once we've made a single connection.)")
my_address = struct.pack("!I", my_address)
args = ['SEND', name, my_address, str(port)]
if not (size is None):
args.append(size)
args = string.join(args, ' ')
self.ctcpMakeQuery(user, [('DCC', args)])
def dccResume(self, user, fileName, port, resumePos):
"""Send a DCC RESUME request to another user."""
self.ctcpMakeQuery(user, [
('DCC', ['RESUME', fileName, port, resumePos])])
def dccAcceptResume(self, user, fileName, port, resumePos):
"""Send a DCC ACCEPT response to clients who have requested a resume.
"""
self.ctcpMakeQuery(user, [
('DCC', ['ACCEPT', fileName, port, resumePos])])
### server->client messages
### You might want to fiddle with these,
### but it is safe to leave them alone.
def irc_ERR_NICKNAMEINUSE(self, prefix, params):
"""
        Called when the nickname we tried to register is already in use.
"""
self.register(self.nickname+'_')
def irc_ERR_PASSWDMISMATCH(self, prefix, params):
"""
Called when the login was incorrect.
"""
raise IRCPasswordMismatch("Password Incorrect.")
def irc_RPL_WELCOME(self, prefix, params):
"""
Called when we have received the welcome from the server.
"""
self.signedOn()
def irc_JOIN(self, prefix, params):
"""
Called when a user joins a channel.
"""
nick = string.split(prefix,'!')[0]
channel = params[-1]
if nick == self.nickname:
self.joined(channel)
else:
self.userJoined(nick, channel)
def irc_PART(self, prefix, params):
"""
Called when a user leaves a channel.
"""
nick = string.split(prefix,'!')[0]
channel = params[0]
if nick == self.nickname:
self.left(channel)
else:
self.userLeft(nick, channel)
def irc_QUIT(self, prefix, params):
"""
Called when a user has quit.
"""
nick = string.split(prefix,'!')[0]
self.userQuit(nick, params[0])
def irc_MODE(self, prefix, params):
"""
Parse the server message when one or more modes are changed
"""
user, channel, modes, args = prefix, params[0], params[1], params[2:]
if modes[0] not in '+-':
# add a '+' before the modes if it isn't specified (e.g. MODE s)
modes = '+' + modes
if ((modes[0] == '+' and '-' not in modes[1:]) or
(modes[0] == '-' and '+' not in modes[1:])):
# all modes are added or removed
set = (modes[0] == '+')
modes = modes[1:].replace('-+'[set], '')
self.modeChanged(user, channel, set, modes, tuple(args))
else:
# some modes added and other removed
modes2, args2 = ['', ''], [[], []]
for c in modes:
if c == '+':
i = 0
elif c == '-':
i = 1
else:
modes2[i] += c
# take an arg only if the mode accepts it (e.g. +o nick)
if args and self._modeAcceptsArg.get(c, (False, False))[i]:
args2[i].append(args.pop(0))
if args:
log.msg('Too many args (%s) received for %s. If one or more '
'modes are supposed to accept an arg and they are not in '
'_modeAcceptsArg, add them.' % (' '.join(args), modes))
self.modeChanged(user, channel, True, modes2[0], tuple(args2[0]))
self.modeChanged(user, channel, False, modes2[1], tuple(args2[1]))
def irc_PING(self, prefix, params):
"""
        Called when someone has pinged us.
"""
self.sendLine("PONG %s" % params[-1])
def irc_PRIVMSG(self, prefix, params):
"""
Called when we get a message.
"""
user = prefix
channel = params[0]
message = params[-1]
if not message: return # don't raise an exception if some idiot sends us a blank message
if message[0]==X_DELIM:
m = ctcpExtract(message)
if m['extended']:
self.ctcpQuery(user, channel, m['extended'])
if not m['normal']:
return
message = string.join(m['normal'], ' ')
self.privmsg(user, channel, message)
def irc_NOTICE(self, prefix, params):
"""
Called when a user gets a notice.
"""
user = prefix
channel = params[0]
message = params[-1]
if message[0]==X_DELIM:
m = ctcpExtract(message)
if m['extended']:
self.ctcpReply(user, channel, m['extended'])
if not m['normal']:
return
message = string.join(m['normal'], ' ')
self.noticed(user, channel, message)
def irc_NICK(self, prefix, params):
"""
Called when a user changes their nickname.
"""
nick = string.split(prefix,'!', 1)[0]
if nick == self.nickname:
self.nickChanged(params[0])
else:
self.userRenamed(nick, params[0])
def irc_KICK(self, prefix, params):
"""
Called when a user is kicked from a channel.
"""
kicker = string.split(prefix,'!')[0]
channel = params[0]
kicked = params[1]
message = params[-1]
if string.lower(kicked) == string.lower(self.nickname):
# Yikes!
self.kickedFrom(channel, kicker, message)
else:
self.userKicked(kicked, channel, kicker, message)
def irc_TOPIC(self, prefix, params):
"""
Someone in the channel set the topic.
"""
user = string.split(prefix, '!')[0]
channel = params[0]
newtopic = params[1]
self.topicUpdated(user, channel, newtopic)
def irc_RPL_TOPIC(self, prefix, params):
"""
Called when the topic for a channel is initially reported or when it
subsequently changes.
"""
user = string.split(prefix, '!')[0]
channel = params[1]
newtopic = params[2]
self.topicUpdated(user, channel, newtopic)
def irc_RPL_NOTOPIC(self, prefix, params):
user = string.split(prefix, '!')[0]
channel = params[1]
newtopic = ""
self.topicUpdated(user, channel, newtopic)
def irc_RPL_MOTDSTART(self, prefix, params):
if params[-1].startswith("- "):
params[-1] = params[-1][2:]
self.motd = [params[-1]]
def irc_RPL_MOTD(self, prefix, params):
if params[-1].startswith("- "):
params[-1] = params[-1][2:]
self.motd.append(params[-1])
def irc_RPL_ENDOFMOTD(self, prefix, params):
self.receivedMOTD(self.motd)
def irc_RPL_CREATED(self, prefix, params):
self.created(params[1])
def irc_RPL_YOURHOST(self, prefix, params):
self.yourHost(params[1])
def irc_RPL_MYINFO(self, prefix, params):
info = params[1].split(None, 3)
while len(info) < 4:
info.append(None)
self.myInfo(*info)
def irc_RPL_BOUNCE(self, prefix, params):
# 005 is doubly assigned. Piece of crap dirty trash protocol.
if params[-1] == "are available on this server":
self.isupport(params[1:-1])
else:
self.bounce(params[1])
def irc_RPL_LUSERCLIENT(self, prefix, params):
self.luserClient(params[1])
def irc_RPL_LUSEROP(self, prefix, params):
try:
self.luserOp(int(params[1]))
except ValueError:
pass
def irc_RPL_LUSERCHANNELS(self, prefix, params):
try:
self.luserChannels(int(params[1]))
except ValueError:
pass
def irc_RPL_LUSERME(self, prefix, params):
self.luserMe(params[1])
def irc_unknown(self, prefix, command, params):
pass
### Receiving a CTCP query from another party
### It is safe to leave these alone.
def ctcpQuery(self, user, channel, messages):
"""Dispatch method for any CTCP queries received.
"""
for m in messages:
method = getattr(self, "ctcpQuery_%s" % m[0], None)
if method:
method(user, channel, m[1])
else:
self.ctcpUnknownQuery(user, channel, m[0], m[1])
def ctcpQuery_ACTION(self, user, channel, data):
self.action(user, channel, data)
def ctcpQuery_PING(self, user, channel, data):
nick = string.split(user,"!")[0]
self.ctcpMakeReply(nick, [("PING", data)])
def ctcpQuery_FINGER(self, user, channel, data):
if data is not None:
self.quirkyMessage("Why did %s send '%s' with a FINGER query?"
% (user, data))
if not self.fingerReply:
return
if callable(self.fingerReply):
reply = self.fingerReply()
else:
reply = str(self.fingerReply)
nick = string.split(user,"!")[0]
self.ctcpMakeReply(nick, [('FINGER', reply)])
def ctcpQuery_VERSION(self, user, channel, data):
if data is not None:
self.quirkyMessage("Why did %s send '%s' with a VERSION query?"
% (user, data))
if self.versionName:
nick = string.split(user,"!")[0]
self.ctcpMakeReply(nick, [('VERSION', '%s:%s:%s' %
(self.versionName,
self.versionNum,
self.versionEnv))])
def ctcpQuery_SOURCE(self, user, channel, data):
if data is not None:
self.quirkyMessage("Why did %s send '%s' with a SOURCE query?"
% (user, data))
if self.sourceURL:
nick = string.split(user,"!")[0]
# The CTCP document (Zeuge, Rollo, Mesander 1994) says that SOURCE
# replies should be responded to with the location of an anonymous
# FTP server in host:directory:file format. I'm taking the liberty
# of bringing it into the 21st century by sending a URL instead.
self.ctcpMakeReply(nick, [('SOURCE', self.sourceURL),
('SOURCE', None)])
def ctcpQuery_USERINFO(self, user, channel, data):
if data is not None:
self.quirkyMessage("Why did %s send '%s' with a USERINFO query?"
% (user, data))
if self.userinfo:
nick = string.split(user,"!")[0]
self.ctcpMakeReply(nick, [('USERINFO', self.userinfo)])
def ctcpQuery_CLIENTINFO(self, user, channel, data):
"""A master index of what CTCP tags this client knows.
If no arguments are provided, respond with a list of known tags.
If an argument is provided, provide human-readable help on
the usage of that tag.
"""
nick = string.split(user,"!")[0]
if not data:
# XXX: prefixedMethodNames gets methods from my *class*,
# but it's entirely possible that this *instance* has more
# methods.
names = reflect.prefixedMethodNames(self.__class__,
'ctcpQuery_')
self.ctcpMakeReply(nick, [('CLIENTINFO',
string.join(names, ' '))])
else:
args = string.split(data)
method = getattr(self, 'ctcpQuery_%s' % (args[0],), None)
if not method:
self.ctcpMakeReply(nick, [('ERRMSG',
"CLIENTINFO %s :"
"Unknown query '%s'"
% (data, args[0]))])
return
doc = getattr(method, '__doc__', '')
self.ctcpMakeReply(nick, [('CLIENTINFO', doc)])
def ctcpQuery_ERRMSG(self, user, channel, data):
# Yeah, this seems strange, but that's what the spec says to do
# when faced with an ERRMSG query (not a reply).
nick = string.split(user,"!")[0]
        self.ctcpMakeReply(nick, [('ERRMSG',
                                   "%s :No error has occurred." % data)])
def ctcpQuery_TIME(self, user, channel, data):
if data is not None:
self.quirkyMessage("Why did %s send '%s' with a TIME query?"
% (user, data))
nick = string.split(user,"!")[0]
self.ctcpMakeReply(nick,
[('TIME', ':%s' %
time.asctime(time.localtime(time.time())))])
def ctcpQuery_DCC(self, user, channel, data):
"""Initiate a Direct Client Connection
"""
if not data: return
dcctype = data.split(None, 1)[0].upper()
handler = getattr(self, "dcc_" + dcctype, None)
if handler:
if self.dcc_sessions is None:
self.dcc_sessions = []
data = data[len(dcctype)+1:]
handler(user, channel, data)
else:
nick = string.split(user,"!")[0]
self.ctcpMakeReply(nick, [('ERRMSG',
"DCC %s :Unknown DCC type '%s'"
% (data, dcctype))])
self.quirkyMessage("%s offered unknown DCC type %s"
% (user, dcctype))
def dcc_SEND(self, user, channel, data):
# Use splitQuoted for those who send files with spaces in the names.
data = text.splitQuoted(data)
if len(data) < 3:
raise IRCBadMessage, "malformed DCC SEND request: %r" % (data,)
(filename, address, port) = data[:3]
address = dccParseAddress(address)
try:
port = int(port)
except ValueError:
raise IRCBadMessage, "Indecipherable port %r" % (port,)
size = -1
if len(data) >= 4:
try:
size = int(data[3])
except ValueError:
pass
# XXX Should we bother passing this data?
self.dccDoSend(user, address, port, filename, size, data)
def dcc_ACCEPT(self, user, channel, data):
data = text.splitQuoted(data)
if len(data) < 3:
raise IRCBadMessage, "malformed DCC SEND ACCEPT request: %r" % (data,)
(filename, port, resumePos) = data[:3]
try:
port = int(port)
resumePos = int(resumePos)
except ValueError:
return
self.dccDoAcceptResume(user, filename, port, resumePos)
def dcc_RESUME(self, user, channel, data):
data = text.splitQuoted(data)
if len(data) < 3:
raise IRCBadMessage, "malformed DCC SEND RESUME request: %r" % (data,)
(filename, port, resumePos) = data[:3]
try:
port = int(port)
resumePos = int(resumePos)
except ValueError:
return
self.dccDoResume(user, filename, port, resumePos)
def dcc_CHAT(self, user, channel, data):
data = text.splitQuoted(data)
if len(data) < 3:
raise IRCBadMessage, "malformed DCC CHAT request: %r" % (data,)
(filename, address, port) = data[:3]
address = dccParseAddress(address)
try:
port = int(port)
except ValueError:
raise IRCBadMessage, "Indecipherable port %r" % (port,)
self.dccDoChat(user, channel, address, port, data)
### The dccDo methods are the slightly higher-level siblings of
### common dcc_ methods; the arguments have been parsed for them.
def dccDoSend(self, user, address, port, fileName, size, data):
"""Called when I receive a DCC SEND offer from a client.
By default, I do nothing here."""
## filename = path.basename(arg)
## protocol = DccFileReceive(filename, size,
## (user,channel,data),self.dcc_destdir)
## reactor.clientTCP(address, port, protocol)
## self.dcc_sessions.append(protocol)
pass
def dccDoResume(self, user, file, port, resumePos):
"""Called when a client is trying to resume an offered file
via DCC send. It should be either replied to with a DCC
ACCEPT or ignored (default)."""
pass
def dccDoAcceptResume(self, user, file, port, resumePos):
"""Called when a client has verified and accepted a DCC resume
request made by us. By default it will do nothing."""
pass
def dccDoChat(self, user, channel, address, port, data):
pass
#factory = DccChatFactory(self, queryData=(user, channel, data))
#reactor.connectTCP(address, port, factory)
#self.dcc_sessions.append(factory)
#def ctcpQuery_SED(self, user, data):
# """Simple Encryption Doodoo
#
# Feel free to implement this, but no specification is available.
# """
# raise NotImplementedError
def ctcpUnknownQuery(self, user, channel, tag, data):
nick = string.split(user,"!")[0]
self.ctcpMakeReply(nick, [('ERRMSG',
"%s %s: Unknown query '%s'"
% (tag, data, tag))])
log.msg("Unknown CTCP query from %s: %s %s\n"
% (user, tag, data))
def ctcpMakeReply(self, user, messages):
"""
Send one or more C{extended messages} as a CTCP reply.
@type messages: a list of extended messages. An extended
message is a (tag, data) tuple, where 'data' may be C{None}.
"""
self.notice(user, ctcpStringify(messages))
### client CTCP query commands
def ctcpMakeQuery(self, user, messages):
"""
Send one or more C{extended messages} as a CTCP query.
@type messages: a list of extended messages. An extended
message is a (tag, data) tuple, where 'data' may be C{None}.
"""
self.msg(user, ctcpStringify(messages))
### Receiving a response to a CTCP query (presumably to one we made)
### You may want to add methods here, or override UnknownReply.
def ctcpReply(self, user, channel, messages):
"""
Dispatch method for any CTCP replies received.
"""
for m in messages:
method = getattr(self, "ctcpReply_%s" % m[0], None)
if method:
method(user, channel, m[1])
else:
self.ctcpUnknownReply(user, channel, m[0], m[1])
def ctcpReply_PING(self, user, channel, data):
nick = user.split('!', 1)[0]
if (not self._pings) or (not self._pings.has_key((nick, data))):
raise IRCBadMessage,\
"Bogus PING response from %s: %s" % (user, data)
t0 = self._pings[(nick, data)]
self.pong(user, time.time() - t0)
def ctcpUnknownReply(self, user, channel, tag, data):
"""Called when a fitting ctcpReply_ method is not found.
XXX: If the client makes arbitrary CTCP queries,
this method should probably show the responses to
        them instead of treating them as anomalies.
"""
log.msg("Unknown CTCP reply from %s: %s %s\n"
% (user, tag, data))
### Error handlers
### You may override these with something more appropriate to your UI.
def badMessage(self, line, excType, excValue, tb):
"""When I get a message that's so broken I can't use it.
"""
log.msg(line)
log.msg(string.join(traceback.format_exception(excType,
excValue,
tb),''))
def quirkyMessage(self, s):
"""This is called when I receive a message which is peculiar,
but not wholly indecipherable.
"""
log.msg(s + '\n')
    ### Protocol methods
def connectionMade(self):
self._queue = []
if self.performLogin:
self.register(self.nickname)
def dataReceived(self, data):
basic.LineReceiver.dataReceived(self, data.replace('\r', ''))
def lineReceived(self, line):
line = lowDequote(line)
try:
prefix, command, params = parsemsg(line)
if numeric_to_symbolic.has_key(command):
command = numeric_to_symbolic[command]
self.handleCommand(command, prefix, params)
except IRCBadMessage:
self.badMessage(line, *sys.exc_info())
def handleCommand(self, command, prefix, params):
"""Determine the function to call for the given command and call
it with the given arguments.
"""
method = getattr(self, "irc_%s" % command, None)
try:
if method is not None:
method(prefix, params)
else:
self.irc_unknown(prefix, command, params)
except:
log.deferr()
def __getstate__(self):
dct = self.__dict__.copy()
dct['dcc_sessions'] = None
dct['_pings'] = None
return dct
def dccParseAddress(address):
if '.' in address:
pass
else:
try:
address = long(address)
except ValueError:
raise IRCBadMessage,\
"Indecipherable address %r" % (address,)
else:
address = (
(address >> 24) & 0xFF,
(address >> 16) & 0xFF,
(address >> 8) & 0xFF,
address & 0xFF,
)
address = '.'.join(map(str,address))
return address
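# Illustrative sketch (not part of the original module): DCC addresses may
# arrive as a decimal unsigned 32-bit integer, which is unpacked into
# dotted-quad form; dotted-quad input passes through unchanged.
#
#   dccParseAddress('2130706433')  =>  '127.0.0.1'
#   dccParseAddress('127.0.0.1')   =>  '127.0.0.1'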
class DccFileReceiveBasic(protocol.Protocol, styles.Ephemeral):
"""Bare protocol to receive a Direct Client Connection SEND stream.
This does enough to keep the other guy talking, but you'll want to
extend my dataReceived method to *do* something with the data I get.
"""
bytesReceived = 0
def __init__(self, resumeOffset=0):
self.bytesReceived = resumeOffset
self.resume = (resumeOffset != 0)
def dataReceived(self, data):
"""Called when data is received.
Warning: This just acknowledges to the remote host that the
data has been received; it doesn't *do* anything with the
data, so you'll want to override this.
"""
self.bytesReceived = self.bytesReceived + len(data)
self.transport.write(struct.pack('!i', self.bytesReceived))
class DccSendProtocol(protocol.Protocol, styles.Ephemeral):
"""Protocol for an outgoing Direct Client Connection SEND.
"""
blocksize = 1024
file = None
bytesSent = 0
completed = 0
connected = 0
def __init__(self, file):
if type(file) is types.StringType:
self.file = open(file, 'r')
def connectionMade(self):
self.connected = 1
self.sendBlock()
def dataReceived(self, data):
        # XXX: Do we need to check to see if len(data) != fmtsize?
        # struct.unpack returns a one-element tuple; take its first element
        # so the integer comparisons below behave as intended.
        bytesShesGot = struct.unpack("!I", data)[0]
if bytesShesGot < self.bytesSent:
# Wait for her.
# XXX? Add some checks to see if we've stalled out?
return
elif bytesShesGot > self.bytesSent:
# self.transport.log("DCC SEND %s: She says she has %d bytes "
# "but I've only sent %d. I'm stopping "
# "this screwy transfer."
# % (self.file,
# bytesShesGot, self.bytesSent))
self.transport.loseConnection()
return
self.sendBlock()
def sendBlock(self):
block = self.file.read(self.blocksize)
if block:
self.transport.write(block)
self.bytesSent = self.bytesSent + len(block)
else:
# Nothing more to send, transfer complete.
self.transport.loseConnection()
self.completed = 1
def connectionLost(self, reason):
self.connected = 0
if hasattr(self.file, "close"):
self.file.close()
class DccSendFactory(protocol.Factory):
protocol = DccSendProtocol
def __init__(self, file):
self.file = file
def buildProtocol(self, connection):
p = self.protocol(self.file)
p.factory = self
return p
def fileSize(file):
"""I'll try my damndest to determine the size of this file object.
"""
size = None
if hasattr(file, "fileno"):
fileno = file.fileno()
try:
stat_ = os.fstat(fileno)
size = stat_[stat.ST_SIZE]
except:
pass
else:
return size
if hasattr(file, "name") and path.exists(file.name):
try:
size = path.getsize(file.name)
except:
pass
else:
return size
if hasattr(file, "seek") and hasattr(file, "tell"):
try:
try:
file.seek(0, 2)
size = file.tell()
finally:
file.seek(0, 0)
except:
pass
else:
return size
return size
class DccChat(basic.LineReceiver, styles.Ephemeral):
"""Direct Client Connection protocol type CHAT.
DCC CHAT is really just your run o' the mill basic.LineReceiver
protocol. This class only varies from that slightly, accepting
    either LF or CR LF for a line delimiter for incoming messages
while always using CR LF for outgoing.
The lineReceived method implemented here uses the DCC connection's
'client' attribute (provided upon construction) to deliver incoming
lines from the DCC chat via IRCClient's normal privmsg interface.
That's something of a spoof, which you may well want to override.
"""
queryData = None
delimiter = CR + NL
client = None
remoteParty = None
buffer = ""
def __init__(self, client, queryData=None):
"""Initialize a new DCC CHAT session.
queryData is a 3-tuple of
(fromUser, targetUserOrChannel, data)
as received by the CTCP query.
(To be honest, fromUser is the only thing that's currently
used here. targetUserOrChannel is potentially useful, while
        the 'data' argument is solely for informational purposes.)
"""
self.client = client
if queryData:
self.queryData = queryData
self.remoteParty = self.queryData[0]
def dataReceived(self, data):
self.buffer = self.buffer + data
lines = string.split(self.buffer, LF)
# Put the (possibly empty) element after the last LF back in the
# buffer
self.buffer = lines.pop()
for line in lines:
if line[-1] == CR:
line = line[:-1]
self.lineReceived(line)
def lineReceived(self, line):
log.msg("DCC CHAT<%s> %s" % (self.remoteParty, line))
self.client.privmsg(self.remoteParty,
self.client.nickname, line)
class DccChatFactory(protocol.ClientFactory):
protocol = DccChat
noisy = 0
def __init__(self, client, queryData):
self.client = client
self.queryData = queryData
    def buildProtocol(self, addr):
        p = self.protocol(client=self.client, queryData=self.queryData)
        p.factory = self
        return p
def clientConnectionFailed(self, unused_connector, unused_reason):
self.client.dcc_sessions.remove(self)
def clientConnectionLost(self, unused_connector, unused_reason):
self.client.dcc_sessions.remove(self)
def dccDescribe(data):
"""Given the data chunk from a DCC query, return a descriptive string.
"""
orig_data = data
data = string.split(data)
if len(data) < 4:
return orig_data
(dcctype, arg, address, port) = data[:4]
if '.' in address:
pass
else:
try:
address = long(address)
except ValueError:
pass
else:
address = (
(address >> 24) & 0xFF,
(address >> 16) & 0xFF,
(address >> 8) & 0xFF,
address & 0xFF,
)
# The mapping to 'int' is to get rid of those accursed
# "L"s which python 1.5.2 puts on the end of longs.
address = string.join(map(str,map(int,address)), ".")
if dcctype == 'SEND':
filename = arg
size_txt = ''
if len(data) >= 5:
try:
size = int(data[4])
size_txt = ' of size %d bytes' % (size,)
except ValueError:
pass
dcc_text = ("SEND for file '%s'%s at host %s, port %s"
% (filename, size_txt, address, port))
elif dcctype == 'CHAT':
dcc_text = ("CHAT for host %s, port %s"
% (address, port))
else:
dcc_text = orig_data
return dcc_text
class DccFileReceive(DccFileReceiveBasic):
"""Higher-level coverage for getting a file from DCC SEND.
I allow you to change the file's name and destination directory.
I won't overwrite an existing file unless I've been told it's okay
to do so. If passed the resumeOffset keyword argument I will attempt to
resume the file from that amount of bytes.
XXX: I need to let the client know when I am finished.
XXX: I need to decide how to keep a progress indicator updated.
XXX: Client needs a way to tell me \"Do not finish until I say so.\"
XXX: I need to make sure the client understands if the file cannot be written.
"""
filename = 'dcc'
fileSize = -1
destDir = '.'
overwrite = 0
fromUser = None
queryData = None
def __init__(self, filename, fileSize=-1, queryData=None,
destDir='.', resumeOffset=0):
DccFileReceiveBasic.__init__(self, resumeOffset=resumeOffset)
self.filename = filename
self.destDir = destDir
self.fileSize = fileSize
if queryData:
self.queryData = queryData
self.fromUser = self.queryData[0]
def set_directory(self, directory):
"""Set the directory where the downloaded file will be placed.
May raise OSError if the supplied directory path is not suitable.
"""
if not path.exists(directory):
raise OSError(errno.ENOENT, "You see no directory there.",
directory)
if not path.isdir(directory):
raise OSError(errno.ENOTDIR, "You cannot put a file into "
"something which is not a directory.",
directory)
if not os.access(directory, os.X_OK | os.W_OK):
raise OSError(errno.EACCES,
"This directory is too hard to write in to.",
directory)
self.destDir = directory
def set_filename(self, filename):
"""Change the name of the file being transferred.
This replaces the file name provided by the sender.
"""
self.filename = filename
def set_overwrite(self, boolean):
"""May I overwrite existing files?
"""
self.overwrite = boolean
# Protocol-level methods.
def connectionMade(self):
dst = path.abspath(path.join(self.destDir,self.filename))
exists = path.exists(dst)
if self.resume and exists:
# I have been told I want to resume, and a file already
# exists - Here we go
self.file = open(dst, 'ab')
log.msg("Attempting to resume %s - starting from %d bytes" %
(self.file, self.file.tell()))
elif self.overwrite or not exists:
self.file = open(dst, 'wb')
else:
raise OSError(errno.EEXIST,
"There's a file in the way. "
"Perhaps that's why you cannot open it.",
dst)
def dataReceived(self, data):
self.file.write(data)
DccFileReceiveBasic.dataReceived(self, data)
# XXX: update a progress indicator here?
def connectionLost(self, reason):
"""When the connection is lost, I close the file.
"""
self.connected = 0
logmsg = ("%s closed." % (self,))
if self.fileSize > 0:
logmsg = ("%s %d/%d bytes received"
% (logmsg, self.bytesReceived, self.fileSize))
if self.bytesReceived == self.fileSize:
pass # Hooray!
elif self.bytesReceived < self.fileSize:
logmsg = ("%s (Warning: %d bytes short)"
% (logmsg, self.fileSize - self.bytesReceived))
else:
logmsg = ("%s (file larger than expected)"
% (logmsg,))
else:
logmsg = ("%s %d bytes received"
% (logmsg, self.bytesReceived))
if hasattr(self, 'file'):
logmsg = "%s and written to %s.\n" % (logmsg, self.file.name)
if hasattr(self.file, 'close'): self.file.close()
# self.transport.log(logmsg)
def __str__(self):
if not self.connected:
return "<Unconnected DccFileReceive object at %x>" % (id(self),)
from_ = self.transport.getPeer()
if self.fromUser:
from_ = "%s (%s)" % (self.fromUser, from_)
s = ("DCC transfer of '%s' from %s" % (self.filename, from_))
return s
def __repr__(self):
s = ("<%s at %x: GET %s>"
% (self.__class__, id(self), self.filename))
return s
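# Usage sketch (values are illustrative; only the first element of
# queryData -- the sending user -- is interpreted by this class):
#   dcc = DccFileReceive('notes.txt', fileSize=1024,
#                        queryData=(user, channel, data))
#   dcc.set_directory('/tmp/downloads')
#   dcc.set_overwrite(1)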
# CTCP constants and helper functions
X_DELIM = chr(001)
def ctcpExtract(message):
"""Extract CTCP data from a string.
Returns a dictionary with two items:
- C{'extended'}: a list of CTCP (tag, data) tuples
    - C{'normal'}: a list of strings which were not inside a CTCP delimiter
"""
extended_messages = []
normal_messages = []
retval = {'extended': extended_messages,
'normal': normal_messages }
messages = string.split(message, X_DELIM)
odd = 0
    # X1 extended data X2 normal data X3 extended data X4 normal...
while messages:
if odd:
extended_messages.append(messages.pop(0))
else:
normal_messages.append(messages.pop(0))
odd = not odd
extended_messages[:] = filter(None, extended_messages)
normal_messages[:] = filter(None, normal_messages)
extended_messages[:] = map(ctcpDequote, extended_messages)
for i in xrange(len(extended_messages)):
m = string.split(extended_messages[i], SPC, 1)
tag = m[0]
if len(m) > 1:
data = m[1]
else:
data = None
extended_messages[i] = (tag, data)
return retval
# CTCP escaping
M_QUOTE = chr(020)
mQuoteTable = {
NUL: M_QUOTE + '0',
NL: M_QUOTE + 'n',
CR: M_QUOTE + 'r',
M_QUOTE: M_QUOTE + M_QUOTE
}
mDequoteTable = {}
for k, v in mQuoteTable.items():
mDequoteTable[v[-1]] = k
del k, v
mEscape_re = re.compile('%s.' % (re.escape(M_QUOTE),), re.DOTALL)
def lowQuote(s):
for c in (M_QUOTE, NUL, NL, CR):
s = string.replace(s, c, mQuoteTable[c])
return s
def lowDequote(s):
def sub(matchobj, mDequoteTable=mDequoteTable):
s = matchobj.group()[1]
try:
s = mDequoteTable[s]
except KeyError:
s = s
return s
return mEscape_re.sub(sub, s)
X_QUOTE = '\\'
xQuoteTable = {
X_DELIM: X_QUOTE + 'a',
X_QUOTE: X_QUOTE + X_QUOTE
}
xDequoteTable = {}
for k, v in xQuoteTable.items():
xDequoteTable[v[-1]] = k
xEscape_re = re.compile('%s.' % (re.escape(X_QUOTE),), re.DOTALL)
def ctcpQuote(s):
for c in (X_QUOTE, X_DELIM):
s = string.replace(s, c, xQuoteTable[c])
return s
def ctcpDequote(s):
def sub(matchobj, xDequoteTable=xDequoteTable):
s = matchobj.group()[1]
try:
s = xDequoteTable[s]
except KeyError:
s = s
return s
return xEscape_re.sub(sub, s)
def ctcpStringify(messages):
"""
@type messages: a list of extended messages. An extended
message is a (tag, data) tuple, where 'data' may be C{None}, a
string, or a list of strings to be joined with whitespace.
@returns: String
"""
coded_messages = []
for (tag, data) in messages:
if data:
if not isinstance(data, types.StringType):
try:
# data as list-of-strings
data = " ".join(map(str, data))
except TypeError:
                    # No? Then use its %s representation.
pass
m = "%s %s" % (tag, data)
else:
m = str(tag)
m = ctcpQuote(m)
m = "%s%s%s" % (X_DELIM, m, X_DELIM)
coded_messages.append(m)
line = string.join(coded_messages, '')
return line
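# Round-trip example (illustrative): stringify a tagged message, then
# extract it from a line that also carries normal text.
#   ctcpStringify([('ACTION', 'waves')])  -> '\x01ACTION waves\x01'
#   ctcpExtract('hi\x01ACTION waves\x01')
#   -> {'extended': [('ACTION', 'waves')], 'normal': ['hi']}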
# Constants (from RFC 2812)
RPL_WELCOME = '001'
RPL_YOURHOST = '002'
RPL_CREATED = '003'
RPL_MYINFO = '004'
RPL_BOUNCE = '005'
RPL_USERHOST = '302'
RPL_ISON = '303'
RPL_AWAY = '301'
RPL_UNAWAY = '305'
RPL_NOWAWAY = '306'
RPL_WHOISUSER = '311'
RPL_WHOISSERVER = '312'
RPL_WHOISOPERATOR = '313'
RPL_WHOISIDLE = '317'
RPL_ENDOFWHOIS = '318'
RPL_WHOISCHANNELS = '319'
RPL_WHOWASUSER = '314'
RPL_ENDOFWHOWAS = '369'
RPL_LISTSTART = '321'
RPL_LIST = '322'
RPL_LISTEND = '323'
RPL_UNIQOPIS = '325'
RPL_CHANNELMODEIS = '324'
RPL_NOTOPIC = '331'
RPL_TOPIC = '332'
RPL_INVITING = '341'
RPL_SUMMONING = '342'
RPL_INVITELIST = '346'
RPL_ENDOFINVITELIST = '347'
RPL_EXCEPTLIST = '348'
RPL_ENDOFEXCEPTLIST = '349'
RPL_VERSION = '351'
RPL_WHOREPLY = '352'
RPL_ENDOFWHO = '315'
RPL_NAMREPLY = '353'
RPL_ENDOFNAMES = '366'
RPL_LINKS = '364'
RPL_ENDOFLINKS = '365'
RPL_BANLIST = '367'
RPL_ENDOFBANLIST = '368'
RPL_INFO = '371'
RPL_ENDOFINFO = '374'
RPL_MOTDSTART = '375'
RPL_MOTD = '372'
RPL_ENDOFMOTD = '376'
RPL_YOUREOPER = '381'
RPL_REHASHING = '382'
RPL_YOURESERVICE = '383'
RPL_TIME = '391'
RPL_USERSSTART = '392'
RPL_USERS = '393'
RPL_ENDOFUSERS = '394'
RPL_NOUSERS = '395'
RPL_TRACELINK = '200'
RPL_TRACECONNECTING = '201'
RPL_TRACEHANDSHAKE = '202'
RPL_TRACEUNKNOWN = '203'
RPL_TRACEOPERATOR = '204'
RPL_TRACEUSER = '205'
RPL_TRACESERVER = '206'
RPL_TRACESERVICE = '207'
RPL_TRACENEWTYPE = '208'
RPL_TRACECLASS = '209'
RPL_TRACERECONNECT = '210'
RPL_TRACELOG = '261'
RPL_TRACEEND = '262'
RPL_STATSLINKINFO = '211'
RPL_STATSCOMMANDS = '212'
RPL_ENDOFSTATS = '219'
RPL_STATSUPTIME = '242'
RPL_STATSOLINE = '243'
RPL_UMODEIS = '221'
RPL_SERVLIST = '234'
RPL_SERVLISTEND = '235'
RPL_LUSERCLIENT = '251'
RPL_LUSEROP = '252'
RPL_LUSERUNKNOWN = '253'
RPL_LUSERCHANNELS = '254'
RPL_LUSERME = '255'
RPL_ADMINME = '256'
# RFC 2812 defines two distinct ADMINLOC replies; reusing a single
# RPL_ADMINLOC name would silently shadow '257' with '258'.
RPL_ADMINLOC1 = '257'
RPL_ADMINLOC2 = '258'
RPL_ADMINEMAIL = '259'
RPL_TRYAGAIN = '263'
ERR_NOSUCHNICK = '401'
ERR_NOSUCHSERVER = '402'
ERR_NOSUCHCHANNEL = '403'
ERR_CANNOTSENDTOCHAN = '404'
ERR_TOOMANYCHANNELS = '405'
ERR_WASNOSUCHNICK = '406'
ERR_TOOMANYTARGETS = '407'
ERR_NOSUCHSERVICE = '408'
ERR_NOORIGIN = '409'
ERR_NORECIPIENT = '411'
ERR_NOTEXTTOSEND = '412'
ERR_NOTOPLEVEL = '413'
ERR_WILDTOPLEVEL = '414'
ERR_BADMASK = '415'
ERR_UNKNOWNCOMMAND = '421'
ERR_NOMOTD = '422'
ERR_NOADMININFO = '423'
ERR_FILEERROR = '424'
ERR_NONICKNAMEGIVEN = '431'
ERR_ERRONEUSNICKNAME = '432'
ERR_NICKNAMEINUSE = '433'
ERR_NICKCOLLISION = '436'
ERR_UNAVAILRESOURCE = '437'
ERR_USERNOTINCHANNEL = '441'
ERR_NOTONCHANNEL = '442'
ERR_USERONCHANNEL = '443'
ERR_NOLOGIN = '444'
ERR_SUMMONDISABLED = '445'
ERR_USERSDISABLED = '446'
ERR_NOTREGISTERED = '451'
ERR_NEEDMOREPARAMS = '461'
ERR_ALREADYREGISTRED = '462'
ERR_NOPERMFORHOST = '463'
ERR_PASSWDMISMATCH = '464'
ERR_YOUREBANNEDCREEP = '465'
ERR_YOUWILLBEBANNED = '466'
ERR_KEYSET = '467'
ERR_CHANNELISFULL = '471'
ERR_UNKNOWNMODE = '472'
ERR_INVITEONLYCHAN = '473'
ERR_BANNEDFROMCHAN = '474'
ERR_BADCHANNELKEY = '475'
ERR_BADCHANMASK = '476'
ERR_NOCHANMODES = '477'
ERR_BANLISTFULL = '478'
ERR_NOPRIVILEGES = '481'
ERR_CHANOPRIVSNEEDED = '482'
ERR_CANTKILLSERVER = '483'
ERR_RESTRICTED = '484'
ERR_UNIQOPPRIVSNEEDED = '485'
ERR_NOOPERHOST = '491'
ERR_NOSERVICEHOST = '492'
ERR_UMODEUNKNOWNFLAG = '501'
ERR_USERSDONTMATCH = '502'
# And hey, as long as the strings are already intern'd...
symbolic_to_numeric = {
"RPL_WELCOME": '001',
"RPL_YOURHOST": '002',
"RPL_CREATED": '003',
"RPL_MYINFO": '004',
"RPL_BOUNCE": '005',
"RPL_USERHOST": '302',
"RPL_ISON": '303',
"RPL_AWAY": '301',
"RPL_UNAWAY": '305',
"RPL_NOWAWAY": '306',
"RPL_WHOISUSER": '311',
"RPL_WHOISSERVER": '312',
"RPL_WHOISOPERATOR": '313',
"RPL_WHOISIDLE": '317',
"RPL_ENDOFWHOIS": '318',
"RPL_WHOISCHANNELS": '319',
"RPL_WHOWASUSER": '314',
"RPL_ENDOFWHOWAS": '369',
"RPL_LISTSTART": '321',
"RPL_LIST": '322',
"RPL_LISTEND": '323',
"RPL_UNIQOPIS": '325',
"RPL_CHANNELMODEIS": '324',
"RPL_NOTOPIC": '331',
"RPL_TOPIC": '332',
"RPL_INVITING": '341',
"RPL_SUMMONING": '342',
"RPL_INVITELIST": '346',
"RPL_ENDOFINVITELIST": '347',
"RPL_EXCEPTLIST": '348',
"RPL_ENDOFEXCEPTLIST": '349',
"RPL_VERSION": '351',
"RPL_WHOREPLY": '352',
"RPL_ENDOFWHO": '315',
"RPL_NAMREPLY": '353',
"RPL_ENDOFNAMES": '366',
"RPL_LINKS": '364',
"RPL_ENDOFLINKS": '365',
"RPL_BANLIST": '367',
"RPL_ENDOFBANLIST": '368',
"RPL_INFO": '371',
"RPL_ENDOFINFO": '374',
"RPL_MOTDSTART": '375',
"RPL_MOTD": '372',
"RPL_ENDOFMOTD": '376',
"RPL_YOUREOPER": '381',
"RPL_REHASHING": '382',
"RPL_YOURESERVICE": '383',
"RPL_TIME": '391',
"RPL_USERSSTART": '392',
"RPL_USERS": '393',
"RPL_ENDOFUSERS": '394',
"RPL_NOUSERS": '395',
"RPL_TRACELINK": '200',
"RPL_TRACECONNECTING": '201',
"RPL_TRACEHANDSHAKE": '202',
"RPL_TRACEUNKNOWN": '203',
"RPL_TRACEOPERATOR": '204',
"RPL_TRACEUSER": '205',
"RPL_TRACESERVER": '206',
"RPL_TRACESERVICE": '207',
"RPL_TRACENEWTYPE": '208',
"RPL_TRACECLASS": '209',
"RPL_TRACERECONNECT": '210',
"RPL_TRACELOG": '261',
"RPL_TRACEEND": '262',
"RPL_STATSLINKINFO": '211',
"RPL_STATSCOMMANDS": '212',
"RPL_ENDOFSTATS": '219',
"RPL_STATSUPTIME": '242',
"RPL_STATSOLINE": '243',
"RPL_UMODEIS": '221',
"RPL_SERVLIST": '234',
"RPL_SERVLISTEND": '235',
"RPL_LUSERCLIENT": '251',
"RPL_LUSEROP": '252',
"RPL_LUSERUNKNOWN": '253',
"RPL_LUSERCHANNELS": '254',
"RPL_LUSERME": '255',
"RPL_ADMINME": '256',
"RPL_ADMINLOC": '257',
"RPL_ADMINLOC": '258',
"RPL_ADMINEMAIL": '259',
"RPL_TRYAGAIN": '263',
"ERR_NOSUCHNICK": '401',
"ERR_NOSUCHSERVER": '402',
"ERR_NOSUCHCHANNEL": '403',
"ERR_CANNOTSENDTOCHAN": '404',
"ERR_TOOMANYCHANNELS": '405',
"ERR_WASNOSUCHNICK": '406',
"ERR_TOOMANYTARGETS": '407',
"ERR_NOSUCHSERVICE": '408',
"ERR_NOORIGIN": '409',
"ERR_NORECIPIENT": '411',
"ERR_NOTEXTTOSEND": '412',
"ERR_NOTOPLEVEL": '413',
"ERR_WILDTOPLEVEL": '414',
"ERR_BADMASK": '415',
"ERR_UNKNOWNCOMMAND": '421',
"ERR_NOMOTD": '422',
"ERR_NOADMININFO": '423',
"ERR_FILEERROR": '424',
"ERR_NONICKNAMEGIVEN": '431',
"ERR_ERRONEUSNICKNAME": '432',
"ERR_NICKNAMEINUSE": '433',
"ERR_NICKCOLLISION": '436',
"ERR_UNAVAILRESOURCE": '437',
"ERR_USERNOTINCHANNEL": '441',
"ERR_NOTONCHANNEL": '442',
"ERR_USERONCHANNEL": '443',
"ERR_NOLOGIN": '444',
"ERR_SUMMONDISABLED": '445',
"ERR_USERSDISABLED": '446',
"ERR_NOTREGISTERED": '451',
"ERR_NEEDMOREPARAMS": '461',
"ERR_ALREADYREGISTRED": '462',
"ERR_NOPERMFORHOST": '463',
"ERR_PASSWDMISMATCH": '464',
"ERR_YOUREBANNEDCREEP": '465',
"ERR_YOUWILLBEBANNED": '466',
"ERR_KEYSET": '467',
"ERR_CHANNELISFULL": '471',
"ERR_UNKNOWNMODE": '472',
"ERR_INVITEONLYCHAN": '473',
"ERR_BANNEDFROMCHAN": '474',
"ERR_BADCHANNELKEY": '475',
"ERR_BADCHANMASK": '476',
"ERR_NOCHANMODES": '477',
"ERR_BANLISTFULL": '478',
"ERR_NOPRIVILEGES": '481',
"ERR_CHANOPRIVSNEEDED": '482',
"ERR_CANTKILLSERVER": '483',
"ERR_RESTRICTED": '484',
"ERR_UNIQOPPRIVSNEEDED": '485',
"ERR_NOOPERHOST": '491',
"ERR_NOSERVICEHOST": '492',
"ERR_UMODEUNKNOWNFLAG": '501',
"ERR_USERSDONTMATCH": '502',
}
numeric_to_symbolic = {}
for k, v in symbolic_to_numeric.items():
numeric_to_symbolic[v] = k
| hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/twisted/words/protocols/irc.py | Python | apache-2.0 | 83,138 |
# encoding: utf-8
# Copyright: 2013 Ivan Korobkov <ivan.korobkov@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pdef.types import Type, Message, Exc, Enum, Interface
from pdef.invoke import proxy
from pdef.formats import jsonformat
from pdef.rpc import rpc_client, rpc_handler, wsgi_app
from pdef.version import __version__
__title__ = 'pdef'
__author__ = 'Ivan Korobkov <ivan.korobkov@gmail.com>'
__license__ = 'Apache License 2.0'
__copyright__ = 'Copyright 2013 Ivan Korobkov'
| pdef/pdef-python | python/src/pdef/__init__.py | Python | apache-2.0 | 999 |
import inspect
import dill
import sys
import ast
from .custom_exceptions import *
from .connector import *
def format(sourceLines): # removes indentation
head = sourceLines[0]
while head[0] == ' ' or head[0] == '\t':
sourceLines = [l[1:] for l in sourceLines]
head = sourceLines[0]
return sourceLines
def get_source_code(func):
source_lines = inspect.getsourcelines(func)[0]
source_lines = format(source_lines)
if source_lines[0][0] == '@':
# if the first line is a decorator, remove it
source_lines = source_lines[1:]
source = ''.join(source_lines)
return source
def search(func, depth=1):
local_vars = sys._getframe(depth).f_locals
source = get_source_code(func)
tree = ast.parse(source)
child_funcs = []
for node in ast.walk(tree):
if isinstance(node, ast.Call):
if isinstance(node.func, ast.Name):
child_funcs.append(node.func.id)
elif (isinstance(node, ast.Name) and node.id in local_vars and callable(local_vars[node.id]) and node.id not in sys.builtin_module_names):
child_funcs.append(node.id)
child_load_str = ''
for child in child_funcs:
if child in local_vars:
try:
load_string = search(local_vars[child], depth=(depth + 1))
child_load_str += load_string + '\n'
except Exception as e:
pass
load_str = child_load_str + source
return load_str
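# Example (sketch): with the locals
#   def helper(x): return x + 1
#   def job(): return helper(41)
# search(job) walks job's AST, finds the call to helper among the caller's
# local variables, and returns helper's source followed by job's source,
# so locally defined helpers travel with the function to the remote worker.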
def print_new_files(new_files):
if new_files:
print('New file%s downloaded: %s' % ('' if len(new_files) == 1 else 's', str(new_files)[1:-1]))
def print_time_credit(job_hash):
duration, credit = get_time_and_credit(job_hash)
if duration == 0:
print('Your job took less than a minute, so it\'s free!')
else:
print('%s minute%s used, you have %s minute%s of credit remaining' % (
duration, '' if duration <= 1 else 's',
credit, '' if credit <= 1 else 's'))
def abort_and_print_credit():
if settings.CURRENT_JOB_HASH:
abort_job(settings.CURRENT_JOB_HASH)
print_time_credit(settings.CURRENT_JOB_HASH)
sys.tracebacklimit = 0
sys.exit()
# decorator that handles the possible errors
def add_error_handling(run_job_func):
def wrap(*args, **kwargs):
try:
return run_job_func(*args, **kwargs)
        # all keyboard interrupts during streaming are caught and re-raised
        # as JobInterruptedException; anything caught here happens during
        # upload or download, so we just abort
except KeyboardInterrupt as e:
print('\nJob aborted')
abort_and_print_credit()
# print('\nStreaming stopped, code is still running in the cloud')
# print('Your job hash is: %s' % settings.CURRENT_JOB_HASH)
except RequestFailedException as e:
print('Oops, something went wrong...')
print(e.error_msg)
print('Please try again later')
sys.tracebacklimit = 0
sys.exit()
# except JobInterruptedException as e:
# # extra newline incase no newline was printed
# # print('\nJob interrupted')
# # ans = input('Do you want to abort the job?\n')
# # if ans == 'yes':
# # abort_and_print_credit()
# # else:
# # print('Job is still running\n')
# # print('To reconnect to the job:\n')
# # print(' catalearn.reconnect()\n')
# # print('To stop the job:\n')
# # print(' catalearn.stop()\n')
# print('Job aborted')
# abort_and_print_credit()
return wrap
def decorate_gpu_func(func):
@add_error_handling
def gpu_func(*args, **kwargs):
data = {}
data['source'] = search(func, 3)
data['args'] = args
data['kwargs'] = kwargs
data['name'] = func.__name__
data_path = "uploads.pkl"
dill.dump(data, open(data_path, "wb"))
job_hash, has_idle_instance = get_available_instance()
# we set the global job_hash so that we know which job to abort if things go wrong
settings.CURRENT_JOB_HASH = job_hash
# no idle GPU available, catalearn is starting one,
# we need to ping it to see if it has started
if not has_idle_instance:
print("Starting server, this will take about 20 seconds")
ping_until_gpu_start(job_hash)
print("Server started")
gpu_ip, ws_port = get_ip_and_ws_port(job_hash)
print("Uploading data")
upload_data(gpu_ip, job_hash, data_path)
print("Job running:\n")
has_result = stream_output(gpu_ip, ws_port, job_hash)
print('\nJob finished')
if has_result:
print('Downloading result')
result, new_files = get_result(job_hash)
print("Done!")
print_new_files(new_files)
print_time_credit(job_hash)
return result
return gpu_func
# @add_error_handling
# def stop_job():
# running_jobs = get_running_jobs()
# if running_jobs:
# # only dealing with one job for now
# job_hash, _, _, _ = running_jobs[0]
# abort_job(job_hash)
# print('Job is Now stopped')
# else:
# print('No jobs running right now')
| Catalearn/catalearn | catalearn/runner.py | Python | mit | 5,437 |
# -*- coding: utf-8 -*-
#
# Copyright 2016-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for batch topic distributions' REST calls
https://bigml.com/developers/batchtopicdistributions
"""
try:
import simplejson as json
except ImportError:
import json
from bigml.api_handlers.resourcehandler import ResourceHandlerMixin
from bigml.api_handlers.resourcehandler import check_resource_type
from bigml.constants import BATCH_TOPIC_DISTRIBUTION_PATH, TOPIC_MODEL_PATH
class BatchTopicDistributionHandlerMixin(ResourceHandlerMixin):
"""This class is used by the BigML class as
a mixin that provides the REST calls models. It should not
be instantiated independently.
"""
def __init__(self):
"""Initializes the BatchTopidDistributionHandler. This class is
intended to be used as a mixin on ResourceHandler, that inherits its
attributes and basic method from BigMLConnection, and must not be
instantiated independently.
"""
self.batch_topic_distribution_url = self.prediction_base_url + \
BATCH_TOPIC_DISTRIBUTION_PATH
def create_batch_topic_distribution(self, topic_model, dataset,
args=None, wait_time=3, retries=10):
"""Creates a new batch topic distribution.
"""
create_args = {}
if args is not None:
create_args.update(args)
origin_resources_checked = self.check_origins(
dataset, topic_model, create_args, model_types=[TOPIC_MODEL_PATH],
wait_time=wait_time, retries=retries)
if origin_resources_checked:
body = json.dumps(create_args)
return self._create(self.batch_topic_distribution_url, body)
return
def get_batch_topic_distribution(self, batch_topic_distribution,
query_string=''):
"""Retrieves a batch topic distribution.
The batch_topic_distribution parameter should be a string
containing the batch_topic_distribution id or the dict
returned by create_batch_topic_distribution.
As batch_topic_distribution is an evolving object that is processed
until it reaches the FINISHED or FAULTY state, the function will
return a dict that encloses the batch_topic_distribution values
and state info available at the time it is called.
"""
check_resource_type(batch_topic_distribution,
BATCH_TOPIC_DISTRIBUTION_PATH,
message="A batch topic distribution id is needed.")
return self.get_resource(batch_topic_distribution,
query_string=query_string)
def download_batch_topic_distribution(self,
batch_topic_distribution,
filename=None, retries=10):
"""Retrieves the batch topic distribution file.
        Downloads topic distributions that are stored in a remote CSV file.
If a path is given in filename, the contents of the file are
downloaded and saved locally. A file-like object is returned
otherwise.
"""
check_resource_type(batch_topic_distribution,
BATCH_TOPIC_DISTRIBUTION_PATH,
message="A batch topic distribution id is needed.")
return self._download_resource(batch_topic_distribution, filename,
retries=retries)
def list_batch_topic_distributions(self, query_string=''):
"""Lists all your batch topic distributions.
"""
return self._list(self.batch_topic_distribution_url, query_string)
def update_batch_topic_distribution(self, batch_topic_distribution,
changes):
"""Updates a batch topic distributions.
"""
check_resource_type(batch_topic_distribution,
BATCH_TOPIC_DISTRIBUTION_PATH,
message="A batch topic distribution id is needed.")
return self.update_resource(batch_topic_distribution, changes)
def delete_batch_topic_distribution(self, batch_topic_distribution):
"""Deletes a batch topic distribution.
"""
check_resource_type(batch_topic_distribution,
BATCH_TOPIC_DISTRIBUTION_PATH,
message="A batch topic distribution id is needed.")
return self.delete_resource(batch_topic_distribution)
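# Usage sketch (resource ids are placeholders; BigML is the public API
# class that mixes this handler in):
#   from bigml.api import BigML
#   api = BigML()
#   batch = api.create_batch_topic_distribution(
#       'topicmodel/5331f7...', 'dataset/5331f6...', {'all_fields': True})
#   api.download_batch_topic_distribution(batch, filename='distributions.csv')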
| bigmlcom/python | bigml/api_handlers/batchtopicdistributionhandler.py | Python | apache-2.0 | 5,155 |
#bprop.py
#Author: Nicholas Smith
import numpy as np
#Array of layer sizes
ls = np.array([2, 4, 4, 1])
n = len(ls)
#List of weight matrices (each a numpy array)
W = []
#Initialize weights to small random values
for i in range(n - 1):
W.append(np.random.randn(ls[i], ls[i + 1]) * 0.1)
#List of bias vectors initialized to small random values
B = []
for i in range(1, n):
B.append(np.random.randn(ls[i]) * 0.1)
#List of output vectors
O = []
for i in range(n):
O.append(np.zeros([ls[i]]))
#List of Delta vectors
D = []
for i in range(1, n):
D.append(np.zeros(ls[i]))
#Input vectors (1 row per each)
A = np.matrix([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
#Target Vectors (1 row per each)
y = np.matrix([[-0.5], [0.5], [0.5], [-0.5]])
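#(the targets encode XOR of the two inputs, rescaled from {0, 1} to {-0.5, 0.5})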
#One activation per weight layer (n - 1 in total):
#tanh for each hidden layer, linear for the final layer
actF = []
dF = []
for i in range(n - 2):
    actF.append(lambda x: np.tanh(x))
    #Derivative of activation function in terms of itself
    dF.append(lambda y: 1 - np.square(y))
#Linear activation for final layer
actF.append(lambda x: x)
dF.append(lambda x: np.ones(x.shape))
#Learning rate
a = 0.5
#Number of iterations
numIter = 250
#Loop for each iteration
for c in range(numIter):
#loop over all input vectors
for i in range(len(A)):
print(str(i))
#Target vector
t = y[i, :]
#Feed-forward step
O[0] = A[i, :]
for j in range(n - 1):
O[j + 1] = actF[j](np.dot(O[j], W[j]) + B[j])
print('Out:' + str(O[-1]))
#Compute output node delta values
D[-1] = np.multiply((t - O[-1]), dF[-1](O[-1]))
#Compute hidden node deltas
for j in range(n - 2, 0, -1):
            #O[j] was produced by actF[j - 1], so use derivative dF[j - 1]
            D[j - 1] = np.multiply(np.dot(D[j], W[j].T), dF[j - 1](O[j]))
#Perform weight and bias updates
for j in range(n - 1):
W[j] = W[j] + a * np.outer(O[j], D[j])
B[j] = B[j] + a * D[j]
print('\nFinal weights:')
#Display final weights
for i in range(n - 1):
print('Layer ' + str(i + 1) + ':\n' + str(W[i]) + '\n')
| nicholastoddsmith/pythonml | bprop.py | Python | mit | 1,935 |
from base.iterativeRecommender import IterativeRecommender
import numpy as np
from util import config
from collections import defaultdict
from math import log,exp
from scipy.sparse import *
from scipy import *
class CoFactor(IterativeRecommender):
def __init__(self, conf, trainingSet=None, testSet=None, fold='[1]'):
super(CoFactor, self).__init__(conf, trainingSet, testSet, fold)
def readConfiguration(self):
super(CoFactor, self).readConfiguration()
extraSettings = config.OptionConf(self.config['CoFactor'])
self.negCount = int(extraSettings['-k']) #the number of negative samples
if self.negCount < 1:
self.negCount = 1
self.regR = float(extraSettings['-gamma'])
self.filter = int(extraSettings['-filter'])
def printAlgorConfig(self):
super(CoFactor, self).printAlgorConfig()
print('Specified Arguments of', self.config['model.name'] + ':')
print('k: %d' % self.negCount)
print('regR: %.5f' %self.regR)
print('filter: %d' %self.filter)
print('=' * 80)
def initModel(self):
super(CoFactor, self).initModel()
#constructing SPPMI matrix
self.SPPMI = defaultdict(dict)
print('Constructing SPPMI matrix...')
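        # SPPMI(i, j) = max(log(#(i, j) * D / (#(i) * #(j))) - log(k), 0),
        # where #(i, j) counts users who rated both items, #(i) sums item i's
        # co-occurrences, D is the grand total and k = self.negCount is the
        # shifted negative-sampling term; values are scaled to [0, 1] below.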
        #for larger datasets with many items, this process is time consuming
occurrence = defaultdict(dict)
i=0
for item1 in self.data.item:
i += 1
if i % 100 == 0:
print(str(i) + '/' + str(self.num_items))
uList1, rList1 = self.data.itemRated(item1)
if len(uList1) < self.filter:
continue
for item2 in self.data.item:
if item1 == item2:
continue
if item2 not in occurrence[item1]:
uList2, rList2 = self.data.itemRated(item2)
if len(uList2) < self.filter:
continue
count = len(set(uList1).intersection(set(uList2)))
if count > self.filter:
occurrence[item1][item2] = count
occurrence[item2][item1] = count
maxVal = 0
frequency = {}
for item1 in occurrence:
frequency[item1] = sum(occurrence[item1].values()) * 1.0
D = sum(frequency.values()) * 1.0
for item1 in occurrence:
for item2 in occurrence[item1]:
try:
val = max([log(occurrence[item1][item2] * D / (frequency[item1] * frequency[item2])) - log(
self.negCount), 0])
                except ValueError:
                    # log() failed; dump the raw co-occurrence statistics
                    print(occurrence[item1][item2])
                    print(occurrence[item1][item2] * D / (frequency[item1] * frequency[item2]))
                    continue
if val > 0:
if maxVal < val:
maxVal = val
self.SPPMI[item1][item2] = val
self.SPPMI[item2][item1] = self.SPPMI[item1][item2]
#normalize
for item1 in self.SPPMI:
for item2 in self.SPPMI[item1]:
self.SPPMI[item1][item2] = self.SPPMI[item1][item2]/maxVal
def buildModel(self):
        self.X = self.P * 10  # Theta
        self.Y = self.Q * 10  # Beta
self.w = np.random.rand(self.num_items) / 10 # bias value of item
self.c = np.random.rand(self.num_items) / 10 # bias value of context
self.G = np.random.rand(self.num_items, self.emb_size) / 10 # context embedding
print('training...')
epoch = 0
while epoch < self.maxEpoch:
self.loss = 0
YtY = self.Y.T.dot(self.Y)
for user in self.data.user:
# C_u = np.ones(self.data.getSize(self.recType))
H = np.ones(self.num_items)
val, pos = [],[]
P_u = np.zeros(self.num_items)
uid = self.data.user[user]
for item in self.data.trainSet_u[user]:
iid = self.data.item[item]
r_ui = float(self.data.trainSet_u[user][item])
pos.append(iid)
val.append(10 * r_ui)
H[iid] += 10 * r_ui
P_u[iid] = 1
error = (P_u[iid] - self.X[uid].dot(self.Y[iid]))
self.loss += pow(error, 2)
# sparse matrix
C_u = coo_matrix((val, (pos, pos)), shape=(self.num_items, self.num_items))
A = (YtY + np.dot(self.Y.T, C_u.dot(self.Y)) + self.regU * np.eye(self.emb_size))
self.X[uid] = np.dot(np.linalg.inv(A), (self.Y.T * H).dot(P_u))
XtX = self.X.T.dot(self.X)
for item in self.data.item:
P_i = np.zeros(self.num_users)
iid = self.data.item[item]
H = np.ones(self.num_users)
val,pos = [],[]
for user in self.data.trainSet_i[item]:
uid = self.data.user[user]
r_ui = float(self.data.trainSet_i[item][user])
pos.append(uid)
val.append(10 * r_ui)
H[uid] += 10 * r_ui
P_i[uid] = 1
matrix_g1 = np.zeros((self.emb_size, self.emb_size))
matrix_g2 = np.zeros((self.emb_size, self.emb_size))
vector_m1 = np.zeros(self.emb_size)
vector_m2 = np.zeros(self.emb_size)
update_w = 0
update_c = 0
if len(self.SPPMI[item])>0:
for context in self.SPPMI[item]:
cid = self.data.item[context]
gamma = self.G[cid]
beta = self.Y[cid]
matrix_g1 += gamma.reshape(self.emb_size, 1).dot(gamma.reshape(1, self.emb_size))
vector_m1 += (self.SPPMI[item][context]-self.w[iid]-
self.c[cid])*gamma
matrix_g2 += beta.reshape(self.emb_size, 1).dot(beta.reshape(1, self.emb_size))
vector_m2 += (self.SPPMI[item][context] - self.w[cid]
- self.c[iid]) * beta
update_w += self.SPPMI[item][context]-self.Y[iid].dot(gamma)-self.c[cid]
update_c += self.SPPMI[item][context]-beta.dot(self.G[iid])-self.w[cid]
C_i = coo_matrix((val, (pos, pos)), shape=(self.num_users, self.num_users))
A = (XtX + np.dot(self.X.T, C_i.dot(self.X)) + self.regU * np.eye(self.emb_size) + matrix_g1)
self.Y[iid] = np.dot(np.linalg.inv(A), (self.X.T * H).dot(P_i)+vector_m1)
if len(self.SPPMI[item]) > 0:
self.G[iid] = np.dot(np.linalg.inv(matrix_g2 + self.regR * np.eye(self.emb_size)), vector_m2)
self.w[iid] = update_w/len(self.SPPMI[item])
self.c[iid] = update_c/len(self.SPPMI[item])
epoch += 1
print('epoch:', epoch, 'loss:', self.loss)
def predictForRanking(self,u):
'invoked to rank all the items for the user'
if self.data.containsUser(u):
u = self.data.getUserId(u)
return self.Y.dot(self.X[u])
else:
return [self.data.globalMean] * self.num_items
| recq-cse/RecQ | model/ranking/CoFactor.py | Python | gpl-3.0 | 7,469 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr.schemata import commons
NETWORK_CREATE_SCHEMA = {
u'links': [{
u'method': u'POST',
u'href': u'/NetworkDriver.CreateNetwork',
u'description': u'Create a Network',
u'rel': u'self',
u'title': u'Create'
}],
u'title': u'Create network',
u'required': [u'NetworkID', u'IPv4Data', u'IPv6Data'],
u'definitions': {u'commons': {}},
u'$schema': u'http://json-schema.org/draft-04/hyper-schema',
u'type': u'object',
u'properties': {
u'NetworkID': {
u'description': u'ID of a Network to be created',
u'$ref': u'#/definitions/commons/definitions/id'
},
u'IPv4Data': {
u'description': u'IPv4 data for the network',
u'type': u'array',
u'items': {
u'$ref': u'#/definitions/commons/definitions/ipv4datum'
}
},
u'IPv6Data': {
u'description': u'IPv6 data for the network',
u'type': u'array',
u'items': {
u'$ref': u'#/definitions/commons/definitions/ipv6datum'
}
},
u'Options': {
u'type': [u'object', u'null'],
u'description': u'Options',
u'example': {}
}
}
}
NETWORK_CREATE_SCHEMA[u'definitions'][u'commons'] = commons.COMMONS
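# Example request body (sketch; the layout follows the libnetwork remote
# driver API and all values are placeholders):
#   {
#       "NetworkID": "dummy-network-id",
#       "IPv4Data": [{"AddressSpace": "", "Pool": "10.0.0.0/24",
#                     "Gateway": "10.0.0.1/24", "AuxAddresses": {}}],
#       "IPv6Data": [],
#       "Options": {}
#   }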
| midonet/kuryr | kuryr/schemata/network_create.py | Python | apache-2.0 | 1,892 |
from django.contrib.auth.models import User, Group
from django.contrib.auth import login, authenticate
from django.shortcuts import redirect
from disciplinesite.demo.models import *
from disciplinesite.tools import *
from discipline.models import *
def index(request):
grp = Group.objects.get(name="editors")
first = word(True)
last = word(True)
user = User(
first_name = first,
last_name = last,
email = "%s%s@example.com" % (first, last),
username = first + last,
is_staff = True
)
user.set_password("crimson")
user.save()
user.groups.add(grp)
editor = Editor.objects.create(user=user)
login(request, authenticate(username = first + last, password = "crimson"))
return redirect("/admin/")
| boronine/discipline | disciplinesite/demo/views.py | Python | mit | 778 |
#!/usr/bin/python
# android-build.py
# Build android
#
# use cocos console to build tests
import sys
import os, os.path
import shutil
from optparse import OptionParser
CPP_SAMPLES = ['cpp-empty-test', 'cpp-tests', 'game-controller-test']
LUA_SAMPLES = ['lua-empty-test', 'lua-tests', 'lua-game-controller-test']
ALL_SAMPLES = CPP_SAMPLES + LUA_SAMPLES
def calculate_built_samples(args):
    ''' Compute the samples to be built
    'cpp' is shorthand for all cpp tests
    'lua' is shorthand for all lua tests
    '''
if 'all' in args:
return ALL_SAMPLES
targets = []
if 'cpp' in args:
targets += CPP_SAMPLES
args.remove('cpp')
if 'lua' in args:
targets += LUA_SAMPLES
args.remove('lua')
targets += args
# remove duplicate elements, for example
# python android-build.py cpp hellocpp
targets = set(targets)
return list(targets)
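# e.g. calculate_built_samples(['cpp', 'lua-tests']) returns every CPP
# sample plus 'lua-tests', deduplicated (set order is not guaranteed)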
def do_build(app_android_root, build_mode):
command = 'cocos compile -p android -s %s --ndk-mode %s' % (app_android_root, build_mode)
print command
if os.system(command) != 0:
raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
def build_samples(target, build_mode):
    if build_mode != 'release':
        build_mode = 'debug'
    build_targets = calculate_built_samples(target)
app_android_root = ''
target_proj_path_map = {
"cpp-empty-test": "tests/cpp-empty-test",
"game-controller-test": "tests/game-controller-test",
"cpp-tests": "tests/cpp-tests",
"lua-empty-test": "tests/lua-empty-test/project",
"lua-tests": "tests/lua-tests/project",
"lua-game-controller-test": "tests/lua-game-controller-test/project"
}
cocos_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
for target in build_targets:
if target in target_proj_path_map:
app_android_root = os.path.join(cocos_root, target_proj_path_map[target])
else:
print 'unknown target: %s' % target
continue
do_build(app_android_root, build_mode)
# -------------- main --------------
if __name__ == '__main__':
#parse the params
usage = """
    This script is mainly used for building tests built-in with cocos2d-x.
Usage: %prog [options] [cpp-empty-test|cpp-tests|lua-empty-test|lua-tests|cpp|lua|all]
If you are new to cocos2d-x, I recommend you start with cpp-empty-test, lua-empty-test.
You can combine these targets like this:
python android-build.py cpp-empty-test lua-empty-test
"""
parser = OptionParser(usage=usage)
parser.add_option("-n", "--ndk", dest="ndk_build_param",
help='It is not used anymore, because cocos console does not support it.')
parser.add_option("-p", "--platform", dest="android_platform",
help='This parameter is not used any more, just keep compatible.')
parser.add_option("-b", "--build", dest="build_mode",
help='The build mode for java project,debug[default] or release. Get more information,please refer to http://developer.android.com/tools/building/building-cmdline.html')
(opts, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
else:
try:
build_samples(args, opts.build_mode)
except Exception as e:
print e
sys.exit(1)
| tofurama3000/The-Obscurity-Project | cocos2d/build/android-build.py | Python | mit | 3,466 |
from google.appengine.api import mail
from secrets import *
import logging
def send_email(to, subject, body):
logging.info('sending an email from ' + Secrets.EMAIL_FROM + ' to: ' + to)
message = mail.EmailMessage(sender=Secrets.EMAIL_FROM, subject=subject, body=body, to=to)
message.send()
def send_email_to_admins(subject, body):
logging.info('sending an email from ' + Secrets.EMAIL_FROM + ' to: ' + Secrets.ADMIN_EMAIL)
message = mail.AdminEmailMessage(sender=Secrets.EMAIL_FROM, subject=subject, body=body)
message.send()
| bbondy/brianbondy.gae | emailController.py | Python | mit | 540 |
"""
Test of the classical LM model for language modelling
"""
from groundhog.datasets import LMIterator
from groundhog.trainer.SGD_momentum import SGD as SGD_m
from groundhog.trainer.SGD import SGD
from groundhog.mainLoop import MainLoop
from groundhog.layers import MultiLayer, \
RecurrentMultiLayer, \
RecurrentMultiLayerInp, \
RecurrentMultiLayerShortPath, \
RecurrentMultiLayerShortPathInp, \
RecurrentMultiLayerShortPathInpAll, \
SoftmaxLayer, \
LastState,\
UnaryOp, \
Operator, \
Shift, \
GaussianNoise, \
SigmoidLayer
from groundhog.layers import maxpool, \
maxpool_ntimes, \
last, \
last_ntimes,\
tanh, \
sigmoid, \
rectifier,\
hard_sigmoid, \
hard_tanh
from groundhog.models import LM_Model
from theano.sandbox.scan import scan
import numpy
import theano
import theano.tensor as TT
linear = lambda x:x
theano.config.allow_gc = False
def get_text_data(state):
def out_format (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
def out_format_valid (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
train_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
seq_len = state['seqlen'],
mode="train",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format,
can_fit=True)
valid_data = LMIterator(
batch_size=state['bs'],
path=state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences = True,
seq_len= state['seqlen'],
mode="valid",
reset =state['reset'],
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
test_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences=True,
seq_len= state['seqlen'],
mode="test",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
if 'wiki' in state['path']:
test_data = None
return train_data, valid_data, test_data
def jobman(state, channel):
# load dataset
rng = numpy.random.RandomState(state['seed'])
# declare the dimensionalies of the input and output
if state['chunks'] == 'words':
state['n_in'] = 10000
state['n_out'] = 10000
else:
state['n_in'] = 50
state['n_out'] = 50
train_data, valid_data, test_data = get_text_data(state)
## BEGIN Tutorial
### Define Theano Input Variables
x = TT.lvector('x')
y = TT.lvector('y')
h0 = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
### Neural Implementation of the Operators: \oplus
#### Word Embedding
emb_words = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inp_nhids']),
activation=eval(state['inp_activ']),
init_fn='sample_weights_classic',
weight_noise=state['weight_noise'],
rank_n_approx = state['rank_n_approx'],
scale=state['inp_scale'],
sparsity=state['inp_sparse'],
learn_bias = True,
bias_scale=eval(state['inp_bias']),
name='emb_words')
#### Deep Transition Recurrent Layer
rec = eval(state['rec_layer'])(
rng,
eval(state['nhids']),
activation = eval(state['rec_activ']),
#activation = 'TT.nnet.sigmoid',
bias_scale = eval(state['rec_bias']),
scale=eval(state['rec_scale']),
sparsity=eval(state['rec_sparse']),
init_fn=eval(state['rec_init']),
weight_noise=state['weight_noise'],
name='rec')
#### Stiching them together
##### (1) Get the embedding of a word
x_emb = emb_words(x, no_noise_bias=state['no_noise_bias'])
##### (2) Embedding + Hidden State via DT Recurrent Layer
reset = TT.scalar('reset')
rec_layer = rec(x_emb, n_steps=x.shape[0],
init_state=h0*reset,
no_noise_bias=state['no_noise_bias'],
truncate_gradient=state['truncate_gradient'],
batch_size=1)
### Neural Implementation of the Operators: \lhd
#### Softmax Layer
output_layer = SoftmaxLayer(
rng,
eval(state['nhids'])[-1],
state['n_out'],
scale=state['out_scale'],
bias_scale=state['out_bias_scale'],
init_fn="sample_weights_classic",
weight_noise=state['weight_noise'],
sparsity=state['out_sparse'],
sum_over_time=True,
name='out')
### Few Optional Things
#### Direct shortcut from x to y
if state['shortcut_inpout']:
shortcut = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inpout_nhids']),
activations=eval(state['inpout_activ']),
init_fn='sample_weights_classic',
weight_noise = state['weight_noise'],
scale=eval(state['inpout_scale']),
sparsity=eval(state['inpout_sparse']),
learn_bias=eval(state['inpout_learn_bias']),
bias_scale=eval(state['inpout_bias']),
name='shortcut')
#### Learning rate scheduling (1/(1+n/beta))
state['clr'] = state['lr']
def update_lr(obj, cost):
stp = obj.step
if isinstance(obj.state['lr_start'], int) and stp > obj.state['lr_start']:
time = float(stp - obj.state['lr_start'])
new_lr = obj.state['clr']/(1+time/obj.state['lr_beta'])
obj.lr = new_lr
if state['lr_adapt']:
rec.add_schedule(update_lr)
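    # e.g. with the defaults below (lr = 1., lr_beta = 19000.) the schedule
    # lr(t) = lr / (1 + t / lr_beta) halves the learning rate 19000 updates
    # after lr_start triggers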
### Neural Implementations of the Language Model
#### Training
if state['shortcut_inpout']:
train_model = output_layer(rec_layer,
no_noise_bias=state['no_noise_bias'],
additional_inputs=[shortcut(x)]).train(target=y,
scale=numpy.float32(1./state['seqlen']))
else:
train_model = output_layer(rec_layer,
no_noise_bias=state['no_noise_bias']).train(target=y,
scale=numpy.float32(1./state['seqlen']))
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
if state['carry_h0']:
train_model.updates += [(h0, nw_h0)]
#### Validation
h0val = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
rec_layer = rec(emb_words(x, use_noise=False),
n_steps = x.shape[0],
batch_size=1,
init_state=h0val*reset,
use_noise=False)
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
if not state['shortcut_inpout']:
valid_model = output_layer(rec_layer,
use_noise=False).validate(target=y, sum_over_time=True)
else:
valid_model = output_layer(rec_layer,
additional_inputs=[shortcut(x, use_noise=False)],
use_noise=False).validate(target=y, sum_over_time=True)
valid_updates = []
if state['carry_h0']:
valid_updates = [(h0val, nw_h0)]
valid_fn = theano.function([x,y, reset], valid_model.cost,
name='valid_fn', updates=valid_updates)
#### Sampling
##### single-step sampling
def sample_fn(word_tm1, h_tm1):
x_emb = emb_words(word_tm1, use_noise = False, one_step=True)
h0 = rec(x_emb, state_before=h_tm1, one_step=True, use_noise=False)[-1]
word = output_layer.get_sample(state_below=h0, temp=1.)
return word, h0
##### scan for iterating the single-step sampling multiple times
[samples, summaries], updates = scan(sample_fn,
states = [
TT.alloc(numpy.int64(0), state['sample_steps']),
TT.alloc(numpy.float32(0), 1, eval(state['nhids'])[-1])],
n_steps= state['sample_steps'],
name='sampler_scan')
##### define a Theano function
sample_fn = theano.function([], [samples],
updates=updates, profile=False, name='sample_fn')
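    ##### `samples` accumulates state['sample_steps'] sampled word ids; the
    ##### second state threads the recurrent hidden state between steps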
##### Load a dictionary
dictionary = numpy.load(state['dictionary'])
if state['chunks'] == 'chars':
dictionary = dictionary['unique_chars']
else:
dictionary = dictionary['unique_words']
def hook_fn():
sample = sample_fn()[0]
print 'Sample:',
if state['chunks'] == 'chars':
print "".join(dictionary[sample])
else:
for si in sample:
print dictionary[si],
print
### Build and Train a Model
#### Define a model
model = LM_Model(
cost_layer = train_model,
weight_noise_amount=state['weight_noise_amount'],
valid_fn = valid_fn,
clean_before_noise_fn = False,
noise_fn = None,
rng = rng)
if state['reload']:
model.load(state['prefix']+'model.npz')
#### Define a trainer
##### Training algorithm (SGD)
if state['moment'] < 0:
algo = SGD(model, state, train_data)
else:
algo = SGD_m(model, state, train_data)
##### Main loop of the trainer
main = MainLoop(train_data,
valid_data,
test_data,
model,
algo,
state,
channel,
train_cost = False,
hooks = hook_fn,
validate_postprocess = eval(state['validate_postprocess']))
main.main()
## END Tutorial
if __name__=='__main__':
state = {}
# complete path to data (cluster specific)
state['seqlen'] = 100
    state['path'] = "/data/lisa/data/PennTreebankCorpus/pentree_char_and_word.npz"
state['dictionary']= "/data/lisa/data/PennTreebankCorpus/dictionaries.npz"
state['chunks'] = 'chars'
state['seed'] = 123
    # flag .. no need to change it. It says what to do if the cost comes
    # out as nan .. you could raise, though I would leave it as it is
state['on_nan'] = 'warn'
# DATA
    # For wikipedia the validation set is extremely large, which is very
    # time wasteful. This value is only used for the validation set, and
    # IMHO it should be something like seqlen * 10000 (i.e. the validation
    # should be only 10000 steps)
state['reset'] = -1
    # For music / word-level I think 50 is a good idea. For characters this
    # should be at least 100 (I think there are problems with getting state
    # of the art otherwise). Note most people use 200!
    # The job stops when the learning rate declines to this value. It can be
    # useful, because sometimes it is hopeless to wait for the validation
    # error to get below minerr, or for the time to expire
state['minlr'] = float(5e-7)
# Layers
# Input
# Input weights are sampled from a gaussian with std=scale; this is the
# standard way to initialize
state['rank_n_approx'] = 0
state['inp_nhids'] = '[400]'
state['inp_activ'] = '[linear]'
state['inp_bias'] = '[0.]'
state['inp_sparse']= -1 # dense
state['inp_scale'] = .1
# This is for the output weights
state['out_scale'] = .1
state['out_bias_scale'] = -.5
state['out_sparse'] = -1
# HidLayer
    # Hidden units for the internal layers of DT-RNN. Having a single
# value results in a standard RNN
state['nhids'] = '[200, 200]'
# Activation of each layer
state['rec_activ'] = '"TT.nnet.sigmoid"'
state['rec_bias'] = '.0'
state['rec_sparse'] ='20'
state['rec_scale'] = '1.'
# sample_weights - you rescale the weights such that the largest
# singular value is scale
# sample_weights_classic : just sample weights from a gaussian with std
# equal to scale
state['rec_init'] = "'sample_weights'"
state['rec_layer'] = 'RecurrentMultiLayerShortPathInpAll'
# SGD params
state['bs'] = 1 # the size of the minibatch
    state['lr'] = 1. # initial learning rate
state['cutoff'] = 1. # threshold for gradient rescaling
state['moment'] = 0.995 #-.1 # momentum
# Do not optimize these
state['weight_noise'] = True # white Gaussian noise in weights
state['weight_noise_amount'] = 0.075 # standard deviation
# maximal number of updates
state['loopIters'] = int(1e8)
# maximal number of minutes to wait until killing job
state['timeStop'] = 48*60 # 48 hours
# Construct linear connections from input to output. These are factored
# (like the rank_n) to deal with the possible high dimensionality of the
# input, but it is a linear projection that feeds into the softmax
state['shortcut_inpout'] = False
state['shortcut_rank'] = 200
# Main Loop
    # Make this a decently large value. Otherwise you waste a lot of
# memory keeping track of the training error (and other things) at each
# step + the stdout becomes extremely large
state['trainFreq'] = 100
state['hookFreq'] = 5000
state['validFreq'] = 1000
state['saveFreq'] = 15 # save every 15 minutes
state['prefix'] = 'model_' # prefix of the save files
state['reload'] = False # reload
state['overwrite'] = 1
    # Threshold should be 1.004 for PPL; for entropy (which is what
    # everything returns) it should be much smaller. Running value is 1.0002
# We should not hyperoptimize this
state['divide_lr'] = 2.
state['cost_threshold'] = 1.0002
state['patience'] = 1
state['validate_postprocess'] = 'lambda x:10**(x/numpy.log(10))'
state['truncate_gradient'] = 80 # truncated BPTT
state['lr_adapt'] = 0 # 1/(1 + n/n0) scheduling
state['lr_beta'] = 10*1900.
state['lr_start'] = 'on_error'
state['no_noise_bias'] = True # do not use weight noise for biases
state['carry_h0'] = True # carry over h0 across updates
state['sample_steps'] = 80
# Do not change these
state['minerr'] = -1
state['shift'] = 1 # n-step forward prediction
state['cutoff_rescale_length'] = False
jobman(state, None)
| vseledkin/LV_groundhog | tutorials/DT_RNN_Tut.py | Python | bsd-3-clause | 14,392 |
#!/usr/bin/env python3
from __future__ import print_function, division
from rnnlm_ops import RnnlmOp, run_epoch
from dataset import SingleSentenceData
from config import Config
import sys
from util import SpeedCounter
import tensorflow as tf
class Loglikes(RnnlmOp):
def __init__(self, config, params):
super(Loglikes, self).__init__(config, params)
self.build_graph()
def _build_graph(self):
config = self.config
eval_config = Config(clone=config)
eval_config.batch_size = 1
initializer = self.model_initializer
with tf.name_scope("Test"):
with tf.variable_scope("Model", reuse=False, initializer=initializer):
self.test_model = self.Model(config=eval_config, is_training=False)
def _run(self):
with tf.Session() as session:
self.io.restore_session(session)
singsen = SingleSentenceData()
scounter = SpeedCounter().start()
while True:
senlen = singsen.read_from_file(sys.stdin, self.io.w2id)
if senlen is None:
break
if senlen < 2:
print(-9999)
continue
o = run_epoch(session, self.test_model, singsen)
scounter.next()
        if self.params.progress and scounter.val % 20 == 0:
print("\rLoglikes per secs: %f" % scounter.speed, end="", file=sys.stderr)
print("%f" % o)
if __name__ == "__main__":
import flags
ll = Loglikes(flags.config, flags.FLAGS)
ll()
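# Usage sketch (the exact flag names live in flags.py and may differ):
#   echo "some test sentence" | python loglikes.py --model_dir model/
# prints one log-likelihood per input line; sentences shorter than two
# tokens are scored with the sentinel value -9999.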
| pltrdy/tf_rnnlm | loglikes.py | Python | apache-2.0 | 1,460 |
from datetime import datetime
import logging
from sqlalchemy import Table, Column, ForeignKey, or_
from sqlalchemy import DateTime, Integer, Unicode
import meta
log = logging.getLogger(__name__)
twitter_table = Table(
'twitter', meta.data,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('delete_time', DateTime, nullable=True),
Column('user_id', Integer, ForeignKey('user.id'), nullable=False),
Column('twitter_id', Integer),
Column('key', Unicode(255), nullable=False),
Column('secret', Unicode(255), nullable=False),
Column('screen_name', Unicode(255), nullable=False),
Column('priority', Integer, default=4)
)
class Twitter(object):
def __init__(self, twitter_id, user, screen_name, key, secret):
self.twitter_id = twitter_id
self.user = user
self.screen_name = screen_name
self.key = key
self.secret = secret
@classmethod
def find(cls, screen_name, include_deleted=False):
try:
q = meta.Session.query(Twitter)
q = q.filter(Twitter.screen_name == screen_name)
if not include_deleted:
q = q.filter(or_(Twitter.delete_time == None,
Twitter.delete_time > datetime.utcnow()))
return q.one()
except Exception, e:
log.warn("find(%s): %s" % (screen_name, e))
return None
def delete(self, delete_time=None):
if delete_time is None:
delete_time = datetime.utcnow()
if self.delete_time is None:
self.delete_time = delete_time
def is_deleted(self, at_time=None):
if at_time is None:
at_time = datetime.utcnow()
return (self.delete_time is not None) and \
self.delete_time <= at_time
def __repr__(self):
return u"<Twitter(%d,%d,%s,%s)>" % (self.id,
self.twitter_id,
self.user.user_name,
self.screen_name)
| whausen/part | src/adhocracy/model/twitter.py | Python | agpl-3.0 | 2,128 |
#!/usr/bin/env python3
# encoding: utf-8
"""
test_user_db.py
Created by Aaron Crosman on 2015-02-18.
This file is part of historia.
historia is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
historia is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with historia. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
import logging, sys, datetime
import mysql.connector
from database import settings
from database import core_data_objects
from database import exceptions
from database import user_db
import tests.helper_functions
class TestUserDatabase(unittest.TestCase):
config_location = 'tests/test_config'
@classmethod
def setUpClass(cls):
cls.config = tests.helper_functions.load_configuration(cls.config_location)
def setUp(self):
self.config = TestUserDatabase.config
self.key_file = self.config['server']['aes_key_file']
self.test_master_db_name = self.config['database']["main_database"]
self.test_user_db_name = '_'.join([self.config['database']["user_database_name_prefix"], 'user_db'])
self.default_db_settings = {
'user': self.config['database']['user'],
'password': self.config['database']['password'],
'host': self.config['database']['host'],
'database': '',
'raise_on_warnings': self.config['database']["raise_on_warnings"]
}
self.db = core_data_objects.HistoriaDatabase(self.test_master_db_name)
self.db.connection_settings = self.default_db_settings
statements = self.db.generate_database_SQL()
self.db.connect()
cur = self.db.cursor()
for state in statements:
try:
cur.execute(state[0], state[1])
self.db.commit()
except mysql.connector.Error as err:
self.fail("Unable to create master testing database: {0} \n while executing: {1}".format(err, state[0]))
def tearDown(self):
try:
# Make a good faith effort to clean up any database we created along the way.
if self.db.connected:
try:
cur = self.db.cursor()
cur.execute("DROP DATABASE `{0}`".format(self.test_master_db_name))
self.db.commit()
cur.execute("DROP DATABASE `{0}`".format(self.test_user_db_name))
self.db.commit()
self.db.disconnect()
except Exception as err:
#Say something if we fail in the hopes some fool reads the output...
print("Unable to drop test database: {0} due to {1}".format(self.testdb_name, err))
except:
pass
def create_record_table(self):
self.db.connect()
cur = self.db.cursor()
cur.execute("USE {0}".format(self.test_master_db_name))
statements = user_db.HistoriaUserDatabase.generate_SQL()
for state in statements:
try:
cur.execute(state[0], state[1])
self.db.commit()
except mysql.connector.Error as err:
self.fail("Unable to create testing database: {0} \n while executing: {1}".format(err, state[0]))
def user_database_setup(self):
self.db.connect()
cur = self.db.cursor()
statements = user_db.HistoriaUserDatabase.generate_database_SQL()
for state in statements:
try:
cur.execute(state[0], state[1])
self.db.commit()
except mysql.connector.Error as err:
self.fail("Unable to create master testing database: {0} \n while executing: {1}".format(err, state[0]))
def test_00_classVariables(self):
"""UserDatabase: classVariables"""
self.assertEqual(user_db.HistoriaUserDatabase.type_label, "Historia User Database", "User DB label is wrong")
self.assertEqual(user_db.HistoriaUserDatabase.machine_type, "historia_user_database", "User DB machine type is wrong")
def test_10_construct(self):
"""UserDatabase: __init__()"""
db = user_db.HistoriaUserDatabase(self.db, self.test_user_db_name, self.key_file)
self.assertIsInstance(db._logger, logging.Logger, "Default logger isn't a logger")
self.assertEqual(db.name, self.test_user_db_name.lower(), "Name passed to object didn't make it")
self.assertEqual(db._id, -1, "ID should be -1 for user databases")
self.assertEqual(len(db.connection_settings), 5, "Incorrect number of DB settings")
self.assertEqual(db.database_defaults['charset'], 'utf8', 'User database should always use UTF-8')
self.assertIsNone(db.connection, "Where did the database get a connection object already")
def test_15_internals(self):
"""UserDatabase: __setattr__"""
udb = user_db.HistoriaUserDatabase(self.db, self.test_user_db_name, self.key_file)
with self.assertRaises(AttributeError):
udb.bogus_field = "Junk Data"
attrs = ['name', 'db_user', 'db_address', 'created', 'last_record_update', 'last_login']
# All of the listed fields on a User should raise a ValueError when they are fed an integer
for attr in attrs:
with self.assertRaises(ValueError):
setattr(udb, attr, 123243)
udb._anything = "ok"
self.assertEqual(udb._anything, "ok", "Assignment of _ variables works fine...except that they fail all the time")
current_stamp = datetime.datetime.now()
udb.name = "monty_db"
udb.db_user = "monty"
udb.db_password = "Plain text password"
udb.db_address = "127.0.0.1"
udb.created = current_stamp
udb.last_login = current_stamp
udb.enabled = True
self.assertEqual(-1, udb.id, "ID is still -1")
self.assertEqual(udb.name, "monty_db", "Assignment of setting name failed.")
self.assertEqual(udb.db_password, "Plain text password", "Assignment of password failed")
self.assertEqual(udb.db_address, "127.0.0.1", "Assignment of setting address failed.")
self.assertEqual(udb.created, current_stamp, "Assignment of setting created timestamp failed.")
self.assertEqual(udb.last_login, current_stamp, "Assignment of setting access timestamp failed.")
self.assertEqual(udb.enabled, True, "Assignment of setting enabled failed.")
self.assertEqual(udb.db_user, 'monty', "Assignment of setting user failed.")
def test_20_generate_DB_SQL(self):
"""UserDatabase: generate database SQL statements"""
udb = user_db.HistoriaUserDatabase(self.db, self.test_user_db_name, self.key_file)
statements = udb.generate_database_SQL()
self.assertEqual(len(statements), (len(udb.member_classes)*2)+2, "There should be 2 statements for each class + 2 for the database itself")
self.assertIn(self.test_user_db_name, statements[0][0], "DB name not in db create statement")
self.assertIn(self.test_user_db_name, statements[1][0], "DB name not in db use statement")
def test_25_generate_table_SQL(self):
"""UserDatabase: generateSQL for the record's table"""
statements = user_db.HistoriaUserDatabase.generate_SQL()
self.assertIsInstance(statements, tuple, "Statements should come back as a tuple.")
self.assertEqual(len(statements),2, "There should be two statements")
self.assertEqual(statements[0][0],"DROP TABLE IF EXISTS `{0}`".format(user_db.HistoriaUserDatabase.machine_type), "Openning of the first statement is wrong")
self.assertIn(user_db.HistoriaUserDatabase.machine_type, statements[1][0], "table name not in the create table statement")
# We have the statements, let's try to use them
self.db.connect()
cur = self.db.cursor()
cur.execute("USE {0}".format(self.test_master_db_name))
for state in statements:
try:
cur.execute(state[0], state[1])
self.db.commit()
except mysql.connector.Error as err:
self.fail("Unable to create testing database: {0} \n while executing: {1}".format(err, state[0]))
def test_30_save(self):
"""UserDatabase: save()"""
udb = user_db.HistoriaUserDatabase(self.db, self.test_user_db_name, self.key_file)
self.assertRaises(exceptions.DataSaveError, udb.save)
# Create the required table and try again
self.create_record_table()
current_stamp = datetime.datetime.now()
udb.name = "monty_db"
udb.db_user = "monty"
udb.db_address = "127.0.0.1"
udb.db_password = "Plain text password"
udb.created = current_stamp
udb.last_login = current_stamp
udb.enabled = True
self.assertTrue(udb._dirty, "Dirty bit not active but data changed")
self.assertRaises(exceptions.DataSaveError, udb.save) # still can't save because we're lacking a UID
udb.uid = 1
udb.save()
self.assertFalse(udb._dirty, "Dirty bit active after save")
self.assertNotEqual(udb.id, -1, "Record ID still -1 after save.")
# Now let's go see if it's really there
select = ("SELECT * FROM `{0}`".format(user_db.HistoriaUserDatabase.machine_type),{})
result = self.db.execute_select(select)
self.assertEqual(len(result), 1, "There should be 1 and only 1 entry in the table.")
self.assertEqual(result[0]['name'], udb.name, "name in the table should match the name on the record.")
self.assertNotEqual(result[0]['db_password'], udb.db_password, "password in the table should not match the one on the record.")
self.assertEqual(result[0]['db_user'], udb.db_user, "db_user in the table should match the one on the record.")
self.assertAlmostEqual(result[0]['created'], udb.created, delta=datetime.timedelta(seconds=1), msg="created in the table should match the one on the record.")
self.assertAlmostEqual(result[0]['last_login'], udb.last_login, delta=datetime.timedelta(seconds=1), msg="last_login in the table should match the one on the record.")
self.assertEqual(result[0]['enabled'], udb.enabled, "enabled in the table should match the one on the record.")
self.assertEqual(result[0]['db_address'], udb.db_address, "db_address in the table should match the one on the record.")
def test_40_load(self):
"""UserDatabase: load()"""
self.create_record_table()
udb = user_db.HistoriaUserDatabase(self.db, self.test_user_db_name, self.key_file)
current_stamp = datetime.datetime.now()
udb.name = "monty_db"
udb.db_user = "monty"
udb.db_address = "127.0.0.1"
udb.db_password = "Plain text password"
udb.created = current_stamp
udb.last_login = current_stamp
udb.enabled = True
udb.uid = 1
self.assertTrue(udb._dirty, "Dirty bit not active but data changed")
udb.save()
udb2 = user_db.HistoriaUserDatabase(self.db, self.test_user_db_name, self.key_file)
udb2.load(udb.id)
self.assertEqual(udb.id, udb2.id, "IDs on original and loaded object don't match")
self.assertFalse(udb2._dirty, "The dirty bit is wrong after load.")
self.assertEqual(udb2, udb, "The two copies of the record should consider themselves equal.")
self.assertEqual(udb2.name, udb.name, "name in the table should match the name on the record.")
self.assertEqual(udb2.uid, udb.uid, "uid in the table should match the uid on the record.")
self.assertEqual(udb2.db_password, udb.db_password, "password in the table should match the one on the record.")
self.assertEqual(udb2.db_user, udb.db_user, "db_user in the table should match the one on the record.")
self.assertAlmostEqual(udb2.created, udb.created, delta=datetime.timedelta(seconds=1), msg="created in the table should match the one on the record.")
self.assertAlmostEqual(udb2.last_login, udb.last_login, delta=datetime.timedelta(seconds=1), msg="last_login in the table should match the one on the record.")
self.assertEqual(udb2.enabled, udb.enabled, "enabled in the table should match the one on the record.")
self.assertEqual(udb2.db_address, udb.db_address, "db_address in the table should match the one on the record.")
def test_50_delete(self):
"""UserDatabase: delete()"""
self.create_record_table()
udb = user_db.HistoriaUserDatabase(self.db, self.test_user_db_name, self.key_file)
current_stamp = datetime.datetime.now()
udb.name = "monty_db"
udb.db_user = "monty"
udb.db_address = "127.0.0.1"
udb.db_password = "Plain text password"
udb.created = current_stamp
udb.last_login = current_stamp
udb.enabled = True
udb.uid = 1
self.assertTrue(udb._dirty, "Dirty bit not active but data changed")
udb.save()
udb.delete()
# Now let's go see if it's really there
select = ("SELECT * FROM `{0}`".format(user_db.HistoriaUserDatabase.machine_type),{})
result = self.db.execute_select(select)
        self.assertEqual(len(result), 0, "There should be nothing in the table now.")
self.assertEqual(-1, udb.id, "The ID should reset to -1")
def test_60_connect(self):
"""UserDatabase: Connect to db"""
udb = user_db.HistoriaUserDatabase(self.db, self.test_user_db_name, self.key_file)
udb.db_user = self.config['database']['user']
udb.db_password = self.config['database']['password']
udb.db_address = self.config['database']['host']
udb.name = self.test_master_db_name #Normally this would be wrong, but it's needed for testing.
udb.connect()
self.assertTrue(udb.connected, "User database unable to connect when setting connection values.")
if __name__ == '__main__':
unittest.main() | acrosman/historia | src/tests/test_user_db.py | Python | gpl-3.0 | 14,746 |
class TestOSD(object):
def test_osds_are_all_collocated(self, node, host):
        # TODO: figure out a way to parameterize node['vars']['devices'] for this test
osd_auto_discovery = node["vars"].get('osd_auto_discovery', False)
if osd_auto_discovery:
node["vars"]["devices"] = ["/dev/sda", "/dev/sdb", "/dev/sdc"] # Hardcoded since we can't retrieve the devices list generated during playbook run
for device in node["vars"]["devices"]:
assert host.check_output("sudo blkid -s PARTLABEL -o value %s2" % device) in ["ceph journal", "ceph block"]
| bengland2/ceph-ansible | tests/functional/tests/osd/test_journal_collocation.py | Python | apache-2.0 | 597 |
config = {
    # This config file is detected in the localhost environment; values defined here override those in config.py
'environment': "localhost",
# ----> ADD MORE CONFIGURATION OPTIONS HERE <----
} | shupelneker/gae_new_structure | config/localhost.py | Python | lgpl-3.0 | 212 |
from opencog.atomspace import types, TruthValue, get_type_name
import formulas
from pln.rule import Rule
'''
Some Rules evaluate various kinds of logical links based explicitly on
set membership. A set = a ConceptNode. Other Rules calculate them
heuristically, based on set probabilities and logical links.
'''
# Todo: try to separate these rules further into several files by
# category. The rules in this file were under the header 'inheritance
# rules' in rules.py, but may need to be further classified.
__VERBOSE__ = False
BOOLEAN_LINKS = [types.AndLink,
types.OrLink,
types.NotLink]
FIRST_ORDER_LINKS = [types.InheritanceLink,
types.SubsetLink,
types.IntensionalInheritanceLink,
types.SimilarityLink,
types.ExtensionalSimilarityLink,
types.IntensionalSimilarityLink]
HIGHER_ORDER_LINKS = [types.ImplicationLink,
types.EquivalenceLink]
class InversionRule(Rule):
"""
A->B entails B->A
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
name = "InversionRule<%s>"%(get_type_name(link_type),),
outputs=[chainer.link(link_type, [B, A])],
inputs=[chainer.link(link_type, [A, B]), A, B],
formula=formulas.inversionFormula)
class DeductionRule(Rule):
"""
A->B, B->C entails A->C
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
C = chainer.new_variable()
Rule.__init__(self,
name = "DeductionRule<%s>"%(get_type_name(link_type),),
formula=formulas.deductionIndependenceBasedFormula,
outputs=[chainer.link(link_type, [A, C])],
inputs=[chainer.link(link_type, [A, B]),
chainer.link(link_type, [B, C]),
B,
C])
# Todo: It doesn't have the right formula
class DeductionGeometryRule(Rule):
"""
A->B, B->C entails A->C. Uses concept geometry.
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
C = chainer.new_variable()
Rule.__init__(self,
name="DeductionGeometryRule<%s>"%(get_type_name(link_type),),
formula=formulas.deductionGeometryFormula,
outputs=[chainer.link(link_type, [A, C])],
inputs=[chainer.link(link_type, [A, B]),
chainer.link(link_type, [B, C])])
# TODO add macro-rules for Abduction and Induction based on Deduction
# and Inversion
'''
deduction:  S is M, M is L, then S is L
induction:  M is S, M is L, then S is L
            (deduction with the first premise inverted)
abduction:  S is M, L is M, then S is L
            (deduction with the second premise inverted)
'''
class InductionRule(Rule):
"""
    M->S, M->L entails S->L
"""
def __init__(self, chainer, link_type):
S = chainer.new_variable()
M = chainer.new_variable()
L = chainer.new_variable()
Rule.__init__(self,
name="InductionRule<%s>"%(get_type_name(link_type),),
outputs=[chainer.link(link_type, [S, L])],
inputs=[chainer.link(link_type, [M, S]),
chainer.link(link_type, [M, L]), S, M, L],
formula=formulas.inductionFormula)
class AbductionRule(Rule):
"""
    S->M, L->M entails S->L
"""
def __init__(self, chainer, link_type):
S = chainer.new_variable()
M = chainer.new_variable()
L = chainer.new_variable()
Rule.__init__(self,
name="AbductionRule<%s>"%(get_type_name(link_type),),
outputs=[chainer.link(link_type, [S, L])],
inputs=[chainer.link(link_type, [S, M]),
chainer.link(link_type, [L, M]), S, M, L],
formula=formulas.abductionFormula)
class TransitiveSimilarityRule(Rule):
"""
Similarity A B, Similarity B C => Similarity A C
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
C = chainer.new_variable()
Rule.__init__(self,
name="TransitiveSimilarityRule<%s>"%(get_type_name(link_type),),
formula=formulas.transitiveSimilarityFormula,
outputs=[chainer.link(link_type, [A, C])],
inputs=[chainer.link(link_type, [A, B]),
chainer.link(link_type, [B, C]),
A, B, C])
class PreciseModusPonensRule(Rule):
"""
Given P(A->B) and P(NOT(A)->B) and sA, estimate sB
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
notA = chainer.link(types.NotLink, [A])
Rule.__init__(self,
name="PreciseModusPonensRule<%s>"%(get_type_name(link_type),),
outputs=[B],
inputs=[chainer.link(link_type, [A, B]),
chainer.link(link_type, [notA, B]),
A],
formula=formulas.preciseModusPonensFormula)
class ModusPonensRule(Rule):
"""
Given P(A->B) and sA, estimate sB
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
name="ModusPonensRule<%s>"%(get_type_name(link_type),),
outputs=[B],
inputs=[chainer.link(link_type, [A, B]),
A],
formula=formulas.modusPonensFormula)
class SymmetricModusPonensRule(Rule):
"""
Given (Similarity A B) and sA, estimate sB
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
name="SymmetricModusPonensRule<%s>"%(get_type_name(link_type),),
outputs=[B],
inputs=[chainer.link(link_type, [A, B]),
A],
formula=formulas.symmetricModusPonensFormula)
class TermProbabilityRule(Rule):
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
AB = chainer.link(link_type, [A, B])
BA = chainer.link(link_type, [B, A])
Rule.__init__(self,
name="TermProbabilityRule<%s>"%(get_type_name(link_type),),
outputs=[B],
inputs=[AB, BA, A],
formula=formulas.termProbabilityFormula)
class InheritanceRule(Rule):
"""
Create a (mixed) InheritanceLink based on the SubsetLink and
IntensionalInheritanceLink (based on the definition of mixed
InheritanceLinks)
"""
def __init__(self, chainer):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
outputs=[chainer.link(types.InheritanceLink, [A, B])],
inputs=[chainer.link(types.SubsetLink, [A, B]),
chainer.link(types.IntensionalInheritanceLink,
[A, B])],
formula=formulas.inheritanceFormula)
class SimilarityRule(Rule):
"""
SimilarityLink A B
|A and B| / |A or B|
"""
def __init__(self, chainer):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
outputs=[chainer.link(types.SimilarityLink, [A, B])],
inputs=[chainer.link(types.AndLink, [A, B]),
chainer.link(types.OrLink, [A, B])],
formula=formulas.extensionalSimilarityFormula)
class SubsetRule1(Rule):
"""
SubsetLink A B
|A and B| / |A|
= P(B|A)
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
name="SubsetRule<%s>"%(get_type_name(link_type),),
outputs=[chainer.link(link_type, [A, B])],
inputs=[chainer.link(types.AndLink, [A, B]),
A],
formula=formulas.subsetFormula)
class AndToSubsetRule1(Rule):
"""
SubsetLink A B
|A and B| / |A|
= P(B|A)
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
name="AndToSubsetRule1<%s>"%(get_type_name(link_type),),
outputs=[chainer.link(link_type, [A, B])],
inputs=[chainer.link(types.AndLink, [A, B]),
A],
formula=formulas.subsetFormula)
class AndToSubsetRuleN(Rule):
"""
    SubsetLink And(A B C) D
    |And(A B C D)| / |And(A B C)|
    = P(D | A and B and C)
"""
def __init__(self, chainer, link_type, N):
vars = chainer.make_n_variables(N)
lhs = chainer.link(types.AndLink, vars[:-1])
rhs = vars[-1]
Rule.__init__(self,
name="AndToSubsetRuleN<%s,%s>"%(get_type_name(link_type),N),
outputs=[chainer.link(link_type, [lhs, rhs])],
inputs=[chainer.link(types.AndLink, vars),
lhs],
formula=formulas.subsetFormula)
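# A minimal usage sketch (hypothetical; assumes a chainer instance from
# pln.chainers exposing the new_variable()/link() interface used above,
# plus add_rule()/forward_step() methods):
#
#   chainer.add_rule(DeductionRule(chainer, types.InheritanceLink))
#   chainer.add_rule(InversionRule(chainer, types.InheritanceLink))
#   chainer.forward_step()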
| zhaozengguang/opencog | opencog/python/pln/rules/inheritance_rules.py | Python | agpl-3.0 | 9,893 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class pos_receipt(osv.osv_memory):
_name = 'pos.receipt'
_description = 'Point of sale receipt'
def view_init(self, cr, uid, fields_list, context=None):
"""
        Creates the view dynamically, adding fields at runtime.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: New arch of view with new columns.
"""
        order_lst = self.pool.get('pos.order').browse(cr, uid, context['active_id'], context=context)
def print_report(self, cr, uid, ids, context=None):
"""
To get the date and print the report
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
        @return: return the report action
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
return {
'type': 'ir.actions.report.xml',
'report_name': 'pos.receipt',
'datas': datas,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ovnicraft/openerp-restaurant | point_of_sale/wizard/pos_receipt.py | Python | agpl-3.0 | 2,288 |
class dbscan():
"""
    This class can be used to locate areas of the interplanetary space that are 'dense' at one epoch.
Essentially, it locates planet clusters
"""
from PyKEP.core import AU, EARTH_VELOCITY
def _axis_equal_3d(self, ax):
"""Rescales 3D axis limits using equal scale."""
import numpy
extents = numpy.array(
[getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = numpy.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def __init__(self, planet_list):
"""
USAGE: cl = dbscan(planet_list):
- planet_list = list of PyKEP planets (typically thousands)
"""
self._asteroids = planet_list
self.labels = None
self.n_clusters = None
self.members = None
self.core_members = None
def _orbital_metric(self, r, v):
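        """Map (r, v) into a 6-vector (DV1, DV2): DV2 scales the position by
        1 / (T days) and DV1 adds the velocity, so that the Euclidean distance
        between two bodies' vectors approximates the velocity change of a
        transfer between them lasting roughly T days."""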
from PyKEP.core import DAY2SEC
DV2 = [a / (self._T * DAY2SEC) for a in r]
DV1 = [a + b for a, b in zip(DV2, v)]
return DV1 + DV2
def cluster(self, t, eps=0.125, min_samples=10, metric='orbital', T=180, ref_r=AU, ref_v=EARTH_VELOCITY):
"""
USAGE: cl.cluster(t, eps=0.125, min_samples=10, metric='orbital', T=180, ref_r=AU, ref_v=EARTH_VELOCITY):
- t: epoch (in MJD2000)
- eps: max distance between points in a cluster
- min_samples: minimum number of samples per cluster
- metric: one of 'euclidean', 'euclidean_r', orbital'
- T: average transfer time (used in the definition of the 'orbital' metric)
        - ref_r: reference radius (used as a scaling factor for r if the metric is 'euclidean' or 'euclidean_r')
        - ref_v: reference velocity (used as a scaling factor for v if the metric is 'euclidean')
"""
import PyKEP
import numpy
from sklearn.cluster import DBSCAN
self._epoch = PyKEP.epoch(t)
if metric == 'euclidean':
self._X = [
[elem for tupl in p.eph(self._epoch) for elem in tupl] for p in self._asteroids]
scaling_vector = [ref_r] * 3
scaling_vector += [ref_v] * 3
elif metric == 'euclidean_r':
self._X = [list(p.eph(self._epoch)[0]) for p in self._asteroids]
scaling_vector = [ref_r] * 3
elif metric == 'orbital':
self._T = T
self._X = [self._orbital_metric(*p.eph(self._epoch)) for p in self._asteroids]
scaling_vector = [1.] * 6 # no scaling
        self._X = numpy.array(self._X)
        # Store the scaling so pretty() can report it and so the coordinates
        # can be restored after clustering.
        self._scaling = numpy.array(scaling_vector)
        self._X = self._X / self._scaling[None, :]
self._db = DBSCAN(eps=eps, min_samples=min_samples).fit(self._X)
self._core_samples = self._db.core_sample_indices_
self.labels = self._db.labels_
self.n_clusters = len(
set(self.labels)) - (1 if -1 in self.labels else 0)
self.members = {}
self.core_members = {}
for label in set(self.labels):
if int(label) == -1:
continue
self.members[int(label)] = [index[0]
for index in numpy.argwhere(self.labels == label)]
self.core_members[int(label)] = [
index for index in self._core_samples if self.labels[index] == label]
        self._X = self._X * self._scaling[None, :]
def pretty(self):
"""Prints the cluster lists."""
if self.labels is None:
return
print("Number of clusters: %d" % self.n_clusters)
print("Size of dataset: %s" % str(self._X.shape))
print("Scaling: %s" % str(self._scaling))
print("Epoch: %s" % str(self._epoch))
for label in list(self.members.keys()):
print("cluster %d (%d - %d): %s" % (label, len(self.members[label]),
len(self.core_members[label]), str(self.members[label])))
def plot(self, ax=None, clusters=None, orbits=False, only_core=False):
"""Plots the clusters."""
if self.n_clusters < 1:
return
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
if ax is None:
fig = plt.figure()
axis = fig.add_subplot(111, projection='3d')
else:
axis = ax
axis.view_init(elev=30.0, azim=135.0)
axis.set_aspect('equal')
if orbits:
from PyKEP.orbit_plots import plot_planet
members = self.core_members if only_core else self.members
for label in members if clusters is None else clusters:
for planet in members[label]:
plot_planet(
self._asteroids[planet], t0=self._epoch, s=0, ax=axis)
X, labels = list(zip(*[(x, label) for (x, label) in zip(self._X, self.labels)
if label > -.5 and (clusters is None or label in clusters)]))
data = [[x[0], x[1], x[2]] for x in X]
axis.scatter(*list(zip(*data)), c=labels, alpha=0.5)
self._axis_equal_3d(axis)
if ax is None:
plt.show()
return axis
def plot_cluster_evolution(self, cluster_id=None, only_core=False, epochs=range(7500, 8400, 100), skip=100, alpha=0.3):
"""
        Plots the evolution of one cluster at 9 given epochs (3x3 subplots).
"""
if self.n_clusters < 1:
print("No clusters have been found yet")
return
        if cluster_id is None or cluster_id >= self.n_clusters or cluster_id < 0:
            print("cluster_id must be between 0 and the number of clusters - 1")
return
if len(epochs) != 9:
print("The epochs requested must be exactly 9 as to assemble 3x3 subplots")
return
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from PyKEP.orbit_plots import plot_planet
from PyKEP import epoch
if only_core:
ids = self.core_members[cluster_id]
else:
ids = self.members[cluster_id]
fig = plt.figure()
for i, ep in enumerate(epochs):
axis = fig.add_subplot(3, 3, i + 1, projection='3d')
plt.axis('off')
plt.title(epoch(ep).__repr__()[:11])
for pl in self._asteroids[::skip]:
axis = plot_planet(pl, ax=axis, alpha=0.05, s=0)
for cluster_member in ids:
r, v = self._asteroids[cluster_member].eph(epoch(ep))
axis.scatter([r[0]], [r[1]], [r[2]], marker='o', alpha=alpha)
plt.draw()
plt.show()
return fig
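# A minimal usage sketch (illustrative values; planet_list is a list of
# PyKEP planets, typically loaded from an asteroid database):
#
#   cl = dbscan(planet_list)
#   cl.cluster(t=7500, eps=0.125, min_samples=10, metric='orbital', T=180)
#   cl.pretty()
#   cl.plot(orbits=False)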
| Yawgmoth90/pykep | PyKEP/phasing/_dbscan.py | Python | gpl-3.0 | 6,903 |
#-*-coding:utf-8-*-
# Screen shown when the game is lost
import pygame
from pygame.locals import *
from sys import exit
from pylib.vector import Vector
class Lost(object):
def __init__(self,surface):
self.surface=surface
        pygame.image.save(self.surface,'./images/lost.png')  # save the current surface as the background image
self.background_image=pygame.image.load('./images/lost.png').convert()
self.backcolor=(127,127,127)
self.lostcolor=(255,127,0)
self.answercolor=(0,127,255)
self.w,self.h=self.surface.get_size()
self.font=pygame.font.Font('./fonts/comic.ttf',30)
def get_lost(self):
self.surface.blit(self.background_image,(0,0))
vect=(self.w/2.0-150,self.h/2.0-80)
w=300
h=160
pygame.draw.rect(self.surface,self.backcolor,Rect(vect,(w,h)))
lost=self.font.render('You have lost!',True,self.lostcolor)
answer=self.font.render(' OK ',True,self.answercolor)
lw,lh=lost.get_size()
aw,ah=answer.get_size()
lv=(vect[0]+w/2.0-lw/2.0,vect[1]+10)
av=(vect[0]+w/2.0-aw/2.0,vect[1]+20+lh)
self.surface.blit(lost,lv)
self.surface.blit(answer,av)
pygame.image.save(self.surface,'./images/lost_game.png')
lost_image=pygame.image.load('./images/lost_game.png').convert()
        pygame.mouse.set_visible(True)  # make the mouse cursor visible
while True:
self.surface.blit(lost_image,(0,0))
pygame.display.update()
for event in pygame.event.get():
if event.type==QUIT:
exit()
elif event.type==KEYDOWN:
if event.key==K_ESCAPE:
exit()
elif event.type==MOUSEBUTTONDOWN:
mx,my=pygame.mouse.get_pos()
if mx>=av[0] and mx<=av[0]+aw and my>=av[1] and my<=av[1]+ah:
exit()
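# Minimal usage sketch (assumes pygame has been initialised and a display
# surface exists; note the class writes helper images under ./images/):
#
#   screen = pygame.display.set_mode((640, 480))
#   Lost(screen).get_lost()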
| lavotap/2048 | gui/lost.py | Python | gpl-2.0 | 1,912 |
#!/usr/bin/env python
__version__ = '$Revision: 4791 $'.split()[1]
__date__ = '$Date: 2012-10-20 $'.split()[1]
__author__ = 'xmlbinmsg'
__doc__='''
Autogenerated python functions to serialize/deserialize binary messages.
Generated by: ../scripts/aisxmlbinmsg2py.py
Need to then wrap these functions with the outer AIS packet and then
convert the whole binary blob to a NMEA string. Those functions are
not currently provided in this file.
serialize: python to ais binary
deserialize: ais binary to python
The generated code uses translators.py, binary.py, and aisstring.py
which should be packaged with the resulting files.
@requires: U{epydoc<http://epydoc.sourceforge.net/>} > 3.0alpha3
@requires: U{BitVector<http://cheeseshop.python.org/pypi/BitVector>}
@author: '''+__author__+'''
@version: ''' + __version__ +'''
@var __date__: Date of last svn commit
@undocumented: __version__ __author__ __doc__ parser
@status: under development
@license: Generated code has no license
@todo: FIX: put in a description of the message here with fields and types.
'''
import sys
from decimal import Decimal
from BitVector import BitVector
import binary, aisstring
# FIX: check to see if these will be needed
TrueBV = BitVector(bitstring="1")
"Why always rebuild the True bit? This should speed things up a bunch"
FalseBV = BitVector(bitstring="0")
"Why always rebuild the False bit? This should speed things up a bunch"
fieldList = (
'MessageID',
'RepeatIndicator',
'UserID',
'Altitude',
'SOG',
'PositionAccuracy',
'Position_longitude',
'Position_latitude',
'COG',
'TimeStamp',
'Reserved',
'DTE',
'Spare',
'assigned_mode',
'RAIM',
'comm_state',
'state_syncstate',
'state_slottimeout',
'state_slotoffset',
)
fieldListPostgres = (
'MessageID',
'RepeatIndicator',
'UserID',
'Altitude',
'SOG',
'PositionAccuracy',
'Position', # PostGIS data type
'COG',
'TimeStamp',
'Reserved',
'DTE',
'Spare',
'assigned_mode',
'RAIM',
'comm_state',
'state_syncstate',
'state_slottimeout',
'state_slotoffset',
)
toPgFields = {
'Position_longitude':'Position',
'Position_latitude':'Position',
}
'''
Go to the Postgis field names from the straight field name
'''
fromPgFields = {
'Position':('Position_longitude','Position_latitude',),
}
'''
Go from the Postgis field names to the straight field name
'''
pgTypes = {
'Position':'POINT',
}
'''
Lookup table for each postgis field name to get its type.
'''
def encode(params, validate=False):
'''Create a SARposition binary message payload to pack into an AIS Msg SARposition.
Fields in params:
- MessageID(uint): AIS message number. Must be 9 (field automatically set to "9")
- RepeatIndicator(uint): Indicated how many times a message has been repeated
- UserID(uint): Unique ship identification number (MMSI)
- Altitude(uint): Altitude (GNSS)
- SOG(uint): Speed over ground
- PositionAccuracy(uint): Accuracy of positioning fixes
- Position_longitude(decimal): Location of the vessel East West location
- Position_latitude(decimal): Location of the vessel North South location
- COG(udecimal): Course over ground
- TimeStamp(uint): UTC second when the report was generated
- Reserved(uint): Reserved for regional applications. Should be set to zero. (field automatically set to "0")
    - DTE(bool): Data terminal ready
- Spare(uint): Not used. Should be set to zero. (field automatically set to "0")
- assigned_mode(uint): autonomous or assigned mode
- RAIM(bool): Receiver autonomous integrity monitoring flag
- comm_state(uint): SOTDMA or ITDMA
    - state_syncstate(uint): Communications State - SOTDMA Synchronization state
- state_slottimeout(uint): Communications State - SOTDMA Frames remaining until a new slot is selected
- state_slotoffset(uint): Communications State - SOTDMA In what slot will the next transmission occur. BROKEN
@param params: Dictionary of field names/values. Throws a ValueError exception if required is missing
@param validate: Set to true to cause checking to occur. Runs slower. FIX: not implemented.
@rtype: BitVector
@return: encoded binary message (for binary messages, this needs to be wrapped in a msg 8
@note: The returned bits may not be 6 bit aligned. It is up to you to pad out the bits.
'''
bvList = []
bvList.append(binary.setBitVectorSize(BitVector(intVal=9),6))
if 'RepeatIndicator' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['RepeatIndicator']),2))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=0),2))
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['UserID']),30))
if 'Altitude' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['Altitude']),12))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=4095),12))
if 'SOG' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['SOG']),10))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=1023),10))
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['PositionAccuracy']),1))
if 'Position_longitude' in params:
bvList.append(binary.bvFromSignedInt(int(Decimal(params['Position_longitude'])*Decimal('600000')),28))
else:
bvList.append(binary.bvFromSignedInt(108600000,28))
if 'Position_latitude' in params:
bvList.append(binary.bvFromSignedInt(int(Decimal(params['Position_latitude'])*Decimal('600000')),27))
else:
bvList.append(binary.bvFromSignedInt(54600000,27))
if 'COG' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int((Decimal(params['COG'])*Decimal('10')))),12))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int(3600)),12))
if 'TimeStamp' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['TimeStamp']),6))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=60),6))
bvList.append(binary.setBitVectorSize(BitVector(intVal=0),8))
if params["DTE"]: bvList.append(TrueBV)
else: bvList.append(FalseBV)
bvList.append(binary.setBitVectorSize(BitVector(intVal=0),3))
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['assigned_mode']),1))
if params["RAIM"]: bvList.append(TrueBV)
else: bvList.append(FalseBV)
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['comm_state']),1))
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['state_syncstate']),2))
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['state_slottimeout']),3))
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['state_slotoffset']),14))
return binary.joinBV(bvList)
def decode(bv, validate=False):
'''Unpack a SARposition message
Fields in params:
- MessageID(uint): AIS message number. Must be 9 (field automatically set to "9")
    - RepeatIndicator(uint): Indicates how many times a message has been repeated
- UserID(uint): Unique ship identification number (MMSI)
- Altitude(uint): Altitude (GNSS)
- SOG(uint): Speed over ground
- PositionAccuracy(uint): Accuracy of positioning fixes
- Position_longitude(decimal): Location of the vessel East West location
- Position_latitude(decimal): Location of the vessel North South location
- COG(udecimal): Course over ground
- TimeStamp(uint): UTC second when the report was generated
- Reserved(uint): Reserved for regional applications. Should be set to zero. (field automatically set to "0")
    - DTE(bool): Data terminal ready
- Spare(uint): Not used. Should be set to zero. (field automatically set to "0")
- assigned_mode(uint): autonomous or assigned mode
- RAIM(bool): Receiver autonomous integrity monitoring flag
- comm_state(uint): SOTDMA or ITDMA
    - state_syncstate(uint): Communications State - SOTDMA Synchronization state
- state_slottimeout(uint): Communications State - SOTDMA Frames remaining until a new slot is selected
- state_slotoffset(uint): Communications State - SOTDMA In what slot will the next transmission occur. BROKEN
@type bv: BitVector
@param bv: Bits defining a message
@param validate: Set to true to cause checking to occur. Runs slower. FIX: not implemented.
@rtype: dict
@return: params
'''
#Would be nice to check the bit count here..
#if validate:
# assert (len(bv)==FIX: SOME NUMBER)
r = {}
r['MessageID']=9
r['RepeatIndicator']=int(bv[6:8])
r['UserID']=int(bv[8:38])
r['Altitude']=int(bv[38:50])
r['SOG']=int(bv[50:60])
r['PositionAccuracy']=int(bv[60:61])
r['Position_longitude']=Decimal(binary.signedIntFromBV(bv[61:89]))/Decimal('600000')
r['Position_latitude']=Decimal(binary.signedIntFromBV(bv[89:116]))/Decimal('600000')
r['COG']=Decimal(int(bv[116:128]))/Decimal('10')
r['TimeStamp']=int(bv[128:134])
r['Reserved']=0
r['DTE']=bool(int(bv[142:143]))
r['Spare']=0
r['assigned_mode']=int(bv[146:147])
r['RAIM']=bool(int(bv[147:148]))
r['comm_state']=int(bv[148:149])
r['state_syncstate']=int(bv[149:151])
r['state_slottimeout']=int(bv[151:154])
r['state_slotoffset']=int(bv[154:168])
return r
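# Round-trip sketch: encode a params dict and decode it back (testParams(),
# defined further below, builds a complete example dictionary):
#
#   bits = encode(testParams())
#   assert decode(bits)['UserID'] == testParams()['UserID']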
def decodeMessageID(bv, validate=False):
return 9
def decodeRepeatIndicator(bv, validate=False):
return int(bv[6:8])
def decodeUserID(bv, validate=False):
return int(bv[8:38])
def decodeAltitude(bv, validate=False):
return int(bv[38:50])
def decodeSOG(bv, validate=False):
return int(bv[50:60])
def decodePositionAccuracy(bv, validate=False):
return int(bv[60:61])
def decodePosition_longitude(bv, validate=False):
return Decimal(binary.signedIntFromBV(bv[61:89]))/Decimal('600000')
def decodePosition_latitude(bv, validate=False):
return Decimal(binary.signedIntFromBV(bv[89:116]))/Decimal('600000')
def decodeCOG(bv, validate=False):
return Decimal(int(bv[116:128]))/Decimal('10')
def decodeTimeStamp(bv, validate=False):
return int(bv[128:134])
def decodeReserved(bv, validate=False):
return 0
def decodeDTE(bv, validate=False):
return bool(int(bv[142:143]))
def decodeSpare(bv, validate=False):
return 0
def decodeassigned_mode(bv, validate=False):
return int(bv[146:147])
def decodeRAIM(bv, validate=False):
return bool(int(bv[147:148]))
def decodecomm_state(bv, validate=False):
return int(bv[148:149])
def decodestate_syncstate(bv, validate=False):
return int(bv[149:151])
def decodestate_slottimeout(bv, validate=False):
return int(bv[151:154])
def decodestate_slotoffset(bv, validate=False):
return int(bv[154:168])
def printHtml(params, out=sys.stdout):
out.write("<h3>SARposition</h3>\n")
out.write("<table border=\"1\">\n")
out.write("<tr bgcolor=\"orange\">\n")
out.write("<th align=\"left\">Field Name</th>\n")
out.write("<th align=\"left\">Type</th>\n")
out.write("<th align=\"left\">Value</th>\n")
out.write("<th align=\"left\">Value in Lookup Table</th>\n")
out.write("<th align=\"left\">Units</th>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>MessageID</td>\n")
out.write("<td>uint</td>\n")
if 'MessageID' in params:
out.write(" <td>"+str(params['MessageID'])+"</td>\n")
out.write(" <td>"+str(params['MessageID'])+"</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>RepeatIndicator</td>\n")
out.write("<td>uint</td>\n")
if 'RepeatIndicator' in params:
out.write(" <td>"+str(params['RepeatIndicator'])+"</td>\n")
if str(params['RepeatIndicator']) in RepeatIndicatorDecodeLut:
out.write("<td>"+RepeatIndicatorDecodeLut[str(params['RepeatIndicator'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>UserID</td>\n")
out.write("<td>uint</td>\n")
if 'UserID' in params:
out.write(" <td>"+str(params['UserID'])+"</td>\n")
out.write(" <td>"+str(params['UserID'])+"</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>Altitude</td>\n")
out.write("<td>uint</td>\n")
if 'Altitude' in params:
out.write(" <td>"+str(params['Altitude'])+"</td>\n")
if str(params['Altitude']) in AltitudeDecodeLut:
out.write("<td>"+AltitudeDecodeLut[str(params['Altitude'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("<td>meters</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>SOG</td>\n")
out.write("<td>uint</td>\n")
if 'SOG' in params:
out.write(" <td>"+str(params['SOG'])+"</td>\n")
if str(params['SOG']) in SOGDecodeLut:
out.write("<td>"+SOGDecodeLut[str(params['SOG'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("<td>knots</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>PositionAccuracy</td>\n")
out.write("<td>uint</td>\n")
if 'PositionAccuracy' in params:
out.write(" <td>"+str(params['PositionAccuracy'])+"</td>\n")
if str(params['PositionAccuracy']) in PositionAccuracyDecodeLut:
out.write("<td>"+PositionAccuracyDecodeLut[str(params['PositionAccuracy'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>Position_longitude</td>\n")
out.write("<td>decimal</td>\n")
if 'Position_longitude' in params:
out.write(" <td>"+str(params['Position_longitude'])+"</td>\n")
out.write(" <td>"+str(params['Position_longitude'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>Position_latitude</td>\n")
out.write("<td>decimal</td>\n")
if 'Position_latitude' in params:
out.write(" <td>"+str(params['Position_latitude'])+"</td>\n")
out.write(" <td>"+str(params['Position_latitude'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>COG</td>\n")
out.write("<td>udecimal</td>\n")
if 'COG' in params:
out.write(" <td>"+str(params['COG'])+"</td>\n")
out.write(" <td>"+str(params['COG'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>TimeStamp</td>\n")
out.write("<td>uint</td>\n")
if 'TimeStamp' in params:
out.write(" <td>"+str(params['TimeStamp'])+"</td>\n")
if str(params['TimeStamp']) in TimeStampDecodeLut:
out.write("<td>"+TimeStampDecodeLut[str(params['TimeStamp'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("<td>seconds</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>Reserved</td>\n")
out.write("<td>uint</td>\n")
if 'Reserved' in params:
out.write(" <td>"+str(params['Reserved'])+"</td>\n")
out.write(" <td>"+str(params['Reserved'])+"</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>DTE</td>\n")
out.write("<td>bool</td>\n")
if 'DTE' in params:
out.write(" <td>"+str(params['DTE'])+"</td>\n")
out.write(" <td>"+str(params['DTE'])+"</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>Spare</td>\n")
out.write("<td>uint</td>\n")
if 'Spare' in params:
out.write(" <td>"+str(params['Spare'])+"</td>\n")
out.write(" <td>"+str(params['Spare'])+"</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>assigned_mode</td>\n")
out.write("<td>uint</td>\n")
if 'assigned_mode' in params:
out.write(" <td>"+str(params['assigned_mode'])+"</td>\n")
if str(params['assigned_mode']) in assigned_modeDecodeLut:
out.write("<td>"+assigned_modeDecodeLut[str(params['assigned_mode'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>RAIM</td>\n")
out.write("<td>bool</td>\n")
if 'RAIM' in params:
out.write(" <td>"+str(params['RAIM'])+"</td>\n")
if str(params['RAIM']) in RAIMDecodeLut:
out.write("<td>"+RAIMDecodeLut[str(params['RAIM'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>comm_state</td>\n")
out.write("<td>uint</td>\n")
if 'comm_state' in params:
out.write(" <td>"+str(params['comm_state'])+"</td>\n")
if str(params['comm_state']) in comm_stateDecodeLut:
out.write("<td>"+comm_stateDecodeLut[str(params['comm_state'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>state_syncstate</td>\n")
out.write("<td>uint</td>\n")
if 'state_syncstate' in params:
out.write(" <td>"+str(params['state_syncstate'])+"</td>\n")
if str(params['state_syncstate']) in state_syncstateDecodeLut:
out.write("<td>"+state_syncstateDecodeLut[str(params['state_syncstate'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>state_slottimeout</td>\n")
out.write("<td>uint</td>\n")
if 'state_slottimeout' in params:
out.write(" <td>"+str(params['state_slottimeout'])+"</td>\n")
if str(params['state_slottimeout']) in state_slottimeoutDecodeLut:
out.write("<td>"+state_slottimeoutDecodeLut[str(params['state_slottimeout'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("<td>frames</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>state_slotoffset</td>\n")
out.write("<td>uint</td>\n")
if 'state_slotoffset' in params:
out.write(" <td>"+str(params['state_slotoffset'])+"</td>\n")
out.write(" <td>"+str(params['state_slotoffset'])+"</td>\n")
out.write("</tr>\n")
out.write("</table>\n")
def printKml(params, out=sys.stdout):
'''KML (Keyhole Markup Language) for Google Earth, but without the header/footer'''
out.write("\ <Placemark>\n")
out.write("\t <name>"+str(params['UserID'])+"</name>\n")
out.write("\t\t<description>\n")
import StringIO
buf = StringIO.StringIO()
printHtml(params,buf)
import cgi
out.write(cgi.escape(buf.getvalue()))
out.write("\t\t</description>\n")
out.write("\t\t<styleUrl>#m_ylw-pushpin_copy0</styleUrl>\n")
out.write("\t\t<Point>\n")
out.write("\t\t\t<coordinates>")
out.write(str(params['Position_longitude']))
out.write(',')
out.write(str(params['Position_latitude']))
out.write(",0</coordinates>\n")
out.write("\t\t</Point>\n")
out.write("\t</Placemark>\n")
def printFields(params, out=sys.stdout, format='std', fieldList=None, dbType='postgres'):
'''Print a SARposition message to stdout.
Fields in params:
- MessageID(uint): AIS message number. Must be 9 (field automatically set to "9")
    - RepeatIndicator(uint): Indicates how many times a message has been repeated
- UserID(uint): Unique ship identification number (MMSI)
- Altitude(uint): Altitude (GNSS)
- SOG(uint): Speed over ground
- PositionAccuracy(uint): Accuracy of positioning fixes
- Position_longitude(decimal): Location of the vessel East West location
- Position_latitude(decimal): Location of the vessel North South location
- COG(udecimal): Course over ground
- TimeStamp(uint): UTC second when the report was generated
- Reserved(uint): Reserved for regional applications. Should be set to zero. (field automatically set to "0")
    - DTE(bool): Data terminal ready
- Spare(uint): Not used. Should be set to zero. (field automatically set to "0")
- assigned_mode(uint): autonomous or assigned mode
- RAIM(bool): Receiver autonomous integrity monitoring flag
- comm_state(uint): SOTDMA or ITDMA
    - state_syncstate(uint): Communications State - SOTDMA Synchronization state
- state_slottimeout(uint): Communications State - SOTDMA Frames remaining until a new slot is selected
- state_slotoffset(uint): Communications State - SOTDMA In what slot will the next transmission occur. BROKEN
@param params: Dictionary of field names/values.
@param out: File like object to write to
@rtype: stdout
@return: text to out
'''
if 'std'==format:
out.write("SARposition:\n")
if 'MessageID' in params: out.write(" MessageID: "+str(params['MessageID'])+"\n")
if 'RepeatIndicator' in params: out.write(" RepeatIndicator: "+str(params['RepeatIndicator'])+"\n")
if 'UserID' in params: out.write(" UserID: "+str(params['UserID'])+"\n")
if 'Altitude' in params: out.write(" Altitude: "+str(params['Altitude'])+"\n")
if 'SOG' in params: out.write(" SOG: "+str(params['SOG'])+"\n")
if 'PositionAccuracy' in params: out.write(" PositionAccuracy: "+str(params['PositionAccuracy'])+"\n")
if 'Position_longitude' in params: out.write(" Position_longitude: "+str(params['Position_longitude'])+"\n")
if 'Position_latitude' in params: out.write(" Position_latitude: "+str(params['Position_latitude'])+"\n")
if 'COG' in params: out.write(" COG: "+str(params['COG'])+"\n")
if 'TimeStamp' in params: out.write(" TimeStamp: "+str(params['TimeStamp'])+"\n")
if 'Reserved' in params: out.write(" Reserved: "+str(params['Reserved'])+"\n")
if 'DTE' in params: out.write(" DTE: "+str(params['DTE'])+"\n")
if 'Spare' in params: out.write(" Spare: "+str(params['Spare'])+"\n")
if 'assigned_mode' in params: out.write(" assigned_mode: "+str(params['assigned_mode'])+"\n")
if 'RAIM' in params: out.write(" RAIM: "+str(params['RAIM'])+"\n")
if 'comm_state' in params: out.write(" comm_state: "+str(params['comm_state'])+"\n")
if 'state_syncstate' in params: out.write(" state_syncstate: "+str(params['state_syncstate'])+"\n")
if 'state_slottimeout' in params: out.write(" state_slottimeout: "+str(params['state_slottimeout'])+"\n")
if 'state_slotoffset' in params: out.write(" state_slotoffset: "+str(params['state_slotoffset'])+"\n")
elif 'csv'==format:
        if fieldList is None:
            # The parameter shadows the module-level tuple of the same name; fall back to it.
            fieldList = globals()['fieldList']
        needComma = False
for field in fieldList:
if needComma: out.write(',')
needComma = True
if field in params:
out.write(str(params[field]))
# else: leave it empty
out.write("\n")
elif 'html'==format:
printHtml(params,out)
elif 'sql'==format:
sqlInsertStr(params,out,dbType=dbType)
elif 'kml'==format:
printKml(params,out)
elif 'kml-full'==format:
out.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
out.write("<kml xmlns=\"http://earth.google.com/kml/2.1\">\n")
out.write("<Document>\n")
out.write(" <name>SARposition</name>\n")
printKml(params,out)
out.write("</Document>\n")
out.write("</kml>\n")
else:
print "ERROR: unknown format:",format
assert False
return # Nothing to return
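# Rendering sketch: print one decoded message in a chosen format, where
# format is one of 'std', 'csv', 'html', 'sql', 'kml' or 'kml-full':
#
#   printFields(decode(bits), sys.stdout, format='std')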
RepeatIndicatorEncodeLut = {
'default':'0',
'do not repeat any more':'3',
} #RepeatIndicatorEncodeLut
RepeatIndicatorDecodeLut = {
'0':'default',
'3':'do not repeat any more',
} # RepeatIndicatorEncodeLut
AltitudeEncodeLut = {
'4095 or higher':'4095',
} #AltitudeEncodeLut
AltitudeDecodeLut = {
'4095':'4095 or higher',
} # AltitudeEncodeLut
SOGEncodeLut = {
'1022 knots or higher':'1022',
} #SOGEncodeLut
SOGDecodeLut = {
'1022':'1022 knots or higher',
} # SOGEncodeLut
PositionAccuracyEncodeLut = {
'low (greater than 10 m)':'0',
'high (less than 10 m)':'1',
} #PositionAccuracyEncodeLut
PositionAccuracyDecodeLut = {
'0':'low (greater than 10 m)',
'1':'high (less than 10 m)',
} # PositionAccuracyEncodeLut
TimeStampEncodeLut = {
'not available/default':'60',
'manual input':'61',
'dead reckoning':'62',
'inoperative':'63',
} #TimeStampEncodeLut
TimeStampDecodeLut = {
'60':'not available/default',
'61':'manual input',
'62':'dead reckoning',
'63':'inoperative',
} # TimeStampEncodeLut
assigned_modeEncodeLut = {
'autonomous and continuous mode':'0',
'assigned mode':'1',
} #assigned_modeEncodeLut
assigned_modeDecodeLut = {
'0':'autonomous and continuous mode',
'1':'assigned mode',
} # assigned_modeEncodeLut
RAIMEncodeLut = {
'not in use':'False',
'in use':'True',
} #RAIMEncodeLut
RAIMDecodeLut = {
'False':'not in use',
'True':'in use',
} # RAIMEncodeLut
comm_stateEncodeLut = {
'SOTDMA':'0',
'ITDMA':'1',
} #comm_stateEncodeLut
comm_stateDecodeLut = {
'0':'SOTDMA',
'1':'ITDMA',
} # comm_stateEncodeLut
state_syncstateEncodeLut = {
'UTC direct':'0',
'UTC indirect':'1',
'synchronized to a base station':'2',
'synchronized to another station':'3',
} #state_syncstateEncodeLut
state_syncstateDecodeLut = {
'0':'UTC direct',
'1':'UTC indirect',
'2':'synchronized to a base station',
'3':'synchronized to another station',
} # state_syncstateEncodeLut
state_slottimeoutEncodeLut = {
'Last frame in this slot':'0',
'1 frames left':'1',
'2 frames left':'2',
'3 frames left':'3',
'4 frames left':'4',
'5 frames left':'5',
'6 frames left':'6',
'7 frames left':'7',
} #state_slottimeoutEncodeLut
state_slottimeoutDecodeLut = {
'0':'Last frame in this slot',
'1':'1 frames left',
'2':'2 frames left',
'3':'3 frames left',
'4':'4 frames left',
'5':'5 frames left',
'6':'6 frames left',
'7':'7 frames left',
} # state_slottimeoutEncodeLut
######################################################################
# SQL SUPPORT
######################################################################
dbTableName='SARposition'
'Database table name'
def sqlCreateStr(outfile=sys.stdout, fields=None, extraFields=None
,addCoastGuardFields=True
,dbType='postgres'
):
'''
Return the SQL CREATE command for this message type
@param outfile: file like object to print to.
@param fields: which fields to put in the create. Defaults to all.
@param extraFields: A sequence of tuples containing (name,sql type) for additional fields
    @param addCoastGuardFields: Add the extra fields that come after the NMEA checksum in the USCG N-AIS format
@param dbType: Which flavor of database we are using so that the create is tailored ('sqlite' or 'postgres')
@type addCoastGuardFields: bool
@return: sql create string
@rtype: str
@see: sqlCreate
'''
# FIX: should this sqlCreate be the same as in LaTeX (createFuncName) rather than hard coded?
outfile.write(str(sqlCreate(fields,extraFields,addCoastGuardFields,dbType=dbType)))
def sqlCreate(fields=None, extraFields=None, addCoastGuardFields=True, dbType='postgres'):
'''
Return the sqlhelp object to create the table.
@param fields: which fields to put in the create. Defaults to all.
@param extraFields: A sequence of tuples containing (name,sql type) for additional fields
    @param addCoastGuardFields: Add the extra fields that come after the NMEA checksum in the USCG N-AIS format
@type addCoastGuardFields: bool
@param dbType: Which flavor of database we are using so that the create is tailored ('sqlite' or 'postgres')
@return: An object that can be used to generate a return
@rtype: sqlhelp.create
'''
if None == fields: fields = fieldList
import sqlhelp
c = sqlhelp.create('SARposition',dbType=dbType)
c.addPrimaryKey()
if 'MessageID' in fields: c.addInt ('MessageID')
if 'RepeatIndicator' in fields: c.addInt ('RepeatIndicator')
if 'UserID' in fields: c.addInt ('UserID')
if 'Altitude' in fields: c.addInt ('Altitude')
if 'SOG' in fields: c.addInt ('SOG')
if 'PositionAccuracy' in fields: c.addInt ('PositionAccuracy')
if dbType != 'postgres':
if 'Position_longitude' in fields: c.addDecimal('Position_longitude',8,5)
if dbType != 'postgres':
if 'Position_latitude' in fields: c.addDecimal('Position_latitude',8,5)
if 'COG' in fields: c.addDecimal('COG',4,1)
if 'TimeStamp' in fields: c.addInt ('TimeStamp')
if 'Reserved' in fields: c.addInt ('Reserved')
if 'DTE' in fields: c.addBool('DTE')
if 'Spare' in fields: c.addInt ('Spare')
if 'assigned_mode' in fields: c.addInt ('assigned_mode')
if 'RAIM' in fields: c.addBool('RAIM')
if 'comm_state' in fields: c.addInt ('comm_state')
if 'state_syncstate' in fields: c.addInt ('state_syncstate')
if 'state_slottimeout' in fields: c.addInt ('state_slottimeout')
if 'state_slotoffset' in fields: c.addInt ('state_slotoffset')
if addCoastGuardFields:
# c.addInt('cg_s_rssi') # Relative signal strength indicator
# c.addInt('cg_d_strength') # dBm receive strength
# c.addVarChar('cg_x',10) # Idonno
c.addInt('cg_t_arrival') # Receive timestamp from the AIS equipment 'T'
c.addInt('cg_s_slotnum') # Slot received in
c.addVarChar('cg_r',15) # Receiver station ID - should usually be an MMSI, but sometimes is a string
c.addInt('cg_sec') # UTC seconds since the epoch
c.addTimestamp('cg_timestamp') # UTC decoded cg_sec - not actually in the data stream
if dbType == 'postgres':
#--- EPSG 4326 : WGS 84
#INSERT INTO "spatial_ref_sys" ("srid","auth_name","auth_srid","srtext","proj4text") VALUES (4326,'EPSG',4326,'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]','+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ');
c.addPostGIS('Position','POINT',2,SRID=4326);
return c
def sqlInsertStr(params, outfile=sys.stdout, extraParams=None, dbType='postgres'):
'''
Return the SQL INSERT command for this message type
@param params: dictionary of values keyed by field name
@param outfile: file like object to print to.
@param extraParams: A sequence of tuples containing (name,sql type) for additional fields
@return: sql create string
@rtype: str
@see: sqlCreate
'''
outfile.write(str(sqlInsert(params,extraParams,dbType=dbType)))
def sqlInsert(params,extraParams=None,dbType='postgres'):
'''
Give the SQL INSERT statement
@param params: dict keyed by field name of values
@param extraParams: any extra fields that you have created beyond the normal ais message fields
@rtype: sqlhelp.insert
@return: insert class instance
@todo: allow optional type checking of params?
    @warning: invalid keys are accepted without complaint; the resulting SQL may be invalid
'''
import sqlhelp
i = sqlhelp.insert('SARposition',dbType=dbType)
if dbType=='postgres':
finished = []
for key in params:
if key in finished:
continue
if key not in toPgFields and key not in fromPgFields:
if type(params[key])==Decimal: i.add(key,float(params[key]))
else: i.add(key,params[key])
else:
if key in fromPgFields:
val = params[key]
# Had better be a WKT type like POINT(-88.1 30.321)
i.addPostGIS(key,val)
finished.append(key)
else:
# Need to construct the type.
pgName = toPgFields[key]
#valStr='GeomFromText(\''+pgTypes[pgName]+'('
valStr=pgTypes[pgName]+'('
vals = []
for nonPgKey in fromPgFields[pgName]:
vals.append(str(params[nonPgKey]))
finished.append(nonPgKey)
valStr+=' '.join(vals)+')'
i.addPostGIS(pgName,valStr)
else:
for key in params:
if type(params[key])==Decimal: i.add(key,float(params[key]))
else: i.add(key,params[key])
if None != extraParams:
for key in extraParams:
i.add(key,extraParams[key])
return i
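# SQL usage sketch (relies on the bundled sqlhelp module; dbType selects the
# SQL flavor, 'sqlite' or 'postgres'):
#
#   sqlCreateStr(sys.stdout, dbType='postgres')  # CREATE TABLE statement
#   sqlInsertStr(decode(bits), sys.stdout)       # INSERT for one decoded message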
######################################################################
# LATEX SUPPORT
######################################################################
def latexDefinitionTable(outfile=sys.stdout
):
'''
Return the LaTeX definition table for this message type
@param outfile: file like object to print to.
@type outfile: file obj
@return: LaTeX table string via the outfile
@rtype: str
'''
o = outfile
o.write('''
\\begin{table}%[htb]
\\centering
\\begin{tabular}{|l|c|l|}
\\hline
Parameter & Number of bits & Description
\\\\ \\hline\\hline
MessageID & 6 & AIS message number. Must be 9 \\\\ \hline
RepeatIndicator & 2 & Indicates how many times a message has been repeated \\\\ \hline
UserID & 30 & Unique ship identification number (MMSI) \\\\ \hline
Altitude & 12 & Altitude (GNSS) \\\\ \hline
SOG & 10 & Speed over ground \\\\ \hline
PositionAccuracy & 1 & Accuracy of positioning fixes \\\\ \hline
Position\_longitude & 28 & Location of the vessel East West location \\\\ \hline
Position\_latitude & 27 & Location of the vessel North South location \\\\ \hline
COG & 12 & Course over ground \\\\ \hline
TimeStamp & 6 & UTC second when the report was generated \\\\ \hline
Reserved & 8 & Reserved for regional applications. Should be set to zero. \\\\ \hline
DTE & 1 & Data terminal ready \\\\ \hline
Spare & 3 & Not used. Should be set to zero. \\\\ \hline
assigned\_mode & 1 & autonomous or assigned mode \\\\ \hline
RAIM & 1 & Receiver autonomous integrity monitoring flag \\\\ \hline
comm\_state & 1 & SOTDMA or ITDMA \\\\ \hline
state\_syncstate & 2 & Communications State - SOTDMA Synchronization state \\\\ \hline
state\_slottimeout & 3 & Communications State - SOTDMA Frames remaining until a new slot is selected \\\\ \hline
state\_slotoffset & 14 & Communications State - SOTDMA In what slot will the next transmission occur. BROKEN\\\\ \\hline \\hline
Total bits & 168 & Appears to take 1 slot \\\\ \\hline
\\end{tabular}
\\caption{AIS message number 9: Search and rescue position report. Changed in 1371-4}
\\label{tab:SARposition}
\\end{table}
''')
######################################################################
# Text Definition
######################################################################
def textDefinitionTable(outfile=sys.stdout
,delim='\t'
):
'''
Return the text definition table for this message type
@param outfile: file like object to print to.
@type outfile: file obj
@return: text table string via the outfile
@rtype: str
'''
o = outfile
    o.write('''Parameter'''+delim+'''Number of bits'''+delim+'''Description
MessageID'''+delim+'''6'''+delim+'''AIS message number. Must be 9
RepeatIndicator'''+delim+'''2'''+delim+'''Indicates how many times a message has been repeated
UserID'''+delim+'''30'''+delim+'''Unique ship identification number (MMSI)
Altitude'''+delim+'''12'''+delim+'''Altitude (GNSS)
SOG'''+delim+'''10'''+delim+'''Speed over ground
PositionAccuracy'''+delim+'''1'''+delim+'''Accuracy of positioning fixes
Position_longitude'''+delim+'''28'''+delim+'''Location of the vessel East West location
Position_latitude'''+delim+'''27'''+delim+'''Location of the vessel North South location
COG'''+delim+'''12'''+delim+'''Course over ground
TimeStamp'''+delim+'''6'''+delim+'''UTC second when the report was generated
Reserved'''+delim+'''8'''+delim+'''Reserved for regional applications. Should be set to zero.
DTE'''+delim+'''1'''+delim+'''Data terminal ready
Spare'''+delim+'''3'''+delim+'''Not used. Should be set to zero.
assigned_mode'''+delim+'''1'''+delim+'''autonomous or assigned mode
RAIM'''+delim+'''1'''+delim+'''Receiver autonomous integrity monitoring flag
comm_state'''+delim+'''1'''+delim+'''SOTDMA or ITDMA
state_syncstate'''+delim+'''2'''+delim+'''Communications State - SOTDMA Synchronization state
state_slottimeout'''+delim+'''3'''+delim+'''Communications State - SOTDMA Frames remaining until a new slot is selected
state_slotoffset'''+delim+'''14'''+delim+'''Communications State - SOTDMA In what slot will the next transmission occur. BROKEN
Total bits'''+delim+'''168'''+delim+'''Appears to take 1 slot''')
######################################################################
# UNIT TESTING
######################################################################
import unittest
def testParams():
'''Return a params file base on the testvalue tags.
@rtype: dict
@return: params based on testvalue tags
'''
params = {}
params['MessageID'] = 9
params['RepeatIndicator'] = 1
params['UserID'] = 1193046
params['Altitude'] = 1001
params['SOG'] = 342
params['PositionAccuracy'] = 1
params['Position_longitude'] = Decimal('-122.16328055555556')
params['Position_latitude'] = Decimal('37.424458333333334')
params['COG'] = Decimal('34.5')
params['TimeStamp'] = 35
params['Reserved'] = 0
params['DTE'] = False
params['Spare'] = 0
params['assigned_mode'] = 1
params['RAIM'] = False
params['comm_state'] = 1
params['state_syncstate'] = 2
params['state_slottimeout'] = 0
params['state_slotoffset'] = 1221
return params
class TestSARposition(unittest.TestCase):
    '''Use testvalue tag text from each type to build a test case for the SARposition message'''
def testEncodeDecode(self):
params = testParams()
bits = encode(params)
r = decode(bits)
# Check that each parameter came through ok.
self.failUnlessEqual(r['MessageID'],params['MessageID'])
self.failUnlessEqual(r['RepeatIndicator'],params['RepeatIndicator'])
self.failUnlessEqual(r['UserID'],params['UserID'])
self.failUnlessEqual(r['Altitude'],params['Altitude'])
self.failUnlessEqual(r['SOG'],params['SOG'])
self.failUnlessEqual(r['PositionAccuracy'],params['PositionAccuracy'])
self.failUnlessAlmostEqual(r['Position_longitude'],params['Position_longitude'],5)
self.failUnlessAlmostEqual(r['Position_latitude'],params['Position_latitude'],5)
self.failUnlessAlmostEqual(r['COG'],params['COG'],1)
self.failUnlessEqual(r['TimeStamp'],params['TimeStamp'])
self.failUnlessEqual(r['Reserved'],params['Reserved'])
self.failUnlessEqual(r['DTE'],params['DTE'])
self.failUnlessEqual(r['Spare'],params['Spare'])
self.failUnlessEqual(r['assigned_mode'],params['assigned_mode'])
self.failUnlessEqual(r['RAIM'],params['RAIM'])
self.failUnlessEqual(r['comm_state'],params['comm_state'])
self.failUnlessEqual(r['state_syncstate'],params['state_syncstate'])
self.failUnlessEqual(r['state_slottimeout'],params['state_slottimeout'])
self.failUnlessEqual(r['state_slotoffset'],params['state_slotoffset'])
def addMsgOptions(parser):
parser.add_option('-d','--decode',dest='doDecode',default=False,action='store_true',
help='decode a "SARposition" AIS message')
parser.add_option('-e','--encode',dest='doEncode',default=False,action='store_true',
help='encode a "SARposition" AIS message')
parser.add_option('--RepeatIndicator-field', dest='RepeatIndicatorField',default=0,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--UserID-field', dest='UserIDField',metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--Altitude-field', dest='AltitudeField',default=4095,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--SOG-field', dest='SOGField',default=1023,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--PositionAccuracy-field', dest='PositionAccuracyField',metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--Position_longitude-field', dest='Position_longitudeField',default=Decimal('181'),metavar='decimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--Position_latitude-field', dest='Position_latitudeField',default=Decimal('91'),metavar='decimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--COG-field', dest='COGField',default=Decimal('360'),metavar='udecimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--TimeStamp-field', dest='TimeStampField',default=60,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--DTE-field', dest='DTEField',metavar='bool',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--assigned_mode-field', dest='assigned_modeField',metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--RAIM-field', dest='RAIMField',metavar='bool',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--comm_state-field', dest='comm_stateField',metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--state_syncstate-field', dest='state_syncstateField',metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--state_slottimeout-field', dest='state_slottimeoutField',metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--state_slotoffset-field', dest='state_slotoffsetField',metavar='uint',type='int'
,help='Field parameter value [default: %default]')
def main():
from optparse import OptionParser
parser = OptionParser(usage="%prog [options]",
version="%prog "+__version__)
parser.add_option('--doc-test',dest='doctest',default=False,action='store_true',
help='run the documentation tests')
parser.add_option('--unit-test',dest='unittest',default=False,action='store_true',
help='run the unit tests')
parser.add_option('-v','--verbose',dest='verbose',default=False,action='store_true',
help='Make the test output verbose')
# FIX: remove nmea from binary messages. No way to build the whole packet?
# FIX: or build the surrounding msg 8 for a broadcast?
typeChoices = ('binary','nmeapayload','nmea') # FIX: what about a USCG type message?
parser.add_option('-t','--type',choices=typeChoices,type='choice',dest='ioType'
,default='nmeapayload'
,help='What kind of string to write for encoding ('+', '.join(typeChoices)+') [default: %default]')
outputChoices = ('std','html','csv','sql' , 'kml','kml-full')
parser.add_option('-T','--output-type',choices=outputChoices,type='choice',dest='outputType'
,default='std'
,help='What kind of string to output ('+', '.join(outputChoices)+') [default: %default]')
parser.add_option('-o','--output',dest='outputFileName',default=None,
help='Name of the python file to write [default: stdout]')
parser.add_option('-f','--fields',dest='fieldList',default=None, action='append',
choices=fieldList,
help='Which fields to include in the output. Currently only for csv output [default: all]')
parser.add_option('-p','--print-csv-field-list',dest='printCsvfieldList',default=False,action='store_true',
help='Print the field name for csv')
parser.add_option('-c','--sql-create',dest='sqlCreate',default=False,action='store_true',
help='Print out an sql create command for the table.')
parser.add_option('--latex-table',dest='latexDefinitionTable',default=False,action='store_true',
help='Print a LaTeX table of the type')
parser.add_option('--text-table',dest='textDefinitionTable',default=False,action='store_true',
help='Print delimited table of the type (for Word table importing)')
    parser.add_option('--delim-text-table',dest='delimTextDefinitionTable',default='\t'
,help='Delimiter for text table [default: \'%default\'](for Word table importing)')
dbChoices = ('sqlite','postgres')
parser.add_option('-D','--db-type',dest='dbType',default='postgres'
,choices=dbChoices,type='choice'
,help='What kind of database ('+', '.join(dbChoices)+') [default: %default]')
addMsgOptions(parser)
(options,args) = parser.parse_args()
success=True
if options.doctest:
import os; print os.path.basename(sys.argv[0]), 'doctests ...',
sys.argv= [sys.argv[0]]
if options.verbose: sys.argv.append('-v')
import doctest
numfail,numtests=doctest.testmod()
if numfail==0: print 'ok'
else:
print 'FAILED'
success=False
if not success: sys.exit('Something Failed')
del success # Hide success from epydoc
if options.unittest:
sys.argv = [sys.argv[0]]
if options.verbose: sys.argv.append('-v')
unittest.main()
outfile = sys.stdout
if None!=options.outputFileName:
outfile = file(options.outputFileName,'w')
if options.doEncode:
# First make sure all non required options are specified
if None==options.RepeatIndicatorField: parser.error("missing value for RepeatIndicatorField")
if None==options.UserIDField: parser.error("missing value for UserIDField")
if None==options.AltitudeField: parser.error("missing value for AltitudeField")
if None==options.SOGField: parser.error("missing value for SOGField")
if None==options.PositionAccuracyField: parser.error("missing value for PositionAccuracyField")
if None==options.Position_longitudeField: parser.error("missing value for Position_longitudeField")
if None==options.Position_latitudeField: parser.error("missing value for Position_latitudeField")
if None==options.COGField: parser.error("missing value for COGField")
if None==options.TimeStampField: parser.error("missing value for TimeStampField")
if None==options.DTEField: parser.error("missing value for DTEField")
if None==options.assigned_modeField: parser.error("missing value for assigned_modeField")
if None==options.RAIMField: parser.error("missing value for RAIMField")
if None==options.comm_stateField: parser.error("missing value for comm_stateField")
if None==options.state_syncstateField: parser.error("missing value for state_syncstateField")
if None==options.state_slottimeoutField: parser.error("missing value for state_slottimeoutField")
if None==options.state_slotoffsetField: parser.error("missing value for state_slotoffsetField")
msgDict={
'MessageID': '9',
'RepeatIndicator': options.RepeatIndicatorField,
'UserID': options.UserIDField,
'Altitude': options.AltitudeField,
'SOG': options.SOGField,
'PositionAccuracy': options.PositionAccuracyField,
'Position_longitude': options.Position_longitudeField,
'Position_latitude': options.Position_latitudeField,
'COG': options.COGField,
'TimeStamp': options.TimeStampField,
'Reserved': '0',
'DTE': options.DTEField,
'Spare': '0',
'assigned_mode': options.assigned_modeField,
'RAIM': options.RAIMField,
'comm_state': options.comm_stateField,
'state_syncstate': options.state_syncstateField,
'state_slottimeout': options.state_slottimeoutField,
'state_slotoffset': options.state_slotoffsetField,
}
bits = encode(msgDict)
if 'binary'==options.ioType: print str(bits)
elif 'nmeapayload'==options.ioType:
# FIX: figure out if this might be necessary at compile time
#print "bitLen",len(bits)
bitLen=len(bits)
if bitLen%6!=0:
bits = bits + BitVector(size=(6 - (bitLen%6))) # Pad out to multiple of 6
#print "result:",binary.bitvectoais6(bits)[0]
print binary.bitvectoais6(bits)[0]
# FIX: Do not emit this option for the binary message payloads. Does not make sense.
elif 'nmea'==options.ioType:
#bitLen=len(bits)
#if bitLen%6!=0:
# bits = bits + BitVector(size=(6 - (bitLen%6))) # Pad out to multiple of 6
import aisutils.uscg as uscg
nmea = uscg.create_nmea(bits)
print nmea
#
#
#sys.exit("FIX: need to implement creating nmea capability")
else: sys.exit('ERROR: unknown ioType. Help!')
if options.sqlCreate:
sqlCreateStr(outfile,options.fieldList,dbType=options.dbType)
if options.latexDefinitionTable:
latexDefinitionTable(outfile)
# For conversion to word tables
if options.textDefinitionTable:
textDefinitionTable(outfile,options.delimTextDefinitionTable)
if options.printCsvfieldList:
# Make a csv separated list of fields that will be displayed for csv
if None == options.fieldList: options.fieldList = fieldList
import StringIO
buf = StringIO.StringIO()
for field in options.fieldList:
buf.write(field+',')
result = buf.getvalue()
if result[-1] == ',': print result[:-1]
else: print result
if options.doDecode:
if len(args)==0: args = sys.stdin
for msg in args:
bv = None
if msg[0] in ('$','!') and msg[3:6] in ('VDM','VDO'):
# Found nmea
# FIX: do checksum
bv = binary.ais6tobitvec(msg.split(',')[5])
else: # either binary or nmeapayload... expect mostly nmeapayloads
            # assumes that a string of only 0s and 1s cannot be an nmeapayload
binaryMsg=True
for c in msg:
if c not in ('0','1'):
binaryMsg=False
break
if binaryMsg:
bv = BitVector(bitstring=msg)
else: # nmeapayload
bv = binary.ais6tobitvec(msg)
printFields(decode(bv)
,out=outfile
,format=options.outputType
,fieldList=options.fieldList
,dbType=options.dbType
)
############################################################
if __name__=='__main__':
main()
| stregoika/aislib | ais/ais_msg_9.py | Python | gpl-3.0 | 48,470 |
# -*- coding: utf-8 -*-
# Copyright 2014-2018 Rumma & Ko Ltd
# License: BSD, see LICENSE for more details.
"""
Defines the :rst:dir:`sigal_image` directive.
.. rst:directive:: sigal_image
.. _picsel: https://github.com/lsaffre/picsel
.. _Shotwell: https://en.wikipedia.org/wiki/Shotwell_%28software%29
.. _digiKam: https://www.digikam.org/
.. _Sigal: http://sigal.saimon.org/en/latest/
This creates a bridge between a photo collection managed with
Shotwell_ or digiKam_ and a blog generated with Sphinx. All photos
remain in the single central file tree managed by Shotwell_ or
digiKam_. From within Shotwell_ or digiKam_ you use a tag "blog" to
mark all photos that are to be available for your Sphinx blog. Then
you use picsel_ to extract those images to a separate directory. This
tree serves as input for Sigal_ which will generate a static html
gallery. An example of a Sigal gallery is `here
<http://sigal.saffre-rumma.net/>`__. The :rst:dir:`sigal_image`
directive was the last missing part of this publishing bridge: it
allows you to integrate your pictures into blog entries.
Usage::
.. sigal_image:: partial/path/to/photo.jpg[|title_or_options]
For example, if `sigal_base_url` in your :xfile:`conf.py` is set to
``"http://sigal.saffre-rumma.net"``, the following directive in your
rst source file::
.. sigal_image:: 2014/04/10/img_6617.jpg
will insert the following rst code::
.. raw:: html
<a href="http://sigal.saffre-rumma.net/2014/04/10/img_6617.jpg">
<img
src="http://sigal.saffre-rumma.net/2014/04/10/thumbnails/img_6617.jpg"/>
</a>
The file name can contain **formatting instructions** inspired by
`Wikipedia pictures
<https://en.wikipedia.org/wiki/Wikipedia:Picture_tutorial>`_ which
uses a variable number of pipe characters. For example:
>>> from __future__ import print_function
>>> print(line2html("foo.jpg"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="foo.jpg"><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; width:280px;" title="foo.jpg"/></a>
>>> print(line2html("foo.jpg|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; width:280px;" title="This is a nice picture"/></a>
>>> print(line2html("foo.jpg|thumb|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; float:right; width:280px;" title="This is a nice picture"/></a>
>>> print(line2html("foo.jpg|thumb|left|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; float:left; width:280px;" title="This is a nice picture"/></a>
The generated HTML also includes attributes for `lightbox
<http://lokeshdhakar.com/projects/lightbox2/>`_. In order to activate
this feature you must add the content of the lightbox :file:`dist`
directory somewhere to your web server and then change your
`layout.html` template to something like this::
{%- block extrahead %}
{{ super() }}
<script src="/data/lightbox/js/lightbox-plus-jquery.min.js"></script>
<link href="/data/lightbox/css/lightbox.css" rel="stylesheet" />
{% endblock %}
"""
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from builtins import object
import logging
logger = logging.getLogger(__name__)
import os
# from docutils.parsers.rst import directives
from atelier.sphinxconf.insert_input import InsertInputDirective
TEMPLATE1 = """
.. raw:: html
<a href="%(target)s"><img src="%(src)s" style="padding:4px"/></a>
"""
#TEMPLATE = """<a href="%(target)s" style="%(style)s" %(class)s data-lightbox="image-1" data-title="%(caption)s"/><img src="%(src)s" style="padding:4px" title="%(caption)s"/></a>"""
TEMPLATE = """<a href="%(target)s"%(class)s data-lightbox="image-1" data-title="%(caption)s"><img src="%(src)s" style="%(style)s" title="%(caption)s"/></a>"""
class Format(object):
@classmethod
    def update_context(cls, caption, tplkw):
tplkw.update(caption=caption)
tplkw.update(style="padding:4px; width:280px;")
class Thumb(Format):
@classmethod
    def update_context(cls, caption, tplkw):
chunks = caption.split('|')
if len(chunks) == 1:
tplkw['style'] = "padding:4px; float:right; width:280px;"
elif len(chunks) == 2:
align, caption = chunks
            if align not in ("right", "left", "center"):
raise Exception("Invalid alignment '{0}'".format(align))
            tplkw['style'] = "padding:4px; float:{0}; width:280px;".format(align)
else:
raise Exception("Impossible")
tplkw.update(caption=caption)
class Wide(Format):
@classmethod
    def update_context(cls, caption, tplkw):
chunks = caption.split('|')
if len(chunks) == 1:
tplkw['style'] = "padding:4px; width:100%;"
else:
raise Exception("Impossible")
tplkw.update(caption=caption)
FORMATS = dict()
FORMATS[None] = Format()
FORMATS['thumb'] = Thumb()
FORMATS['wide'] = Wide()
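# The formatting keyword is looked up in FORMATS by name, so additional
# layouts can be registered the same way (a sketch; the "small" format and
# its width are hypothetical, not part of this module):
#
# class Small(Format):
#     @classmethod
#     def update_context(cls, caption, tplkw):
#         tplkw['style'] = "padding:4px; width:140px;"
#         tplkw.update(caption=caption)
#
# FORMATS['small'] = Small()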
def buildurl(*parts):
return 'http://example.com/' + '/'.join(parts)
def line2html(name, buildurl=buildurl):
name = name.strip()
if not name:
return ''
kw = dict() # style="padding:4px")
kw['class'] = ''
kw['style'] = "padding:4px; width:280px;"
if True: # new format using only | as separator
caption = name
fmt = FORMATS[None]
chunks = name.split('|', 1)
if len(chunks) == 2:
name, caption = chunks
chunks = caption.split('|', 1)
if len(chunks) == 2:
fmtname, caption = chunks
fmt = FORMATS[fmtname]
fmt.update_context(caption, kw)
if ' ' in name:
raise Exception("Invalid filename. Spaces not allowed.")
    else:  # legacy whitespace-separated format; unreachable while the flag above is True
chunks = name.split(None, 1)
if len(chunks) == 1:
kw.update(caption='')
elif len(chunks) == 2:
name, caption = chunks
chunks = caption.split('|', 1)
if len(chunks) == 1:
fmt = FORMATS[None]
elif len(chunks) == 2:
fmtname, caption = chunks
fmt = FORMATS[fmtname]
else:
raise Exception("Impossible")
fmt.update_context(caption, kw)
else:
raise Exception("FILENAME <whitespace> DESC %s" % chunks)
head, tail = os.path.split(name)
kw.update(target=buildurl(head, tail))
kw.update(src=buildurl(head, 'thumbnails', tail))
return TEMPLATE % kw
class SigalImage(InsertInputDirective):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
# option_spec = {
# 'style': directives.unchanged,
# 'class': directives.unchanged,
# }
def get_rst(self):
env = self.state.document.settings.env
base_url = env.config.sigal_base_url
def buildurl(*parts):
return base_url + '/' + '/'.join(parts)
s = ''
for name in self.content:
s += line2html(name, buildurl)
if s:
s = "\n\n.. raw:: html\n\n {0}\n\n".format(s)
return s
def get_headers(self):
return ['title', 'author', 'date']
def format_entry(self, e):
cells = []
# text = ''.join([unicode(c) for c in e.title.children])
# cells.append(":doc:`%s <%s>`" % (text, e.docname))
cells.append(":doc:`%s`" % e.docname)
cells.append(str(e.meta.get('author', '')))
cells.append(str(e.meta.get('date', '')))
return cells
def setup(app):
app.add_config_value(
'sigal_base_url', 'http://sigal.saffre-rumma.net', True)
app.add_directive('sigal_image', SigalImage)
# app.add_role(str('rref'), ReferingRefRole(
# lowercase=True,
# innernodeclass=nodes.emphasis,
# warn_dangling=True))
| khchine5/atelier | atelier/sphinxconf/sigal_image.py | Python | bsd-2-clause | 8,318 |
#!/usr/bin/env python
# coding: latin-1
"""
This module is designed to communicate with the ZeroBorg
Use by creating an instance of the class, which probes for the board
when constructed, then command as desired, e.g.
import ZeroBorg
ZB = ZeroBorg.ZeroBorg()
# User code here, use ZB to control the board
Multiple boards can be used when configured with different I²C
addresses by creating multiple instances, setting each instance's
address and re-running the probe, e.g.
import ZeroBorg
ZB1 = ZeroBorg.ZeroBorg()
ZB2 = ZeroBorg.ZeroBorg()
ZB1.i2c_address = 0x44
ZB2.i2c_address = 0x45
ZB1.__init__()
ZB2.__init__()
# User code here, use ZB1 and ZB2 to control each board separately
For explanations of the functions available call the help function, e.g.
import ZeroBorg
ZB = ZeroBorg.ZeroBorg()
ZB.help()
See the website at www.piborg.org/zeroborg for more details
"""
# Import the libraries we need
import smbus
import types
import time
# Constant values
PWM_MAX = 255
I2C_NORM_LEN = 4
I2C_LONG_LEN = 24
I2C_ID_ZEROBORG = 0x40
COMMAND_SET_LED = 1 # Set the LED status
COMMAND_GET_LED = 2 # Get the LED status
COMMAND_SET_A_FWD = 3 # Set motor 1 PWM rate in a forwards direction
COMMAND_SET_A_REV = 4 # Set motor 1 PWM rate in a reverse direction
COMMAND_GET_A = 5 # Get motor 1 direction and PWM rate
COMMAND_SET_B_FWD = 6 # Set motor 2 PWM rate in a forwards direction
COMMAND_SET_B_REV = 7 # Set motor 2 PWM rate in a reverse direction
COMMAND_GET_B = 8 # Get motor 2 direction and PWM rate
COMMAND_SET_C_FWD = 9 # Set motor 3 PWM rate in a forwards direction
COMMAND_SET_C_REV = 10 # Set motor 3 PWM rate in a reverse direction
COMMAND_GET_C = 11 # Get motor 3 direction and PWM rate
COMMAND_SET_D_FWD = 12 # Set motor 4 PWM rate in a forwards direction
COMMAND_SET_D_REV = 13 # Set motor 4 PWM rate in a reverse direction
COMMAND_GET_D = 14 # Get motor 4 direction and PWM rate
COMMAND_ALL_OFF = 15 # Switch everything off
COMMAND_SET_ALL_FWD = 16 # Set all motors PWM rate in a forwards direction
COMMAND_SET_ALL_REV = 17 # Set all motors PWM rate in a reverse direction
# Set the failsafe flag, turns the motors off if communication is interrupted
COMMAND_SET_FAILSAFE = 18
COMMAND_GET_FAILSAFE = 19 # Get the failsafe flag
# Resets the EPO flag, use after EPO has been tripped and switch is now clear
COMMAND_RESET_EPO = 20
COMMAND_GET_EPO = 21 # Get the EPO latched flag
COMMAND_SET_EPO_IGNORE = 22 # Set the EPO ignored flag, allows the system to run without an EPO
COMMAND_GET_EPO_IGNORE = 23 # Get the EPO ignored flag
COMMAND_GET_NEW_IR = 24 # Get the new IR message received flag
COMMAND_GET_LAST_IR = 25 # Get the last IR message received (long message, resets new IR flag)
COMMAND_SET_LED_IR = 26 # Set the LED for indicating IR messages
COMMAND_GET_LED_IR = 27 # Get if the LED is being used to indicate IR messages
COMMAND_GET_ANALOG_1 = 28 # Get the analog reading from port #1, pin 2
COMMAND_GET_ANALOG_2 = 29 # Get the analog reading from port #2, pin 4
COMMAND_GET_ID = 0x99 # Get the board identifier
COMMAND_SET_I2C_ADD = 0xAA # Set a new I2C address
COMMAND_VALUE_FWD = 1 # I2C value representing forward
COMMAND_VALUE_REV = 2 # I2C value representing reverse
COMMAND_VALUE_ON = 1 # I2C value representing on
COMMAND_VALUE_OFF = 0 # I2C value representing off
COMMAND_ANALOG_MAX = 0x3FF # Maximum value for analog readings
IR_MAX_BYTES = I2C_LONG_LEN - 2
def scan_for_zero_borg(bus_number=1):
"""
    scan_for_zero_borg([bus_number])
    Scans the I²C bus for ZeroBorg boards and returns a list of all
    usable addresses
    The bus_number if supplied is which I²C bus to scan, 0 for Rev 1
    boards, 1 for Rev 2 boards, if not supplied the default is 1
"""
found = []
print 'Scanning I²C bus #%d' % bus_number
bus = smbus.SMBus(bus_number)
for address in range(0x03, 0x78, 1):
try:
i2c_recv = bus.read_i2c_block_data(address, COMMAND_GET_ID, I2C_NORM_LEN)
if len(i2c_recv) == I2C_NORM_LEN:
if i2c_recv[1] == I2C_ID_ZEROBORG:
found.append(address)
else:
pass
else:
pass
except KeyboardInterrupt:
raise
except:
pass
if len(found) == 0:
print 'No ZeroBorg boards found, is bus #%d correct (should be 0 for Rev 1, 1 for ' \
'Rev 2)' % bus_number
elif len(found) == 1:
print '1 ZeroBorg board found'
else:
print '%d ZeroBorg boards found' % (len(found))
return found
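# Example usage (a sketch; which addresses are found depends on the
# attached hardware):
#
# found = scan_for_zero_borg(1)
# if found:
#     print 'First ZeroBorg is at %02X' % found[0]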
def set_new_address(new_address, old_address=-1, bus_number=1):
"""
    set_new_address(new_address, [old_address], [bus_number])
    Scans the I²C bus for the first ZeroBorg and sets it to a new I²C
    address
    If old_address is supplied it will change the address of the board
    at that address rather than scanning the bus
    The bus_number if supplied is which I²C bus to scan, 0 for Rev 1
    boards, 1 for Rev 2 boards, if not supplied the default is 1
Warning, this new I²C address will still be used after resetting
the power on the device
"""
if new_address < 0x03:
print 'Error, I²C addresses below 3 (0x03) are reserved, use an address between 3 (0x03) ' \
'and 119 (0x77)'
return
elif new_address > 0x77:
print 'Error, I²C addresses above 119 (0x77) are reserved, use an address between 3 ' \
'(0x03) and 119 (0x77)'
return
if old_address < 0x0:
found = scan_for_zero_borg(bus_number)
if len(found) < 1:
print 'No ZeroBorg boards found, cannot set a new I²C address!'
return
else:
old_address = found[0]
print 'Changing I²C address from %02X to %02X (bus #%d)' % (
old_address, new_address, bus_number
)
bus = smbus.SMBus(bus_number)
try:
i2c_recv = bus.read_i2c_block_data(old_address, COMMAND_GET_ID, I2C_NORM_LEN)
if len(i2c_recv) == I2C_NORM_LEN:
if i2c_recv[1] == I2C_ID_ZEROBORG:
found_chip = True
print 'Found ZeroBorg at %02X' % old_address
else:
found_chip = False
print 'Found a device at %02X, but it is not a ZeroBorg (ID %02X instead of %02X)'\
% (old_address, i2c_recv[1], I2C_ID_ZEROBORG)
else:
found_chip = False
print 'Missing ZeroBorg at %02X' % old_address
except KeyboardInterrupt:
raise
except:
found_chip = False
print 'Missing ZeroBorg at %02X' % old_address
if found_chip:
bus.write_byte_data(old_address, COMMAND_SET_I2C_ADD, new_address)
time.sleep(0.1)
print 'Address changed to %02X, attempting to talk with the new address' % new_address
try:
i2c_recv = bus.read_i2c_block_data(new_address, COMMAND_GET_ID, I2C_NORM_LEN)
if len(i2c_recv) == I2C_NORM_LEN:
if i2c_recv[1] == I2C_ID_ZEROBORG:
found_chip = True
print 'Found ZeroBorg at %02X' % new_address
else:
found_chip = False
print 'Found a device at %02X, but it is not a ZeroBorg (ID %02X instead of' \
' %02X)' % (new_address, i2c_recv[1], I2C_ID_ZEROBORG)
else:
found_chip = False
print 'Missing ZeroBorg at %02X' % new_address
except KeyboardInterrupt:
raise
except:
found_chip = False
print 'Missing ZeroBorg at %02X' % new_address
if found_chip:
print 'New I²C address of %02X set successfully' % new_address
else:
print 'Failed to set new I²C address...'
# Class used to control ZeroBorg
class ZeroBorg:
"""
This module is designed to communicate with the ZeroBorg
    bus_number: I²C bus on which the ZeroBorg is attached (Rev 1 is
    bus 0, Rev 2 is bus 1)
    bus: the smbus object used to talk to the I²C bus
    i2c_address: The I²C address of the ZeroBorg chip to control
    found_chip: True if the ZeroBorg chip can be seen, False otherwise
    print_function: Function reference to call when printing text, if
    None "print" is used
"""
# Shared values used by this class
bus_number = 1 # Check here for Rev 1 vs Rev 2 and select the correct bus
i2c_address = I2C_ID_ZEROBORG # I²C address, override for a different address
bus = None
found_chip = False
print_function = None
def print_message(self, message):
"""
print_message(message)
        Wrapper used by the ZeroBorg instance to print messages, will
        call print_function if set, print otherwise
"""
if self.print_function is None:
print message
else:
self.print_function(message)
def no_print_message(self, message):
"""
no_print_message(message)
        Does nothing, intended for disabling diagnostic printout by using:
        ZB = ZeroBorg.ZeroBorg()
        ZB.print_function = ZB.no_print_message
"""
pass
def __init__(self, try_other_bus=True):
"""
        __init__([try_other_bus])
        Prepare the I²C driver for talking to the ZeroBorg
        If try_other_bus is True or omitted, this function will
        attempt to use the other bus if the ZeroBorg devices can not
        be found on the current bus_number
"""
self.print_message(
'Loading ZeroBorg on bus %d, address %02X' % (self.bus_number, self.i2c_address)
)
# Open the bus
self.bus = smbus.SMBus(self.bus_number)
# Check for ZeroBorg
try:
i2c_recv = self.read_from_bus(COMMAND_GET_ID)
if len(i2c_recv) == I2C_NORM_LEN:
if i2c_recv[1] == I2C_ID_ZEROBORG:
self.found_chip = True
self.print_message('Found ZeroBorg at %02X' % self.i2c_address)
else:
self.found_chip = False
self.print_message(
'Found a device at %02X, but it is not a ZeroBorg (ID %02X instead of '
'%02X)' % (
self.i2c_address,
i2c_recv[1],
I2C_ID_ZEROBORG
)
)
else:
self.found_chip = False
self.print_message('Missing ZeroBorg at %02X' % self.i2c_address)
except KeyboardInterrupt:
raise
except:
self.found_chip = False
self.print_message('Missing ZeroBorg at %02X' % self.i2c_address)
# See if we are missing chips
if not self.found_chip:
self.print_message('ZeroBorg was not found')
if try_other_bus:
                if self.bus_number == 1:
                    self.bus_number = 0
                else:
                    self.bus_number = 1
self.print_message('Trying bus %d instead' % self.bus_number)
self.__init__(False)
else:
self.print_message(
'Are you sure your ZeroBorg is properly attached, the correct address is used,'
' and the I2C drivers are running?'
)
self.bus = None
else:
self.print_message('ZeroBorg loaded on bus %d' % self.bus_number)
def set_motor_1(self, power):
"""
set_motor_1(power)
Sets the drive level for motor 1, from +1 to -1.
e.g.
set_motor_1(0) -> motor 1 is stopped
set_motor_1(0.75) -> motor 1 moving forward at 75% power
set_motor_1(-0.5) -> motor 1 moving reverse at 50% power
set_motor_1(1) -> motor 1 moving forward at 100% power
"""
if power < 0:
# Reverse
command = COMMAND_SET_A_REV
pwm = -int(PWM_MAX * power)
if pwm > PWM_MAX:
pwm = PWM_MAX
else:
# Forward / stopped
command = COMMAND_SET_A_FWD
pwm = int(PWM_MAX * power)
if pwm > PWM_MAX:
pwm = PWM_MAX
self.write_to_bus(command, pwm, exception_message='Failed sending motor 1 drive level!')
def get_motor_1(self):
"""
power = get_motor_1()
Gets the drive level for motor 1, from +1 to -1.
e.g.
0 -> motor 1 is stopped
0.75 -> motor 1 moving forward at 75% power
-0.5 -> motor 1 moving reverse at 50% power
1 -> motor 1 moving forward at 100% power
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_A,
exception_message='Failed reading motor 1 drive level!'
)
if i2c_recv is None:
return
power = float(i2c_recv[2]) / float(PWM_MAX)
if i2c_recv[1] == COMMAND_VALUE_FWD:
return power
elif i2c_recv[1] == COMMAND_VALUE_REV:
return -power
else:
return
def set_motor_2(self, power):
"""
set_motor_2(power)
Sets the drive level for motor 2, from +1 to -1.
e.g.
set_motor_2(0) -> motor 2 is stopped
set_motor_2(0.75) -> motor 2 moving forward at 75% power
set_motor_2(-0.5) -> motor 2 moving reverse at 50% power
set_motor_2(1) -> motor 2 moving forward at 100% power
"""
if power < 0:
# Reverse
command = COMMAND_SET_B_REV
pwm = -int(PWM_MAX * power)
if pwm > PWM_MAX:
pwm = PWM_MAX
else:
# Forward / stopped
command = COMMAND_SET_B_FWD
pwm = int(PWM_MAX * power)
if pwm > PWM_MAX:
pwm = PWM_MAX
self.write_to_bus(command, pwm, exception_message='Failed sending motor 2 drive level!')
def get_motor_2(self):
"""
power = get_motor_2()
Gets the drive level for motor 2, from +1 to -1.
e.g.
0 -> motor 2 is stopped
0.75 -> motor 2 moving forward at 75% power
-0.5 -> motor 2 moving reverse at 50% power
1 -> motor 2 moving forward at 100% power
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_B,
            exception_message='Failed reading motor 2 drive level!'
)
if i2c_recv is None:
return
power = float(i2c_recv[2]) / float(PWM_MAX)
if i2c_recv[1] == COMMAND_VALUE_FWD:
return power
elif i2c_recv[1] == COMMAND_VALUE_REV:
return -power
else:
return
def set_motor_3(self, power):
"""
set_motor_3(power)
Sets the drive level for motor 3, from +1 to -1.
e.g.
set_motor_3(0) -> motor 3 is stopped
set_motor_3(0.75) -> motor 3 moving forward at 75% power
set_motor_3(-0.5) -> motor 3 moving reverse at 50% power
set_motor_3(1) -> motor 3 moving forward at 100% power
"""
if power < 0:
# Reverse
command = COMMAND_SET_C_REV
pwm = -int(PWM_MAX * power)
if pwm > PWM_MAX:
pwm = PWM_MAX
else:
# Forward / stopped
command = COMMAND_SET_C_FWD
pwm = int(PWM_MAX * power)
if pwm > PWM_MAX:
pwm = PWM_MAX
self.write_to_bus(command, pwm, exception_message='Failed sending motor 3 drive level!')
def get_motor_3(self):
"""
power = get_motor_3()
Gets the drive level for motor 3, from +1 to -1.
e.g.
0 -> motor 3 is stopped
0.75 -> motor 3 moving forward at 75% power
-0.5 -> motor 3 moving reverse at 50% power
1 -> motor 3 moving forward at 100% power
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_C,
exception_message='Failed reading motor 3 drive level!'
)
if i2c_recv is None:
return
power = float(i2c_recv[2]) / float(PWM_MAX)
if i2c_recv[1] == COMMAND_VALUE_FWD:
return power
elif i2c_recv[1] == COMMAND_VALUE_REV:
return -power
else:
return
def set_motor_4(self, power):
"""
set_motor_4(power)
Sets the drive level for motor 4, from +1 to -1.
e.g.
set_motor_4(0) -> motor 4 is stopped
set_motor_4(0.75) -> motor 4 moving forward at 75% power
set_motor_4(-0.5) -> motor 4 moving reverse at 50% power
set_motor_4(1) -> motor 4 moving forward at 100% power
"""
if power < 0:
# Reverse
command = COMMAND_SET_D_REV
pwm = -int(PWM_MAX * power)
if pwm > PWM_MAX:
pwm = PWM_MAX
else:
# Forward / stopped
command = COMMAND_SET_D_FWD
pwm = int(PWM_MAX * power)
if pwm > PWM_MAX:
pwm = PWM_MAX
self.write_to_bus(command, pwm, exception_message='Failed sending motor 4 drive level!')
def get_motor_4(self):
"""
power = get_motor_4()
Gets the drive level for motor 4, from +1 to -1.
e.g.
0 -> motor 4 is stopped
0.75 -> motor 4 moving forward at 75% power
-0.5 -> motor 4 moving reverse at 50% power
1 -> motor 4 moving forward at 100% power
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_D,
exception_message='Failed reading motor 4 drive level!'
)
if i2c_recv is None:
return
power = float(i2c_recv[2]) / float(PWM_MAX)
if i2c_recv[1] == COMMAND_VALUE_FWD:
return power
elif i2c_recv[1] == COMMAND_VALUE_REV:
return -power
else:
return
def set_motors(self, power):
"""
set_motors(power)
Sets the drive level for all motors, from +1 to -1.
e.g.
set_motors(0) -> all motors are stopped
set_motors(0.75) -> all motors are moving forward at 75% power
set_motors(-0.5) -> all motors are moving reverse at 50% power
set_motors(1) -> all motors are moving forward at 100%
power
"""
if power < 0:
# Reverse
command = COMMAND_SET_ALL_REV
pwm = -int(PWM_MAX * power)
if pwm > PWM_MAX:
pwm = PWM_MAX
else:
# Forward / stopped
command = COMMAND_SET_ALL_FWD
pwm = int(PWM_MAX * power)
if pwm > PWM_MAX:
pwm = PWM_MAX
self.write_to_bus(command, pwm, exception_message='Failed sending all motors drive level!')
def motors_off(self):
"""
motors_off()
Sets all motors to stopped, useful when ending a program
"""
self.write_to_bus(
COMMAND_ALL_OFF,
0,
exception_message='Failed sending motors off command!'
)
def set_led(self, state):
"""
set_led(state)
Sets the current state of the LED, False for off, True for on
"""
if state:
level = COMMAND_VALUE_ON
else:
level = COMMAND_VALUE_OFF
self.write_to_bus(
COMMAND_SET_LED,
level,
exception_message='Failed sending LED state!'
)
def get_led(self):
"""
state = get_led()
Reads the current state of the LED, False for off, True for on
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_LED,
exception_message='Failed reading LED state!'
)
if i2c_recv is None:
return
if i2c_recv[1] == COMMAND_VALUE_OFF:
return False
else:
return True
def reset_epo(self):
"""
reset_epo()
Resets the EPO latch state, use to allow movement again after
the EPO has been tripped
"""
self.write_to_bus(
COMMAND_RESET_EPO,
0,
exception_message='Failed resetting EPO!',
)
def get_epo(self):
"""
state = get_epo()
Reads the system EPO latch state.
If False the EPO has not been tripped, and movement is allowed.
If True the EPO has been tripped, movement is disabled if the
        EPO is not ignored (see set_epo_ignore)
        Movement can be re-enabled by calling reset_epo.
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_EPO,
            exception_message='Failed reading EPO state!'
)
if i2c_recv is None:
return
if i2c_recv[1] == COMMAND_VALUE_OFF:
return False
else:
return True
def set_epo_ignore(self, state):
"""
set_epo_ignore(state)
Sets the system to ignore or use the EPO latch, set to False if
you have an EPO switch, True if you do not
"""
if state:
level = COMMAND_VALUE_ON
else:
level = COMMAND_VALUE_OFF
self.write_to_bus(
COMMAND_SET_EPO_IGNORE,
level,
exception_message='Failed sending EPO ignore state!',
)
def get_epo_ignore(self):
"""
state = get_epo_ignore()
Reads the system EPO ignore state, False for using the EPO
latch, True for ignoring the EPO latch
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_EPO_IGNORE,
exception_message='Failed reading EPO ignore state!'
)
if i2c_recv is None:
return
if i2c_recv[1] == COMMAND_VALUE_OFF:
return False
else:
return True
def has_new_ir_message(self):
"""
state = has_new_ir_message()
Reads the new IR message received flag.
        If False there have been no messages to the IR sensor since the
        last read.
        If True there has been a new IR message which can be read using
        get_ir_message().
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_NEW_IR,
            exception_message='Failed reading new IR message received flag!'
)
if i2c_recv is None:
return
if i2c_recv[1] == COMMAND_VALUE_OFF:
return False
else:
return True
def get_ir_message(self):
"""
message = get_ir_message()
Reads the last IR message which has been received and clears
the new IR message received flag.
Returns the bytes from the remote control as a hexadecimal
string, e.g. 'F75AD5AA8000'
        Use has_new_ir_message() to see if there has been a new IR message
        since the last call.
"""
        # The IR message is a long read, so request I2C_LONG_LEN bytes
        i2c_recv = self.read_from_bus(
            COMMAND_GET_LAST_IR,
            length=I2C_LONG_LEN,
            exception_message='Failed reading IR message!'
        )
if i2c_recv is None:
return
message = ''
for i in range(IR_MAX_BYTES):
message += '%02X' % (i2c_recv[1+i])
return message.rstrip('0')
def set_led_ir(self, state):
"""
set_led_ir(state)
Sets if IR messages control the state of the LED, False for no
effect, True for incoming messages blink the LED
"""
if state:
level = COMMAND_VALUE_ON
else:
level = COMMAND_VALUE_OFF
self.write_to_bus(
COMMAND_SET_LED_IR,
level,
exception_message='Failed sending LED state!',
)
def get_led_ir(self):
"""
state = get_led_ir()
Reads if IR messages control the state of the LED, False for no
effect, True for incoming messages blink the LED
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_LED_IR,
exception_message='Failed reading LED state!'
)
if i2c_recv is None:
return
if i2c_recv[1] == COMMAND_VALUE_OFF:
return False
else:
return True
def get_analog_1(self):
"""
voltage = get_analog_1()
Reads the current analog level from port #1 (pin 2).
Returns the value as a voltage based on the 3.3 V reference pin
(pin 1).
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_ANALOG_1,
exception_message='Failed reading analog level #1!'
)
if i2c_recv is None:
return
raw = (i2c_recv[1] << 8) + i2c_recv[2]
level = float(raw) / float(COMMAND_ANALOG_MAX)
return level * 3.3
def get_analog_2(self):
"""
voltage = get_analog_2()
Reads the current analog level from port #2 (pin 4).
Returns the value as a voltage based on the 3.3 V reference pin
(pin 1).
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_ANALOG_2,
exception_message='Failed reading analog level #2!'
)
if i2c_recv is None:
return
raw = (i2c_recv[1] << 8) + i2c_recv[2]
level = float(raw) / float(COMMAND_ANALOG_MAX)
return level * 3.3
def set_comms_fail_safe(self, state):
"""
set_comms_fail_safe(state)
Sets the system to enable or disable the communications
failsafe
The failsafe will turn the motors off unless it is commanded at
least once every 1/4 of a second
Set to True to enable this failsafe, set to False to disable
this failsafe
The failsafe is disabled at power on
"""
if state:
level = COMMAND_VALUE_ON
else:
level = COMMAND_VALUE_OFF
self.write_to_bus(
COMMAND_SET_FAILSAFE,
level,
exception_message='Failed sending communications failsafe state!'
)
def get_comms_fail_safe(self):
"""
state = get_comms_fail_safe()
Read the current system state of the communications failsafe,
True for enabled, False for disabled
The failsafe will turn the motors off unless it is commanded at
least once every 1/4 of a second
"""
i2c_recv = self.read_from_bus(
COMMAND_GET_FAILSAFE,
exception_message='Failed reading communications fail safe state!'
)
if i2c_recv is None:
return
if i2c_recv[1] == COMMAND_VALUE_OFF:
return False
else:
return True
def help(self):
"""
help()
Displays the names and descriptions of the various functions
and settings provided
"""
func_list = [
ZeroBorg.__dict__.get(a) for a in
dir(ZeroBorg) if isinstance(ZeroBorg.__dict__.get(a), types.FunctionType)
]
func_list_sorted = sorted(func_list, key=lambda x: x.func_code.co_firstlineno)
print self.__doc__
print
for func in func_list_sorted:
print '=== %s === %s' % (func.func_name, func.func_doc)
def write_to_bus(self, command, value_to_set, exception_message=None):
try:
self.bus.write_byte_data(self.i2c_address, command, value_to_set)
except KeyboardInterrupt:
raise
except:
if exception_message is not None:
self.print_message(exception_message)
    def read_from_bus(self, command, length=I2C_NORM_LEN, exception_message=None):
        try:
            i2c_recv = self.bus.read_i2c_block_data(
                self.i2c_address,
                command,
                length
            )
            return i2c_recv
        except KeyboardInterrupt:
            raise
        except:
            if exception_message is not None:
                self.print_message(exception_message)
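# Minimal usage sketch (assumes a ZeroBorg on the default address; the drive
# levels and timing below are illustrative only):
#
# import time
# import ZeroBorg
# ZB = ZeroBorg.ZeroBorg()
# ZB.set_comms_fail_safe(True)   # motors stop if not commanded for 1/4 s
# try:
#     while True:
#         ZB.set_motors(0.5)     # re-command at least every 1/4 second
#         time.sleep(0.1)
# except KeyboardInterrupt:
#     ZB.motors_off()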
| amorphic/sparkcc-formulapi | race_code/zero_borg.py | Python | mit | 28,297 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('boards', '0003_board_channel'),
]
operations = [
migrations.AlterModelOptions(
name='post',
options={'ordering': ('date_created',)},
),
migrations.AddField(
model_name='board',
name='color',
field=models.CharField(max_length=7, default='#FFFFFF', verbose_name='color'),
preserve_default=True,
),
]
| hedin/paraboard-back | paraboard/boards/migrations/0004_auto_20150415_1513.py | Python | apache-2.0 | 594 |
import os
import re
import redis
import pickle
import string
from enum import Enum
from constants import RegistryKeys
from voluptuous import Schema, Required, Any, MultipleInvalid
# Get redis host and port via environment variable
REDIS_HOST = os.environ['REDIS_HOST']
REDIS_PORT = os.environ['REDIS_PORT']
# Redis key for service registry
_SERVICE_REGISTRY = 'service_registry'
class ServiceRegistry:
def __init__(self):
self.redisinstance = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)
self.validateschema = Schema({
Required(RegistryKeys.SERVICE_ID): Any(str, unicode),
Required(RegistryKeys.SERVICE_NAME): Any(str, unicode),
Required(RegistryKeys.SERVICE_PORT): int,
Required(RegistryKeys.NAMESPACE, default='default'): Any(str, unicode),
Required(RegistryKeys.ENDPOINTS): [{
Required(RegistryKeys.URI): Any(str, unicode),
Required(RegistryKeys.ACCEPTANCE_REGEX): Any(str, unicode),
RegistryKeys.FILTER_REGEX: {
Required(RegistryKeys.PATTERN): Any(str, unicode),
Required(RegistryKeys.REPLACE): Any(str, unicode)
}
}]
})
def register(self, service_dict):
try:
# Validate the service that needs to be registered
self.validateschema(service_dict)
service_id = service_dict[RegistryKeys.SERVICE_ID]
# Compile each endpoint regex
endpoints = service_dict[RegistryKeys.ENDPOINTS]
for endpoint in endpoints:
a_regex = endpoint.pop(RegistryKeys.ACCEPTANCE_REGEX)
f_regex = endpoint.pop(RegistryKeys.FILTER_REGEX, None)
compiled_a_regex = re.compile(a_regex)
compiled_f_regex = None
if f_regex:
compiled_f_regex = {
RegistryKeys.COMPILED_PATTERN: re.compile(f_regex.pop(RegistryKeys.PATTERN), re.I),
RegistryKeys.REPLACE: f_regex.pop(RegistryKeys.REPLACE, None)
}
# Update the endpoint object
endpoint[RegistryKeys.FILTER_COMPILED_REGEX] = compiled_f_regex
endpoint[RegistryKeys.ACCEPTANCE_COMPILED_REGEX] = compiled_a_regex
self.redisinstance.hset(_SERVICE_REGISTRY, service_id, self._serialize_data(service_dict))
return True
        except MultipleInvalid:
return False
def deregister(self, service_id):
if service_id:
self.redisinstance.hdel(_SERVICE_REGISTRY, service_id)
return not self.redisinstance.hexists(_SERVICE_REGISTRY, service_id)
return False
def get_service(self, service_id):
return self._deserialize_data(self.redisinstance.hget(_SERVICE_REGISTRY, service_id))
def get_service_list(self):
return self.redisinstance.hkeys(_SERVICE_REGISTRY)
def get_detailed_service_list(self):
all_services_map = self.redisinstance.hgetall(_SERVICE_REGISTRY)
all_services_list = all_services_map.values() if all_services_map else []
deserialized_all_services_list = []
for service in all_services_list:
deserialized_all_services_list.append(self._deserialize_data(service))
return deserialized_all_services_list
    def clear(self):
        # Note: this flushes the whole redis db, not just the service registry hash
        self.redisinstance.flushdb()
    def _serialize_data(self, data):
        return None if data is None else pickle.dumps(data)
    def _deserialize_data(self, data):
        return None if data is None else pickle.loads(data)
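# Usage sketch (assumes REDIS_HOST/REDIS_PORT are exported; the service
# values and regexes below are hypothetical). NAMESPACE may be omitted,
# since the schema supplies a default:
#
# registry = ServiceRegistry()
# registry.register({
#     RegistryKeys.SERVICE_ID: 'reviews-1',
#     RegistryKeys.SERVICE_NAME: 'reviews',
#     RegistryKeys.SERVICE_PORT: 8080,
#     RegistryKeys.ENDPOINTS: [{
#         RegistryKeys.URI: '/api/reviews',
#         RegistryKeys.ACCEPTANCE_REGEX: r'^/api/reviews(/.*)?$',
#     }],
# })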
| lahsivjar/jarvis-kube | core/orchestrator/src/serviceregistry.py | Python | mit | 3,331 |
# Test/__init__.py
#
# Copyright (C) 2005 Jens Gutzeit <jens@jgutzeit.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
| ProgVal/PyCorewar | Test/__init__.py | Python | gpl-2.0 | 776 |
# Spreading Udaciousness
# One of our modest goals is to teach everyone in the world to program and
# understand computer science. To estimate how long this will take we have
# developed a (very flawed!) model:
# Everyone answering this question will convince a number, spread, (input to
# the model) of their friends to take the course next offering. This will
# continue, so that all of the newly recruited students, as well as the original
# students, will convince spread of their
# friends to take the following offering of the course.
# All recruited friends are unique, so there is no duplication among the newly
# recruited students. Define a procedure, hexes_to_udaciousness(n, spread,
# target), that takes three inputs: the starting number of Udacians, the spread
# rate (how many new friends each Udacian convinces to join each hexamester),
# and the target number, and outputs the number of hexamesters needed to reach
# (or exceed) the target.
def hexes_to_udaciousness(n, spread, target):
    res = n
    count = 0
    while res < target:
        count += 1
        res += spread * res
    return count
# 0 more needed, since n already exceeds target
assert hexes_to_udaciousness(100000, 2, 36230) == 0
# after 1 hexamester, there will be 50000 + (50000 * 2) Udacians
assert hexes_to_udaciousness(50000, 2, 150000) == 1
# need to match or exceed the target
assert hexes_to_udaciousness(50000, 2, 150001) == 2
# only 12 hexamesters (2 years) to world domination!
assert hexes_to_udaciousness(20000, 2, 7 * 10 ** 9) == 12
# more friends means faster world domination!
assert hexes_to_udaciousness(15000, 3, 7 * 10 ** 9) == 10
# recursive way
def hexes_to_udaciousness(n, spread, target):
    return 1 + hexes_to_udaciousness(n + n * spread, spread, target) if n < target else 0
# 0 more needed, since n already exceeds target
assert hexes_to_udaciousness(100000, 2, 36230) == 0
# after 1 hexamester, there will be 50000 + (50000 * 2) Udacians
assert hexes_to_udaciousness(50000, 2, 150000) == 1
# need to match or exceed the target
assert hexes_to_udaciousness(50000, 2, 150001) == 2
# only 12 hexamesters (2 years) to world domination!
assert hexes_to_udaciousness(20000, 2, 7 * 10 ** 9) == 12
# more friends means faster world domination!
assert hexes_to_udaciousness(15000, 3, 7 * 10 ** 9) == 10
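# A closed-form cross-check under the same growth model: after h hexamesters
# there are n * (1 + spread) ** h Udacians, so the answer is the smallest
# integer h with n * (1 + spread) ** h >= target. (A sketch; beware float
# rounding exactly at the boundary.)
import math

def hexes_to_udaciousness_closed_form(n, spread, target):
    if n >= target:
        return 0
    return int(math.ceil(math.log(float(target) / n, 1 + spread)))

assert hexes_to_udaciousness_closed_form(20000, 2, 7 * 10 ** 9) == 12
assert hexes_to_udaciousness_closed_form(15000, 3, 7 * 10 ** 9) == 10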
| codecakes/random_games | order_of_growth_target.py | Python | mit | 2,382 |
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Script to download the Render Pipeline samples
"""
import os
import sys
sys.path.insert(0, "../")
sys.path.insert(0, "../rpcore/util")
from submodule_downloader import download_submodule
if __name__ == "__main__":
# Make sure we are in the right directory
main_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(main_dir)
# Now extract the samples
download_submodule("tobspr", "RenderPipeline-Samples", ".", ["README.md", "LICENSE"])
| croxis/SpaceDrive | spacedrive/renderpipeline/samples/download_samples.py | Python | mit | 1,629 |
#!/usr/bin/env python
"""
split_file.py [-o <dir>] <path>
Take the file at <path> and write it to multiple files, switching to a new file
every time an annotation of the form "// BEGIN file1.swift" is encountered. If
<dir> is specified, place the files in <dir>; otherwise, put them in the
current directory.
"""
import getopt
import os
import re
import sys
def usage():
sys.stderr.write(__doc__.strip() + "\n")
sys.exit(1)
fp_out = None
dest_dir = '.'
try:
opts, args = getopt.getopt(sys.argv[1:], 'o:h')
for (opt, arg) in opts:
if opt == '-o':
dest_dir = arg
elif opt == '-h':
usage()
except getopt.GetoptError:
usage()
if len(args) != 1:
usage()
fp_in = open(args[0], 'r')
for line in fp_in:
m = re.match(r'^//\s*BEGIN\s+([^\s]+)\s*$', line)
if m:
if fp_out:
fp_out.close()
fp_out = open(os.path.join(dest_dir, m.group(1)), 'w')
elif fp_out:
fp_out.write(line)
fp_in.close()
if fp_out:
fp_out.close()
| khizkhiz/swift | utils/split_file.py | Python | apache-2.0 | 1,030 |
# -*- coding: UTF-8 -*-
# Copyright 2012-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
import logging ; logger = logging.getLogger(__name__)
# import os
# import yaml
# import base64
from unipath import Path
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.utils.translation import gettext
from django.core.exceptions import ValidationError
from etgen.html import E
from lino.core.utils import get_field
from lino.utils import ssin
from lino.modlib.checkdata.choicelists import Checker
from lino.api import dd
from .actions import BeIdReadCardAction, FindByBeIdAction
from .choicelists import BeIdCardTypes
MALE = Path(__file__).parent.child('luc.jpg')
FEMALE = Path(__file__).parent.child('ly.jpg')
class SSIN(dd.Model):
# used by lino_presto which does not use beid
class Meta:
abstract = True
validate_national_id = False
# national_id = dd.NullCharField(
# max_length=200,
# unique=True,
# verbose_name=_("National ID")
# #~ blank=True,verbose_name=_("National ID")
# # ~ ,validators=[ssin.ssin_validator] # 20121108
# )
national_id = models.CharField(
_("National ID"), max_length=200,
unique=True, blank=True, null=True
# ~ ,validators=[ssin.ssin_validator] # 20121108
)
nationality = dd.ForeignKey('countries.Country',
blank=True, null=True,
related_name="%(app_label)s_%(class)s_set_by_nationality",
verbose_name=_("Nationality"))
def full_clean(self):
if self.validate_national_id and self.national_id:
self.national_id = ssin.parse_ssin(self.national_id)
super(SSIN, self).full_clean()
class BeIdCardHolder(SSIN):
class Meta:
abstract = True
birth_country = dd.ForeignKey(
"countries.Country",
blank=True, null=True,
verbose_name=_("Birth country"), related_name='by_birth_place')
birth_place = models.CharField(_("Birth place"), max_length=200, blank=True)
card_number = models.CharField(max_length=20,
blank=True, # null=True,
verbose_name=_("eID card number"))
card_valid_from = models.DateField(
blank=True, null=True,
verbose_name=_("ID card valid from"))
card_valid_until = models.DateField(
blank=True, null=True,
verbose_name=_("until"))
card_type = BeIdCardTypes.field(blank=True)
card_issuer = models.CharField(max_length=50,
blank=True, # null=True,
verbose_name=_("eID card issuer"))
nationality_text = models.CharField(_("Nationality (text)"), max_length=200, blank=True)
read_beid = BeIdReadCardAction()
find_by_beid = FindByBeIdAction()
noble_condition = models.CharField(
max_length=50,
blank=True, # null=True,
verbose_name=_("noble condition"),
help_text=_("The eventual noble condition of this person."))
beid_readonly_fields = set(
'noble_condition card_valid_from card_valid_until \
card_issuer card_number card_type'.split())
def disabled_fields(self, ar):
rv = super(BeIdCardHolder, self).disabled_fields(ar)
if not ar.get_user().user_type.has_required_roles([dd.SiteStaff]):
rv |= self.beid_readonly_fields
#~ logger.info("20130808 beid %s", rv)
return rv
    def has_valid_card_data(self, today=None):
        if not self.card_number:
            return False
        if not self.card_valid_until:
            return False
        if self.card_valid_until < (today or dd.today()):
            return False
        return True
@dd.displayfield(_("eID card"), default='<br/><br/><br/><br/>')
def eid_info(self, ar):
"Display some information about the eID card."
attrs = {'class':"lino-info"}
if ar is None:
return E.div(**attrs)
must_read = False
elems = []
if self.card_number:
elems += ["%s %s (%s)" %
(gettext("Card no."), self.card_number, self.card_type)]
if self.card_issuer:
elems.append(", %s %s" %
(gettext("issued by"), self.card_issuer))
#~ card_issuer = _("issued by"),
if self.card_valid_until is not None:
valid = ", %s %s %s %s" % (
gettext("valid from"), dd.dtos(self.card_valid_from),
gettext("until"), dd.dtos(self.card_valid_until))
if self.card_valid_until < dd.today():
must_read = True
elems.append(E.b(valid))
elems.append(E.br())
else:
elems.append(valid)
else:
must_read = True
else:
must_read = True
if must_read:
msg = _("Must read eID card!")
if dd.plugins.beid:
elems.append(ar.instance_action_button(
self.read_beid, msg, icon_name=None))
else:
elems.append(msg)
# same red as in lino.css for .x-grid3-row-red td
# ~ attrs.update(style="background-color:#FA7F7F; padding:3pt;")
attrs['class'] = "lino-info-red"
return E.div(*elems, **attrs)
def get_beid_diffs(self, attrs):
# Return two lists, one with the objects to save, and another
# with text lines to build a confirmation message explaining
# which changes are going to be applied after confirmation.
        # The default implementation is for the simple case where the
# holder is also a contacts.AddressLocation and the address is
# within the same database row.
diffs = []
objects = [self]
model = self.__class__ # the holder
for fldname, new in attrs.items():
fld = get_field(model, fldname)
old = getattr(self, fldname)
if old != new:
diffs.append(
"{} : {} -> {}".format(
str(fld.verbose_name), dd.obj2str(old), dd.obj2str(new)))
setattr(self, fld.name, new)
return objects, diffs
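    # Usage sketch (the field value below is hypothetical, and the exact
    # diff strings depend on dd.obj2str):
    #
    #   objects, diffs = holder.get_beid_diffs({'birth_place': 'Eupen'})
    #   # objects == [holder]; diffs holds one line per changed field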
@dd.htmlbox()
def image(self, ar):
url = self.get_image_url(ar)
return E.a(E.img(src=url, width="100%"), href=url, target="_blank")
# s = '<img src="%s" width="100%%"/>' % url
# s = '<a href="%s" target="_blank">%s</a>' % (url, s)
# return s
@classmethod
def get_image_parts(cls, card_number):
return ("beid", card_number + ".jpg")
@classmethod
def card_number_to_image_path(cls, card_number):
"""
Return the full path of the image file on the server for the given id card.
This may be used by printable templates.
"""
if card_number:
parts = cls.get_image_parts(card_number)
# return os.path.join(settings.MEDIA_ROOT, *parts)
return Path(settings.MEDIA_ROOT).child(*parts)
return Path(settings.STATIC_ROOT).child("contacts.Person.jpg")
def get_image_url(self, ar):
if self.card_number:
parts = self.get_image_parts(self.card_number)
return settings.SITE.build_media_url(*parts)
return settings.SITE.build_static_url("contacts.Person.jpg")
def get_image_path(self):
return self.card_number_to_image_path(self.card_number)
def make_demo_picture(self):
# Create a demo picture for this card holder.
if not self.card_number:
raise Exception("20150730")
src = self.mf(MALE, FEMALE)
dst = self.get_image_path()
# dst = settings.SITE.cache_dir.child(
# 'media', 'beid', self.card_number + '.jpg')
if dst.needs_update([src]):
logger.info("Create demo picture %s", dst)
settings.SITE.makedirs_if_missing(dst.parent)
src.copy(dst)
else:
logger.info("Demo picture %s is up-to-date", dst)
class SSINChecker(Checker):
model = SSIN
verbose_name = _("Check for invalid SSINs")
def get_checkdata_problems(self, obj, fix=False):
if obj.national_id:
try:
expected = ssin.parse_ssin(obj.national_id)
except ValidationError as e:
yield (False, _("Cannot fix invalid SSIN ({0})").format(e))
else:
got = obj.national_id
if got != expected:
msg = _("Malformed SSIN '{got}' must be '{expected}'.")
params = dict(expected=expected, got=got, obj=obj)
yield (True, msg.format(**params))
if fix:
obj.national_id = expected
try:
obj.full_clean()
except ValidationError as e:
msg = _("Failed to fix malformed "
"SSIN '{got}' of '{obj}'.")
msg = msg.format(**params)
raise Warning(msg)
obj.save()
SSINChecker.activate()
| lino-framework/xl | lino_xl/lib/beid/mixins.py | Python | bsd-2-clause | 9,424 |
"""
FC Driver utils function
"""
import sys
import traceback
import hashlib
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def log_exception(exception=None):
"""
:param exception:
:return:
"""
if exception:
# TODO
pass
etype, value, track_tb = sys.exc_info()
error_list = traceback.format_exception(etype, value, track_tb)
for error_info in error_list:
LOG.error(error_info)
def str_drop_password_key(str_data):
"""
remove json password key item
:param data:
:return:
"""
null = "null"
true = "true"
false = "false"
dict_data = eval(str_data)
if isinstance(dict_data, dict):
drop_password_key(dict_data)
return str(dict_data)
else:
LOG.debug(_("[BRM-DRIVER] str_data can't change to dict, str_data:(%s) "), str_data)
return
def drop_password_key(data):
"""
remove json password key item
:param data:
:return:
"""
encrypt_list = ['password', 'vncpassword', 'oldpassword',
'domainpassword', 'vncoldpassword', 'vncnewpassword',
'auth_token', 'token', 'fc_pwd', 'accessKey',
'secretKey']
for key in data.keys():
if key in encrypt_list:
del data[key]
elif data[key] and isinstance(data[key], dict):
drop_password_key(data[key])
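# Illustrative example (hypothetical data) - sensitive keys are removed
# in place, including inside nested dicts:
#
#   info = {'user': 'admin', 'password': 's3cret',
#           'extra': {'auth_token': 'abc', 'host': 'fc01'}}
#   drop_password_key(info)
#   # info == {'user': 'admin', 'extra': {'host': 'fc01'}}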
def sha256_based_key(key):
"""
generate sha256 based key
:param key:
:return:
"""
hash_ = hashlib.sha256()
hash_.update(key)
    return hash_.hexdigest()
| hybrid-storage-dev/cinder-fs-111t-hybrid-cherry | volume/drivers/huawei/vrm/utils.py | Python | apache-2.0 | 1672
import socket
from kmediatorrent import plugin, magnet, scrapers
from kmediatorrent.ga import tracked
from kmediatorrent.utils import ensure_fanart
from kmediatorrent.platform import PLATFORM
scrapers = ["eztv","yify","tpb","kickass","bitsnoop","btdigg","extratorrent","nyaa","zelka","cpb"]
scrapersToDisplay = []
for scraper in scrapers:
if plugin.get_setting("display_%s"%scraper, bool):
scrapersToDisplay.append("%s"%scraper)
from kmediatorrent.scrapers import MODULES
for scraper in scrapersToDisplay:
__import__("kmediatorrent.scrapers", fromlist=[scraper])
@plugin.route("/")
@ensure_fanart
@tracked(force=True)
def index():
if PLATFORM["os"] not in ["android", "linux", "windows", "darwin"]:
plugin.notify("Your system \"%(os)s_%(arch)s\" is not supported." % PLATFORM, delay=15000)
for module in MODULES:
yield {
"label": module["name"],
"thumbnail": module["image"],
"path": plugin.url_for(module["view"]),
}
| jmarth/plugin.video.kmediatorrent | resources/site-packages/kmediatorrent/index.py | Python | gpl-3.0 | 1,012 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for external divxstage videos
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
logger.info("[divxstage.py] test_video_exists(page_url='%s')" % page_url)
data = scrapertools.cache_page( url = page_url )
if "<h3>This file no longer exists" in data:
return False,"El archivo no existe<br/>en divxstage o ha sido borrado."
else:
return True,""
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[divxstage.py] get_video_url(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
video_urls = []
    # Download the page
headers = [ ['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'],['Referer','http://www.movshare.net/'] ]
data = scrapertools.cache_page(page_url , headers = headers)
    # Download it again, as if the button had been clicked
data = scrapertools.cache_page(page_url , headers = headers)
    # Extract the video
#flashvars.file="an6u81bpsbenn";
#flashvars.filekey="88.12.109.83-e2d263cbff66b2a510d6f7417a57e498";
file = scrapertools.get_match(data,'flashvars.file="([^"]+)"')
filekey = scrapertools.get_match(data,'flashvars.filekey="([^"]+)"')
#http://www.divxstage.eu/api/player.api.php?file=pn7tthffreyoo&user=undefined&pass=undefined&codes=1&key=88%2E12%2E109%2E83%2Df1d041537679b37f5b25404ac66b341b
filekey = filekey.replace(".","%2E")
filekey = filekey.replace("-","%2D")
url = "http://www.divxstage.eu/api/player.api.php?file="+file+"&user=undefined&pass=undefined&codes=1&key="+filekey
data = scrapertools.cache_page(url , headers = headers)
logger.info("data="+data)
location = scrapertools.get_match(data,"url=([^\&]+)\&")
video_urls.append( [ scrapertools.get_filename_from_url(location)[-4:]+" [divxstage]" , location ] )
for video_url in video_urls:
logger.info("[divxstage.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Find this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# divxstage http://www.divxstage.net/video/of7ww1tdv62gf"
patronvideos = 'http://www.divxstage.[\w]+/video/([\w]+)'
logger.info("[divxstage.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[Divxstage]"
url = "http://www.divxstage.net/video/"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'divxstage' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
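# Illustrative example - find_videos() scans arbitrary text for divxstage
# links and returns one [title, url, server] triple per unique video id:
#
#   find_videos('<a href="http://www.divxstage.net/video/of7ww1tdv62gf">')
#   # -> [['[Divxstage]', 'http://www.divxstage.net/video/of7ww1tdv62gf', 'divxstage']]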
def test():
video_urls = get_video_url("http://www.divxstage.net/video/of7ww1tdv62gf")
return len(video_urls)>0 | dknlght/dkodi | src/plugin.video.animehere/servers/divxstage.py | Python | gpl-2.0 | 3,303 |
import os
from os import path
import datetime
def main():
print(os.name)
print("Item exists: " + str(path.exists("./PyPractice/conditions.py")))
print("isDirectory ./PyPractice/conditions.py: " + str(path.isdir("./PyPractice/conditions.py")))
print("isDirectory ./PyPractice: " + str(path.isdir("./PyPractice")))
print("isFile ./PyPractice: " + str(path.isfile("./PyPractice")))
print("isFile ./PyPractice/conditions.py: " + str(path.isfile("./PyPractice/conditions.py")))
    ## Delete fileops.txt if it exists
if(path.exists("fileops.txt")):
os.unlink("fileops.txt")
## Get Modification time of a file
t = datetime.datetime.fromtimestamp(path.getmtime("./PyPractice/functions.py"))
print (t)
if __name__ == "__main__":
main()
| mudragada/util-scripts | PyPractice/ospath.py | Python | mit | 824 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('stories', '0006_auto_20140821_0859'),
]
operations = [
migrations.CreateModel(
name='StoryImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
('name', models.CharField(help_text='The name to describe this image', max_length=64)),
('image', models.ImageField(help_text='The image file to use', upload_to=b'stories')),
('created_by', models.ForeignKey(help_text=b'The user which originally created this item', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(help_text=b'The user which last modified this item', to=settings.AUTH_USER_MODEL)),
('story', models.ForeignKey(help_text='The story to associate to', to='stories.Story')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| peterayeni/dash | dash/stories/migrations/0007_storyimage.py | Python | bsd-3-clause | 1,645 |
from rspecs.src.geni.v3.container.resource import Resource
from rspecs.src.geni.v3.container.link import Link
class CrafterManager:
#TODO: Take into account extensions
def __init__(self, resources=[], options={}):
self.resources = resources
self.options = options
self._urn_authority = "urn:publicID:MYAUTHORITY"
def get_advertisement(self, resources):
"""
Return advertisement with information of resources.
"""
output = self.advert_header()
for resource in resources:
output += self.advert_resource(resource)
output += self.advert_footer()
return output
def advert_node_template(self):
tmpl = """<node component_manager_id="%s" component_name="%s" component_id="%s" exclusive="%s">
<available now="%s"/>
</node>
"""
return tmpl
def advert_link_template(self):
tmpl = '''<link component_id="%s" component_name="%s">
<property source_id="%s" dest_id="%s" capacity="%s"/>
<link_type name="%s"/>
</link> '''
return tmpl
def advert_resource(self,resource):
resource_dir = dir(resource)
if resource_dir == dir(Link()):
return self.advert_link(resource)
elif resource_dir == dir(Resource()):
return self.advert_node(resource)
else:
return ""
def advert_node(self, resource):
resource_component_manager_id = str(resource.get_component_manager_id())
resource_exclusive = str(resource.get_exclusive()).lower()
resource_available = str(resource.get_available()).lower()
resource_component_name = resource.get_component_name()
resource_component_id = resource.get_component_id()
return self.advert_node_template() % (resource_component_manager_id,
resource_component_name,
resource_component_id,
resource_exclusive,
resource_available)
def advert_link(self, link):
resource_component_name = link.get_component_name()
resource_component_id = link.get_component_id()
resource_source_id = link.get_source_id()
resource_dest_id = link.get_dest_id()
resource_capacity = link.get_capacity()
resource_type = link.get_type()
return self.advert_link_template() % (resource_component_id,
resource_component_name,
resource_source_id,
resource_dest_id,
str(resource_capacity),
resource_type)
def advert_header(self):
header = """<?xml version="1.0" encoding="UTF-8"?>
<rspec xmlns="http://www.geni.net/resources/rspec/3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/ad.xsd" type="advertisement">\n"""
return header
def advert_footer(self):
return "</rspec>\n"
def manifest_header(self):
header = """<?xml version="1.0" encoding="UTF-8"?>
<rspec xmlns="http://www.geni.net/resources/rspec/3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/manifest.xsd" type="manifest">\n"""
return header
def manifest_template(self):
template ="""<node client_id="%s" component_id="%s" component_manager_id="%s" sliver_id="%s">\n"""
return template
def manifest_node_close_template(self):
template ="""</node>\n"""
return template
def manifest_sliver_type_template(self):
template = """<sliver_type name="%s"/>\n"""
return template
def manifest_services_template(self):
template = """<login authentication="ssh-keys" hostname="%s" port="22" username="%s"/>\n"""
return template
def manifest_services_template_root(self):
# BasicAuth for root; PKI for others
template = """<login authentication="ssh" hostname="%s" port="22" username="root:openflow"/>\n"""
return template
def manifest_slivers(self, resources):
"""
Return manifest with information of slivers.
"""
result = self.manifest_header()
for resource in resources:
sliver = resource.get_sliver()
result += self.manifest_template() % (sliver.get_client_id(), resource.get_component_id(), resource.get_component_manager_id(), sliver.get_urn())
if sliver.get_type():
result += self.manifest_sliver_type_template() % (sliver.get_type())
if sliver.get_services():
services = sliver.get_services()
result += "<services>\n"
for service in services:
if service["login"]["username"].startswith("root:"):
result += self.manifest_services_template_root() % service["login"]["hostname"]
else:
result += self.manifest_services_template() % (service["login"]["hostname"], service["login"]["username"])
result += "</services>\n"
result += self.manifest_node_close_template()
result += self.manifest_footer()
return result
def manifest_footer(self):
return "</rspec>\n"
| dana-i2cat/felix | core/lib/am/rspecs/src/geni/v3/craftermanager.py | Python | apache-2.0 | 5,625 |
from setuptools import setup, find_packages
import sys, os
version = '1.0.0'
setup(name='brica1',
version=version,
description="",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='ktnyt',
author_email='kotone [at] sfc.keio.ac.jp',
url='',
license='Apache v2',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'numpy'
],
entry_points="""
# -*- Entry points: -*-
""",
)
| wbap/V1 | python/setup.py | Python | apache-2.0 | 657 |
#!/usr/bin/env python3
"""
Utilities and classes for Botty plugins.
Should be imported by all Botty plugins.
"""
import os, re
import functools
CHAT_HISTORY_DIRECTORY = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "@history")
class BasePlugin:
"""Base class for Botty plugins. Should be imported from plugins using `from .utilities import BasePlugin`."""
def __init__(self, bot):
self.bot = bot
self.logger = bot.logger.getChild(self.__class__.__name__)
self.flows = {}
def get_history_files(self):
"""Returns a mapping from channel IDs to absolute file paths of their history entries"""
for dirpath, _, filenames in os.walk(CHAT_HISTORY_DIRECTORY):
result = {}
for history_file in filenames:
channel_id, extension = os.path.splitext(os.path.basename(history_file))
if extension != ".json": continue
result[channel_id] = os.path.join(dirpath, history_file)
return result
return {}
def on_step(self): return False
def on_message(self, message): return False
def say(self, sendable_text, *, channel_id, thread_id=None): return self.bot.say(sendable_text, channel_id=channel_id, thread_id=thread_id)
def say_raw(self, text, *, channel_id, thread_id=None): return self.bot.say(self.text_to_sendable_text(text), channel_id=channel_id, thread_id=thread_id)
def say_complete(self, sendable_text, *, channel_id, thread_id=None): return self.bot.say_complete(sendable_text, channel_id=channel_id, thread_id=thread_id)
def say_raw_complete(self, text, *, channel_id, thread_id=None): return self.bot.say_complete(self.text_to_sendable_text(text), channel_id=channel_id, thread_id=thread_id)
def respond(self, sendable_text, *, as_thread=False): return self.bot.respond(sendable_text, as_thread=as_thread)
def respond_raw(self, text, *, as_thread=False): return self.bot.respond(self.text_to_sendable_text(text), as_thread=as_thread)
def respond_complete(self, sendable_text, *, as_thread=False): return self.bot.respond_complete(sendable_text, as_thread=as_thread)
def respond_raw_complete(self, text, *, as_thread=False): return self.bot.respond_complete(self.text_to_sendable_text(text), as_thread=as_thread)
def react(self, channel_id, timestamp, emoticon): return self.bot.react(channel_id, timestamp, emoticon)
def unreact(self, channel_id, timestamp, emoticon): return self.bot.unreact(channel_id, timestamp, emoticon)
def reply(self, emoticon): return self.bot.reply(emoticon)
def unreply(self, emoticon): return self.bot.unreply(emoticon)
def get_channel_name_by_id(self, channel_id): return self.bot.get_channel_name_by_id(channel_id)
def get_channel_id_by_name(self, channel_name): return self.bot.get_channel_id_by_name(channel_name)
def get_user_id_by_name(self, user_name): return self.bot.get_user_id_by_name(user_name)
def get_user_name_by_id(self, user_id): return self.bot.get_user_name_by_id(user_id)
def get_direct_message_channel_id_by_user_id(self, user_id): return self.bot.get_direct_message_channel_id_by_user_id(user_id)
def get_user_info_by_id(self, user_id): return self.bot.get_user_info_by_id(user_id)
def get_user_is_bot(self, user_id): return self.bot.get_user_is_bot(user_id)
def text_to_sendable_text(self, text): return self.bot.text_to_sendable_text(text)
def sendable_text_to_text(self, sendable_text): return self.bot.sendable_text_to_text(sendable_text)
def get_bot_user_id(self): return self.bot.bot_user_id
class Flow:
"""Create a new `Flow` instance (which map keys to generator iterators) with `generator_function` as its generator function. This class can be used to replace many complex message handling state machines with clean and concise Python code."""
def __init__(self, generator_function):
self.generator_function = generator_function
self.generator_iterators = {}
def start(self, flow_key, parameter_data = None):
"""Discards the current generator iterator associated with key `flow_key`, creates a new state machine from the generator function by calling it with `parameter_data` as an argument, then runs the state machine until it first yields."""
self.generator_iterators[flow_key] = self.generator_function(parameter_data)
next(self.generator_iterators[flow_key]) # run the generator all the way up until it first yields
def is_running(self, flow_key):
"""Returns `True` if there is currently a generator iterator associated with key `flow_key`, `False` otherwise."""
return flow_key in self.generator_iterators
def step(self, flow_key, yield_data = None):
"""Returns the result of running the generator iterator associated with key `flow_key` (sending the iterator `yield_data` in the process), or `False` if there is no such generator iterator."""
if flow_key not in self.generator_iterators: return False
try:
return self.generator_iterators[flow_key].send(yield_data)
except StopIteration as e:
del self.generator_iterators[flow_key] # remove the completed flow
return e.value
return False
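    # Illustrative sketch (hypothetical generator function; not part of
    # this module). A flow pauses at each `yield` until step() feeds it
    # the next message, and its return value is delivered by step():
    #
    #   def greet(first_message):
    #       reply = yield              # wait for the next routed message
    #       return "hello, " + reply
    #
    #   flow = BasePlugin.Flow(greet)
    #   flow.start("U123", first_message)      # runs up to the yield
    #   result = flow.step("U123", "world")    # -> "hello, world"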
def untag_word(word):
"""Returns `word` where characters are modified to appear the same but not tag users."""
assert isinstance(word, str), "`word` must be a string"
homoglyph_replacements = [ # glyphs that look similar to the glyphs that can tag users, in descending order by similarity
# basically identical replacements
(",", "\u201A"), ("-", "\u2010"), (";", "\u037E"), ("A", "\u0391"), ("B", "\u0392"),
("C", "\u0421"), ("D", "\u216E"), ("E", "\u0395"), ("H", "\u0397"), ("I", "\u0399"),
("J", "\u0408"), ("K", "\u039A"), ("L", "\u216C"), ("M", "\u039C"), ("N", "\u039D"),
("O", "\u039F"), ("P", "\u03A1"), ("S", "\u0405"), ("T", "\u03A4"), ("V", "\u2164"),
("X", "\u03A7"), ("Y", "\u03A5"), ("Z", "\u0396"), ("a", "\u0430"), ("c", "\u03F2"),
("d", "\u217E"), ("e", "\u0435"), ("i", "\u0456"), ("j", "\u0458"), ("l", "\u217C"),
("m", "\u217F"), ("o", "\u03BF"), ("p", "\u0440"), ("s", "\u0455"), ("v", "\u03BD"),
("x", "\u0445"), ("y", "\u0443"), ("\u00DF", "\u03B2"), ("\u00E4", "\u04D3"), ("\u00F6", "\u04E7"),
# similar replacements
("/", "\u2044"), ("F", "\u03DC"), ("G", "\u050C"), ("\u00C4", "\u04D2"), ("\u00D6", "\u04E6"),
# fixed width replacements
("*", "\uFF0A"), ("!", "\uFF01"), ("\"", "\uFF02"), ("#", "\uFF03"), ("$", "\uFF04"),
("%", "\uFF05"), ("&", "\uFF06"), ("'", "\uFF07"), ("(", "\uFF08"), (")", "\uFF09"),
("+", "\uFF0B"), (".", "\uFF0E"), ("0", "\uFF10"), ("1", "\uFF11"), ("2", "\uFF12"),
("3", "\uFF13"), ("4", "\uFF14"), ("5", "\uFF15"), ("6", "\uFF16"), ("7", "\uFF17"),
("8", "\uFF18"), ("9", "\uFF19"), (":", "\uFF1A"), ("<", "\uFF1C"), ("=", "\uFF1D"),
(">", "\uFF1E"), ("?", "\uFF1F"), ("@", "\uFF20"), ("Q", "\uFF31"), ("R", "\uFF32"),
("U", "\uFF35"), ("W", "\uFF37"), ("[", "\uFF3B"), ("\\", "\uFF3C"), ("]", "\uFF3D"),
("^", "\uFF3E"), ("_", "\uFF3F"), ("`", "\uFF40"), ("b", "\uFF42"), ("f", "\uFF46"),
("g", "\uFF47"), ("h", "\uFF48"), ("k", "\uFF4B"), ("n", "\uFF4E"), ("q", "\uFF51"),
("r", "\uFF52"), ("t", "\uFF54"), ("u", "\uFF55"), ("w", "\uFF57"), ("z", "\uFF5A"),
("{", "\uFF5B"), ("|", "\uFF5C"), ("}", "\uFF5D"), ("~", "\uFF5E"),
]
for character, homoglyph in homoglyph_replacements:
new_word = word.replace(character, homoglyph, 1)
if new_word != word:
return new_word
return word
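# Example: untag_word("alice") -> "\u0430lice"; the Latin "a" is swapped
# for the visually identical Cyrillic "\u0430", so the word still reads
# as "alice" but no longer matches the user name for tagging.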
def clockify(dt):
day_ratio = ((dt.hour % 12) + ((dt.minute + (dt.second / 60)) / 60)) / 12
clock_emoji = [
"clock12", "clock1230", "clock1", "clock130", "clock2",
"clock230", "clock3", "clock330", "clock4", "clock430",
"clock5", "clock530", "clock6", "clock630", "clock7",
"clock730", "clock8", "clock830", "clock9", "clock930",
"clock10", "clock1030", "clock11", "clock1130", "clock12"]
    return clock_emoji[round(day_ratio * (len(clock_emoji) - 1))]
| Uberi/botty-bot-bot-bot | src/plugins/utilities.py | Python | mit | 8730
"""Redis cache backend."""
import random
import re
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.core.serializers.base import PickleSerializer
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
class RedisSerializer(PickleSerializer):
def dumps(self, obj):
if isinstance(obj, int):
return obj
return super().dumps(obj)
def loads(self, data):
try:
return int(data)
except ValueError:
return super().loads(data)
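# Note: integers deliberately bypass pickling (dumps(5) -> 5, and loads()
# first tries int()) so that values written by set()/add() stay usable by
# Redis INCR/DECR in incr() below; everything else round-trips via pickle.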
class RedisCacheClient:
def __init__(
self,
servers,
serializer=None,
db=None,
pool_class=None,
parser_class=None,
):
import redis
self._lib = redis
self._servers = servers
self._pools = {}
self._client = self._lib.Redis
if isinstance(pool_class, str):
pool_class = import_string(pool_class)
self._pool_class = pool_class or self._lib.ConnectionPool
if isinstance(serializer, str):
serializer = import_string(serializer)
if callable(serializer):
serializer = serializer()
self._serializer = serializer or RedisSerializer()
if isinstance(parser_class, str):
parser_class = import_string(parser_class)
parser_class = parser_class or self._lib.connection.DefaultParser
self._pool_options = {'parser_class': parser_class, 'db': db}
def _get_connection_pool_index(self, write):
# Write to the first server. Read from other servers if there are more,
# otherwise read from the first server.
if write or len(self._servers) == 1:
return 0
return random.randint(1, len(self._servers) - 1)
def _get_connection_pool(self, write):
index = self._get_connection_pool_index(write)
if index not in self._pools:
self._pools[index] = self._pool_class.from_url(
self._servers[index], **self._pool_options,
)
return self._pools[index]
def get_client(self, key=None, *, write=False):
# key is used so that the method signature remains the same and custom
# cache client can be implemented which might require the key to select
# the server, e.g. sharding.
pool = self._get_connection_pool(write)
return self._client(connection_pool=pool)
def add(self, key, value, timeout):
client = self.get_client(key, write=True)
value = self._serializer.dumps(value)
if timeout == 0:
if ret := bool(client.set(key, value, nx=True)):
client.delete(key)
return ret
else:
return bool(client.set(key, value, ex=timeout, nx=True))
def get(self, key, default):
client = self.get_client(key)
value = client.get(key)
return default if value is None else self._serializer.loads(value)
def set(self, key, value, timeout):
client = self.get_client(key, write=True)
value = self._serializer.dumps(value)
if timeout == 0:
client.delete(key)
else:
client.set(key, value, ex=timeout)
def touch(self, key, timeout):
client = self.get_client(key, write=True)
if timeout is None:
return bool(client.persist(key))
else:
return bool(client.expire(key, timeout))
def delete(self, key):
client = self.get_client(key, write=True)
return bool(client.delete(key))
def get_many(self, keys):
client = self.get_client(None)
ret = client.mget(keys)
return {
k: self._serializer.loads(v) for k, v in zip(keys, ret) if v is not None
}
def has_key(self, key):
client = self.get_client(key)
return bool(client.exists(key))
def incr(self, key, delta):
        client = self.get_client(key, write=True)
if not client.exists(key):
raise ValueError("Key '%s' not found." % key)
return client.incr(key, delta)
def set_many(self, data, timeout):
client = self.get_client(None, write=True)
pipeline = client.pipeline()
pipeline.mset({k: self._serializer.dumps(v) for k, v in data.items()})
if timeout is not None:
# Setting timeout for each key as redis does not support timeout
# with mset().
for key in data:
pipeline.expire(key, timeout)
pipeline.execute()
def delete_many(self, keys):
client = self.get_client(None, write=True)
client.delete(*keys)
def clear(self):
client = self.get_client(None, write=True)
return bool(client.flushdb())
class RedisCache(BaseCache):
def __init__(self, server, params):
super().__init__(params)
if isinstance(server, str):
self._servers = re.split('[;,]', server)
else:
self._servers = server
self._class = RedisCacheClient
self._options = params.get('OPTIONS', {})
@cached_property
def _cache(self):
return self._class(self._servers, **self._options)
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
# The key will be made persistent if None used as a timeout.
# Non-positive values will cause the key to be deleted.
return None if timeout is None else max(0, int(timeout))
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.add(key, value, self.get_backend_timeout(timeout))
def get(self, key, default=None, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.get(key, default)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
self._cache.set(key, value, self.get_backend_timeout(timeout))
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.touch(key, self.get_backend_timeout(timeout))
def delete(self, key, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.delete(key)
def get_many(self, keys, version=None):
key_map = {self.make_and_validate_key(key, version=version): key for key in keys}
ret = self._cache.get_many(key_map.keys())
return {key_map[k]: v for k, v in ret.items()}
def has_key(self, key, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.has_key(key)
def incr(self, key, delta=1, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.incr(key, delta)
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
safe_data = {}
for key, value in data.items():
key = self.make_and_validate_key(key, version=version)
safe_data[key] = value
self._cache.set_many(safe_data, self.get_backend_timeout(timeout))
return []
def delete_many(self, keys, version=None):
safe_keys = []
for key in keys:
key = self.make_and_validate_key(key, version=version)
safe_keys.append(key)
self._cache.delete_many(safe_keys)
def clear(self):
return self._cache.clear()
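# Configuration sketch (settings.py; addresses are illustrative). With
# multiple LOCATION entries the first server receives writes and the
# remaining ones serve reads, per _get_connection_pool_index() above:
#
#   CACHES = {
#       "default": {
#           "BACKEND": "django.core.cache.backends.redis.RedisCache",
#           "LOCATION": [
#               "redis://127.0.0.1:6379",  # leader (writes)
#               "redis://127.0.0.1:6380",  # replica (reads)
#           ],
#       }
#   }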
| ar4s/django | django/core/cache/backends/redis.py | Python | bsd-3-clause | 7,684 |
import pandas as pd
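# Estimate how predictive an early gold lead is of the final result:
# sum each side's gold at t seconds (600 s = 10 min) from player_time.csv
# (slots 0-4 = radiant, 128-132 = dire), flag which side leads, and
# compare that flag against radiant_win from match.csv.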
t = 600
df = pd.read_csv('player_time.csv')
df['team_1_gold'] = (df.gold_t_0 + df.gold_t_1 + df.gold_t_2 +
df.gold_t_3 + df.gold_t_4)
df['team_2_gold'] = (df.gold_t_128 + df.gold_t_129 + df.gold_t_130 +
df.gold_t_131 + df.gold_t_132)
df = df[['match_id', 'times', 'team_1_gold', 'team_2_gold']]
df = df.loc[df.times == t]
df['gold_lead'] = df.team_1_gold > df.team_2_gold
df['gold_lead_by'] = (df.team_1_gold - df.team_2_gold) / df.team_2_gold
df = df[['match_id', 'gold_lead', 'gold_lead_by']]
match = pd.read_csv('match.csv')
match = match[['match_id', 'radiant_win']]
n = 0
gl = pd.merge(df, match)
gl = gl.loc[gl.gold_lead_by > n]
print(sum(gl.gold_lead == gl.radiant_win))
print(sum(gl.gold_lead == gl.radiant_win) / len(gl))
| nathbo/GNTRM | scripts/team_gold.py | Python | mit | 795 |
import csv
import numpy
def load(f_name):
with open(f_name, "rb") as f:
reader=csv.reader(f, delimiter=',', quotechar='"')
x=list(reader)
return numpy.array(x).astype('float')
def store(f_name, matrix):
numpy.savetxt(f_name, matrix, delimiter=",")
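# Round-trip sketch (illustrative):
#   store("m.csv", numpy.eye(3))
#   m = load("m.csv")   # -> 3x3 numpy array of floats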
| Zomega/thesis | Wurm/Utils/MatrixIO.py | Python | mit | 258 |
from django.forms import widgets
from rest_framework import serializers
from api.models import Project, Task, Payment, Client
from django.contrib.auth.models import User
class UserSerializer(serializers.HyperlinkedModelSerializer):
#ProjectsOwned = serializers.HyperlinkedRelatedField(many=True,view_name='project-detail', read_only=True)
#TasksOwned = serializers.HyperlinkedRelatedField(many=True,view_name='task-detail', read_only=True)
#PaymentsOwned = serializers.HyperlinkedRelatedField(many=True,view_name='payment-detail', read_only=True)
#ClientsOwned = serializers.HyperlinkedRelatedField(many=True,view_name='client-detail', read_only=True)
class Meta:
model = User
fields = (
'id',
'username',
#'ProjectsOwned',
#'TasksOwned',
#'PaymentsOwned',
#'ClientsOwned'
)
class ProjectSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
#TaskProjects = serializers.HyperlinkedRelatedField(many=True,view_name='task-detail', read_only=True)
#PaymentProjects = serializers.HyperlinkedRelatedField(many=True,view_name='payment-detail', read_only=True)
class Meta:
model = Project
fields = (
'id',
'owner',
'name',
'description',
'image',
'status',
'client',
#'TaskProjects',
#'PaymentProjects'
)
class TaskSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Task
fields = (
'id',
'owner',
'name',
'status',
'date',
'dateAdded',
'project'
)
class PaymentSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Payment
fields = (
'id',
'owner',
'name',
'money',
'paymentType',
'date',
'dateAdded',
'taxPercentage',
'project'
)
class ClientSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
#ProjectsClient = serializers.HyperlinkedRelatedField(many=True,view_name='project-detail', read_only=True)
class Meta:
model = Client
fields = (
'id',
'owner',
'name',
'email',
'phone',
'image',
#'ProjectsClient'
)
| Arlefreak/PlusProjectAPI | api/serializers.py | Python | mit | 2,727 |
#!/usr/bin/env python
import sys
import os
import datetime
import logging
import argparse
import base64
import simplejson
from biokbase.workspace.client import Workspace
from biokbase.userandjobstate.client import UserAndJobState
from biokbase.Transform import handler_utils
from biokbase.Transform import script_utils
def main():
"""
KBase Convert task manager for converting between KBase objects.
Step 1 - Run a converter to pull the source object and save the destination object.
Args:
workspace_service_url: URL for a KBase Workspace service where KBase objects
are stored.
ujs_service_url: URL for a User and Job State service to report task progress
back to the user.
shock_service_url: URL for a KBase SHOCK data store service for storing files
and large reference data.
handle_service_url: URL for a KBase Handle service that maps permissions from
the Workspace to SHOCK for KBase types that specify a Handle
reference instead of a SHOCK reference.
source_workspace_name: The name of the source workspace.
destination_workspace_name: The name of the destination workspace.
source_object_name: The source object name.
destination_object_name: The destination object name.
source_kbase_type: The KBase Workspace type string that indicates the module
and type of the object being created.
destination_kbase_type: The KBase Workspace type string that indicates the module
and type of the object being created.
optional_arguments: This is a JSON string containing optional parameters that can
be passed in for custom behavior per conversion.
ujs_job_id: The job id from the User and Job State service that can be used to
report status on task progress back to the user.
job_details: This is a JSON string that passes in the script specific command
line options for a given conversion type. The service pulls
these config settings from a script config created by the developer
of the conversion script and passes that into the AWE job that
calls this script.
working_directory: The working directory on disk where files can be created and
will be cleaned when the job ends with success or failure.
keep_working_directory: A flag to tell the script not to delete the working
directory, which is mainly for debugging purposes.
Returns:
Literal return value is 0 for success and 1 for failure.
Actual data output is one or more Workspace objects saved to a user's workspace.
Authors:
Matt Henderson, Gavin Price
"""
logger = script_utils.stderrlogger(__file__, level=logging.DEBUG)
logger.info("Executing KBase Convert tasks")
script_details = script_utils.parse_docs(main.__doc__)
logger.debug(script_details["Args"])
parser = script_utils.ArgumentParser(description=script_details["Description"],
epilog=script_details["Authors"])
# provided by service config
parser.add_argument('--workspace_service_url',
help=script_details["Args"]["workspace_service_url"],
action='store',
required=True)
parser.add_argument('--ujs_service_url',
help=script_details["Args"]["ujs_service_url"],
action='store',
required=True)
# optional because not all KBase Workspace types contain a SHOCK or Handle reference
parser.add_argument('--shock_service_url',
help=script_details["Args"]["shock_service_url"],
action='store',
default=None)
parser.add_argument('--handle_service_url',
help=script_details["Args"]["handle_service_url"],
action='store',
default=None)
# workspace info for pulling the data
parser.add_argument('--source_workspace_name',
help=script_details["Args"]["source_workspace_name"],
action='store',
required=True)
parser.add_argument('--source_object_name',
help=script_details["Args"]["source_object_name"],
action='store',
required=True)
# workspace info for saving the data
parser.add_argument('--destination_workspace_name',
help=script_details["Args"]["destination_workspace_name"],
action='store',
required=True)
parser.add_argument('--destination_object_name',
help=script_details["Args"]["destination_object_name"],
action='store',
required=True)
# the types that we are transforming between, currently assumed one to one
parser.add_argument('--source_kbase_type',
help=script_details["Args"]["source_kbase_type"],
action='store',
required=True)
parser.add_argument('--destination_kbase_type',
help=script_details["Args"]["destination_kbase_type"],
action='store',
required=True)
# any user options provided, encoded as a jason string
parser.add_argument('--optional_arguments',
help=script_details["Args"]["optional_arguments"],
action='store',
default='{}')
# Used if you are restarting a previously executed job?
parser.add_argument('--ujs_job_id',
help=script_details["Args"]["ujs_job_id"],
action='store',
default=None,
required=False)
# config information for running the validate and transform scripts
parser.add_argument('--job_details',
help=script_details["Args"]["job_details"],
action='store',
default=None)
# the working directory is where all the files for this job will be written,
# and normal operation cleans it after the job ends (success or fail)
parser.add_argument('--working_directory',
help=script_details["Args"]["working_directory"],
action='store',
default=None,
required=True)
parser.add_argument('--keep_working_directory',
help=script_details["Args"]["keep_working_directory"],
action='store_true')
# ignore any extra arguments
args, unknown = parser.parse_known_args()
kb_token = os.environ.get('KB_AUTH_TOKEN')
ujs = UserAndJobState(url=args.ujs_service_url, token=kb_token)
est = datetime.datetime.utcnow() + datetime.timedelta(minutes=3)
if args.ujs_job_id is not None:
ujs.update_job_progress(args.ujs_job_id, kb_token, "KBase Data Convert started",
1, est.strftime('%Y-%m-%dT%H:%M:%S+0000'))
# parse all the json strings from the argument list into dicts
# TODO had issues with json.loads and unicode strings, workaround was using simplejson and base64
args.optional_arguments = simplejson.loads(base64.urlsafe_b64decode(args.optional_arguments))
args.job_details = simplejson.loads(base64.urlsafe_b64decode(args.job_details))
if not os.path.exists(args.working_directory):
os.mkdir(args.working_directory)
if args.ujs_job_id is not None:
ujs.update_job_progress(args.ujs_job_id, kb_token,
"Converting from {0} to {1}".format(args.source_kbase_type,args.destination_kbase_type),
1, est.strftime('%Y-%m-%dT%H:%M:%S+0000') )
# Step 1 : Convert the objects
try:
logger.info(args)
convert_args = args.job_details["transform"]
convert_args["optional_arguments"] = args.optional_arguments
convert_args["working_directory"] = args.working_directory
convert_args["workspace_service_url"] = args.workspace_service_url
convert_args["source_workspace_name"] = args.source_workspace_name
convert_args["source_object_name"] = args.source_object_name
convert_args["destination_workspace_name"] = args.destination_workspace_name
convert_args["destination_object_name"] = args.destination_object_name
logger.info(convert_args)
task_output = handler_utils.run_task(logger, convert_args)
if task_output["stdout"] is not None:
logger.debug("STDOUT : " + str(task_output["stdout"]))
if task_output["stderr"] is not None:
logger.debug("STDERR : " + str(task_output["stderr"]))
except Exception, e:
handler_utils.report_exception(logger,
{"message": 'ERROR : Conversion from {0} to {1}'.format(args.source_kbase_type,args.destination_kbase_type),
"exc": e,
"ujs": ujs,
"ujs_job_id": args.ujs_job_id,
"token": kb_token,
},
{"keep_working_directory": args.keep_working_directory,
"working_directory": args.working_directory})
ujs.complete_job(args.ujs_job_id,
kb_token,
"Convert to {0} failed.".format(
args.destination_workspace_name),
str(e),
                         None)
        sys.exit(1)
# Report progress on the overall task being completed
if args.ujs_job_id is not None:
ujs.complete_job(args.ujs_job_id,
kb_token,
"Convert to {0} completed".format(args.destination_workspace_name),
None,
{"shocknodes" : [],
"shockurl" : args.shock_service_url,
"workspaceids" : [],
"workspaceurl" : args.workspace_service_url,
"results" : [{"server_type" : "Workspace",
"url" : args.workspace_service_url,
"id" : "{}/{}".format(args.destination_workspace_name,
args.destination_object_name),
"description" : ""}]})
# Almost done, remove the working directory if possible
if not args.keep_working_directory:
handler_utils.cleanup(logger, args.working_directory)
    sys.exit(0)
if __name__ == "__main__":
    main()
| aekazakov/transform | plugins/scripts/taskrunners/trns_convert_taskrunner.py | Python | mit | 11480
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os
from calibre.customize.conversion import OutputFormatPlugin
from calibre.customize.conversion import OptionRecommendation
class LRFOptions(object):
def __init__(self, output, opts, oeb):
def f2s(f):
try:
return unicode(f[0])
except:
return ''
m = oeb.metadata
for x in ('left', 'top', 'right', 'bottom'):
attr = 'margin_'+x
val = getattr(opts, attr)
if val < 0:
setattr(opts, attr, 0)
self.title = None
self.author = self.publisher = _('Unknown')
self.title_sort = self.author_sort = ''
for x in m.creator:
if x.role == 'aut':
self.author = unicode(x)
fa = unicode(getattr(x, 'file_as', ''))
if fa:
self.author_sort = fa
for x in m.title:
if unicode(x.file_as):
self.title_sort = unicode(x.file_as)
self.freetext = f2s(m.description)
self.category = f2s(m.subject)
self.cover = None
self.use_metadata_cover = True
self.output = output
self.ignore_tables = opts.linearize_tables
if opts.disable_font_rescaling:
self.base_font_size = 0
else:
self.base_font_size = opts.base_font_size
self.blank_after_para = opts.insert_blank_line
self.use_spine = True
self.font_delta = 0
self.ignore_colors = False
from calibre.ebooks.lrf import PRS500_PROFILE
self.profile = PRS500_PROFILE
self.link_levels = sys.maxint
self.link_exclude = '@'
self.no_links_in_toc = True
self.disable_chapter_detection = True
self.chapter_regex = 'dsadcdswcdec'
self.chapter_attr = '$,,$'
self.override_css = self._override_css = ''
self.page_break = 'h[12]'
self.force_page_break = '$'
self.force_page_break_attr = '$'
self.add_chapters_to_toc = False
self.baen = self.pdftohtml = self.book_designer = False
self.verbose = opts.verbose
self.encoding = 'utf-8'
self.lrs = False
self.minimize_memory_usage = False
self.autorotation = opts.enable_autorotation
self.header_separation = (self.profile.dpi/72.) * opts.header_separation
self.headerformat = opts.header_format
for x in ('top', 'bottom', 'left', 'right'):
setattr(self, x+'_margin',
(self.profile.dpi/72.) * float(getattr(opts, 'margin_'+x)))
for x in ('wordspace', 'header', 'header_format',
'minimum_indent', 'serif_family',
'render_tables_as_images', 'sans_family', 'mono_family',
'text_size_multiplier_for_rendered_tables'):
setattr(self, x, getattr(opts, x))
class LRFOutput(OutputFormatPlugin):
name = 'LRF Output'
author = 'Kovid Goyal'
file_type = 'lrf'
options = set([
OptionRecommendation(name='enable_autorotation', recommended_value=False,
help=_('Enable auto-rotation of images that are wider than the screen width.')
),
OptionRecommendation(name='wordspace',
recommended_value=2.5, level=OptionRecommendation.LOW,
help=_('Set the space between words in pts. Default is %default')
),
OptionRecommendation(name='header', recommended_value=False,
help=_('Add a header to all the pages with title and author.')
),
OptionRecommendation(name='header_format', recommended_value="%t by %a",
help=_('Set the format of the header. %a is replaced by the author '
'and %t by the title. Default is %default')
),
OptionRecommendation(name='header_separation', recommended_value=0,
help=_('Add extra spacing below the header. Default is %default pt.')
),
OptionRecommendation(name='minimum_indent', recommended_value=0,
help=_('Minimum paragraph indent (the indent of the first line '
'of a paragraph) in pts. Default: %default')
),
OptionRecommendation(name='render_tables_as_images',
recommended_value=False,
help=_('Render tables in the HTML as images (useful if the '
'document has large or complex tables)')
),
OptionRecommendation(name='text_size_multiplier_for_rendered_tables',
recommended_value=1.0,
help=_('Multiply the size of text in rendered tables by this '
'factor. Default is %default')
),
OptionRecommendation(name='serif_family', recommended_value=None,
help=_('The serif family of fonts to embed')
),
OptionRecommendation(name='sans_family', recommended_value=None,
help=_('The sans-serif family of fonts to embed')
),
OptionRecommendation(name='mono_family', recommended_value=None,
help=_('The monospace family of fonts to embed')
),
])
recommendations = set([
('change_justification', 'original', OptionRecommendation.HIGH),
])
def convert_images(self, pages, opts, wide):
from calibre.ebooks.lrf.pylrs.pylrs import Book, BookSetting, ImageStream, ImageBlock
from uuid import uuid4
from calibre.constants import __appname__, __version__
width, height = (784, 1012) if wide else (584, 754)
ps = {}
ps['topmargin'] = 0
ps['evensidemargin'] = 0
ps['oddsidemargin'] = 0
ps['textwidth'] = width
ps['textheight'] = height
book = Book(title=opts.title, author=opts.author,
bookid=uuid4().hex,
publisher='%s %s'%(__appname__, __version__),
category=_('Comic'), pagestyledefault=ps,
booksetting=BookSetting(screenwidth=width, screenheight=height))
for page in pages:
imageStream = ImageStream(page)
_page = book.create_page()
_page.append(ImageBlock(refstream=imageStream,
blockwidth=width, blockheight=height, xsize=width,
ysize=height, x1=width, y1=height))
book.append(_page)
book.renderLrf(open(opts.output, 'wb'))
def flatten_toc(self):
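        # Collapse the (possibly nested) OEB table of contents into a
        # single flat level, which is all the LRF conversion consumes.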
from calibre.ebooks.oeb.base import TOC
nroot = TOC()
for x in self.oeb.toc.iterdescendants():
nroot.add(x.title, x.href)
self.oeb.toc = nroot
def convert(self, oeb, output_path, input_plugin, opts, log):
self.log, self.opts, self.oeb = log, opts, oeb
lrf_opts = LRFOptions(output_path, opts, oeb)
if input_plugin.is_image_collection:
self.convert_images(input_plugin.get_images(), lrf_opts,
getattr(opts, 'wide', False))
return
self.flatten_toc()
from calibre.ptempfile import TemporaryDirectory
with TemporaryDirectory(u'_lrf_output') as tdir:
from calibre.customize.ui import plugin_for_output_format
oeb_output = plugin_for_output_format('oeb')
oeb_output.convert(oeb, tdir, input_plugin, opts, log)
opf = [x for x in os.listdir(tdir) if x.endswith('.opf')][0]
from calibre.ebooks.lrf.html.convert_from import process_file
process_file(os.path.join(tdir, opf), lrf_opts, self.log)
| jelly/calibre | src/calibre/ebooks/conversion/plugins/lrf_output.py | Python | gpl-3.0 | 7,750 |
import contextlib
import re
import socket
import urllib
import uuid
from django.conf import settings
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.middleware import SessionMiddleware
from django.db import transaction
from django.urls import is_valid_path
from django.http import (
HttpResponsePermanentRedirect, HttpResponseRedirect,
JsonResponse)
from django.middleware import common
from django.utils.cache import patch_cache_control, patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.encoding import force_bytes, iri_to_uri
from django.utils.translation import activate, ugettext_lazy as _
from rest_framework import permissions
import MySQLdb as mysql
from corsheaders.middleware import CorsMiddleware as _CorsMiddleware
from olympia import amo
from olympia.amo.utils import render
from . import urlresolvers
from .templatetags.jinja_helpers import urlparams
auth_path = re.compile('%saccounts/authenticate/?$' % settings.DRF_API_REGEX)
class LocaleAndAppURLMiddleware(MiddlewareMixin):
"""
1. search for locale first
2. see if there are acceptable apps
3. save those matched parameters in the request
4. strip them from the URL so we can do stuff
"""
def process_request(self, request):
# Find locale, app
prefixer = urlresolvers.Prefixer(request)
if settings.DEBUG:
redirect_type = HttpResponseRedirect
else:
redirect_type = HttpResponsePermanentRedirect
urlresolvers.set_url_prefix(prefixer)
full_path = prefixer.fix(prefixer.shortened_path)
if (prefixer.app == amo.MOBILE.short and
request.path.rstrip('/').endswith('/' + amo.MOBILE.short)):
return redirect_type(request.path.replace('/mobile', '/android'))
if ('lang' in request.GET and not re.match(
settings.SUPPORTED_NONAPPS_NONLOCALES_REGEX,
prefixer.shortened_path)):
# Blank out the locale so that we can set a new one. Remove lang
# from query params so we don't have an infinite loop.
prefixer.locale = ''
new_path = prefixer.fix(prefixer.shortened_path)
query = dict((force_bytes(k), request.GET[k]) for k in request.GET)
query.pop('lang')
return redirect_type(urlparams(new_path, **query))
if full_path != request.path:
query_string = request.META.get('QUERY_STRING', '')
full_path = urllib.quote(full_path.encode('utf-8'))
if query_string:
query_string = query_string.decode('utf-8', 'ignore')
full_path = u'%s?%s' % (full_path, query_string)
response = redirect_type(full_path)
# Cache the redirect for a year.
if not settings.DEBUG:
patch_cache_control(response, max_age=60 * 60 * 24 * 365)
# Vary on Accept-Language or User-Agent if we changed the locale or
# app.
old_app = prefixer.app
old_locale = prefixer.locale
new_locale, new_app, _ = prefixer.split_path(full_path)
if old_locale != new_locale:
patch_vary_headers(response, ['Accept-Language'])
if old_app != new_app:
patch_vary_headers(response, ['User-Agent'])
return response
request.path_info = '/' + prefixer.shortened_path
request.LANG = prefixer.locale or prefixer.get_language()
activate(request.LANG)
request.APP = amo.APPS.get(prefixer.app, amo.FIREFOX)
# Match legacy api requests too - IdentifyAPIRequestMiddleware is v3+
# TODO - remove this when legacy_api goes away
# https://github.com/mozilla/addons-server/issues/9274
request.is_legacy_api = request.path_info.startswith('/api/')
class AuthenticationMiddlewareWithoutAPI(AuthenticationMiddleware):
"""
Like AuthenticationMiddleware, but disabled for the API, which uses its
own authentication mechanism.
"""
def process_request(self, request):
legacy_or_drf_api = request.is_api or request.is_legacy_api
if legacy_or_drf_api and not auth_path.match(request.path):
request.user = AnonymousUser()
else:
return super(
AuthenticationMiddlewareWithoutAPI,
self).process_request(request)
class NoVarySessionMiddleware(SessionMiddleware):
"""
SessionMiddleware sets Vary: Cookie anytime request.session is accessed.
request.session is accessed indirectly anytime request.user is touched.
We always touch request.user to see if the user is authenticated, so every
request would be sending vary, so we'd get no caching.
We skip the cache in Zeus if someone has an AMOv3+ cookie, so varying on
Cookie at this level only hurts us.
"""
def process_response(self, request, response):
if settings.READ_ONLY:
return response
# Let SessionMiddleware do its processing but prevent it from changing
# the Vary header.
vary = None
if hasattr(response, 'get'):
vary = response.get('Vary', None)
new_response = (
super(NoVarySessionMiddleware, self)
.process_response(request, response))
if vary:
new_response['Vary'] = vary
else:
del new_response['Vary']
return new_response
class RemoveSlashMiddleware(MiddlewareMixin):
"""
Middleware that tries to remove a trailing slash if there was a 404.
If the response is a 404 because url resolution failed, we'll look for a
better url without a trailing slash.
"""
def process_response(self, request, response):
if (response.status_code == 404 and
request.path_info.endswith('/') and
not is_valid_path(request.path_info) and
is_valid_path(request.path_info[:-1])):
# Use request.path because we munged app/locale in path_info.
newurl = request.path[:-1]
if request.GET:
with safe_query_string(request):
newurl += '?' + request.META.get('QUERY_STRING', '')
return HttpResponsePermanentRedirect(newurl)
else:
return response
@contextlib.contextmanager
def safe_query_string(request):
"""
Turn the QUERY_STRING into a unicode- and ascii-safe string.
We need unicode so it can be combined with a reversed URL, but it has to be
ascii to go in a Location header. iri_to_uri seems like a good compromise.
"""
qs = request.META.get('QUERY_STRING', '')
try:
request.META['QUERY_STRING'] = iri_to_uri(qs)
yield
finally:
request.META['QUERY_STRING'] = qs
class CommonMiddleware(common.CommonMiddleware):
def process_request(self, request):
with safe_query_string(request):
return super(CommonMiddleware, self).process_request(request)
class NonAtomicRequestsForSafeHttpMethodsMiddleware(MiddlewareMixin):
"""
Middleware to make the view non-atomic if the HTTP method used is safe,
in order to avoid opening and closing a useless transaction.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
# This uses undocumented django APIS:
# - transaction.get_connection() followed by in_atomic_block property,
# which we need to make sure we're not messing with a transaction
# that has already started (which happens in tests using the regular
# TestCase class)
# - _non_atomic_requests(), which set the property to prevent the
# transaction on the view itself. We can't use non_atomic_requests
# (without the '_') as it returns a *new* view, and we can't do that
# in a middleware, we need to modify it in place and return None so
# that the rest of the middlewares are run.
is_method_safe = request.method in ('HEAD', 'GET', 'OPTIONS', 'TRACE')
if is_method_safe and not transaction.get_connection().in_atomic_block:
transaction._non_atomic_requests(view_func, using='default')
return None
class ReadOnlyMiddleware(MiddlewareMixin):
"""Middleware that announces a downtime which for us usually means
putting the site into read only mode.
Supports issuing `Retry-After` header.
"""
ERROR_MSG = _(
u'Some features are temporarily disabled while we '
u'perform website maintenance. We\'ll be back to '
u'full capacity shortly.')
def process_request(self, request):
if not settings.READ_ONLY:
return
if request.is_api:
writable_method = request.method not in permissions.SAFE_METHODS
if writable_method:
return JsonResponse({'error': self.ERROR_MSG}, status=503)
elif request.method == 'POST':
return render(request, 'amo/read-only.html', status=503)
def process_exception(self, request, exception):
if not settings.READ_ONLY:
return
if isinstance(exception, mysql.OperationalError):
if request.is_api:
                return JsonResponse({'error': self.ERROR_MSG}, status=503)
return render(request, 'amo/read-only.html', status=503)
class SetRemoteAddrFromForwardedFor(MiddlewareMixin):
"""
Set request.META['REMOTE_ADDR'] from request.META['HTTP_X_FORWARDED_FOR'].
Our application servers should always be behind a load balancer that sets
this header correctly.
"""
def is_valid_ip(self, ip):
for af in (socket.AF_INET, socket.AF_INET6):
try:
socket.inet_pton(af, ip)
return True
except socket.error:
pass
return False
def process_request(self, request):
ips = []
if 'HTTP_X_FORWARDED_FOR' in request.META:
xff = [i.strip() for i in
request.META['HTTP_X_FORWARDED_FOR'].split(',')]
ips = [ip for ip in xff if self.is_valid_ip(ip)]
else:
return
ips.append(request.META['REMOTE_ADDR'])
known = getattr(settings, 'KNOWN_PROXIES', [])
ips.reverse()
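        # Walk outwards from the nearest hop (REMOTE_ADDR comes first
        # after the reverse); the first address that is not one of our
        # known proxies is treated as the real client IP.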
for ip in ips:
request.META['REMOTE_ADDR'] = ip
if ip not in known:
break
class ScrubRequestOnException(MiddlewareMixin):
"""
Hide sensitive information so they're not recorded in error logging.
* passwords in request.POST
* sessionid in request.COOKIES
"""
def process_exception(self, request, exception):
# Get a copy so it's mutable.
request.POST = request.POST.copy()
for key in request.POST:
if 'password' in key.lower():
request.POST[key] = '******'
# Remove session id from cookies
if settings.SESSION_COOKIE_NAME in request.COOKIES:
request.COOKIES[settings.SESSION_COOKIE_NAME] = '******'
# Clearing out all cookies in request.META. They will already
# be sent with request.COOKIES.
request.META['HTTP_COOKIE'] = '******'
class RequestIdMiddleware(MiddlewareMixin):
"""Middleware that adds a unique request-id to every incoming request.
This can be used to track a request across different system layers,
e.g to correlate logs with sentry exceptions.
We are exposing this request id in the `X-AMO-Request-ID` response header.
"""
def process_request(self, request):
request.request_id = uuid.uuid4().hex
def process_response(self, request, response):
request_id = getattr(request, 'request_id', None)
if request_id:
response['X-AMO-Request-ID'] = request.request_id
return response
class CorsMiddleware(_CorsMiddleware, MiddlewareMixin):
"""Wrapper to allow old style Middleware to work with django 1.10+.
Will be unneeded once
https://github.com/mstriemer/django-cors-headers/pull/3 is merged and a
new release of django-cors-headers-multi is available."""
pass
| atiqueahmedziad/addons-server | src/olympia/amo/middleware.py | Python | bsd-3-clause | 12,302 |
from django.db.models import Avg, Count, Sum, Max
from django.shortcuts import render
from django.template.context_processors import csrf
from django.utils.decorators import method_decorator
from django.views.generic import View
from silk import models
from silk.auth import login_possibly_required, permissions_possibly_required
from silk.request_filters import BaseFilter, filters_from_request
class SummaryView(View):
filters_key = 'summary_filters'
def _avg_num_queries(self, filters):
queries__aggregate = models.Request.objects.filter(*filters).annotate(num_queries=Count('queries')).aggregate(num=Avg('num_queries'))
return queries__aggregate['num']
def _avg_time_spent_on_queries(self, filters):
taken__aggregate = models.Request.objects.filter(*filters).annotate(time_spent=Sum('queries__time_taken')).aggregate(num=Avg('time_spent'))
return taken__aggregate['num']
def _avg_overall_time(self, filters):
taken__aggregate = models.Request.objects.filter(*filters).annotate(time_spent=Sum('time_taken')).aggregate(num=Avg('time_spent'))
return taken__aggregate['num']
# TODO: Find a more efficient way to do this. Currently has to go to DB num. views + 1 times and is prob quite expensive
def _longest_query_by_view(self, filters):
values_list = models.Request.objects.filter(*filters).values_list("view_name").annotate(max=Max('time_taken')).order_by('-max')[:5]
requests = []
for view_name, _ in values_list:
request = models.Request.objects.filter(view_name=view_name, *filters).order_by('-time_taken')[0]
requests.append(request)
return requests
def _time_spent_in_db_by_view(self, filters):
values_list = models.Request.objects.filter(*filters).values_list('view_name').annotate(t=Sum('queries__time_taken')).filter(t__gte=0).order_by('-t')[:5]
requests = []
for view, _ in values_list:
r = models.Request.objects.filter(view_name=view, *filters).annotate(t=Sum('queries__time_taken')).order_by('-t')[0]
requests.append(r)
return requests
def _num_queries_by_view(self, filters):
queryset = models.Request.objects.filter(*filters).values_list('view_name').annotate(t=Count('queries')).order_by('-t')[:5]
        views = [r[0] for r in queryset]
requests = []
for view in views:
try:
r = models.Request.objects.filter(view_name=view, *filters).annotate(t=Count('queries')).order_by('-t')[0]
requests.append(r)
except IndexError:
pass
return requests
def _create_context(self, request):
raw_filters = request.session.get(self.filters_key, {})
filters = [BaseFilter.from_dict(filter_d) for _, filter_d in raw_filters.items()]
        avg_num_queries = self._avg_num_queries(filters)
c = {
'request': request,
'num_requests': models.Request.objects.filter(*filters).count(),
'num_profiles': models.Profile.objects.filter(*filters).count(),
            'avg_num_queries': avg_num_queries,
'avg_time_spent_on_queries': self._avg_time_spent_on_queries(filters),
'avg_overall_time': self._avg_overall_time(filters),
'longest_queries_by_view': self._longest_query_by_view(filters),
'most_time_spent_in_db': self._time_spent_in_db_by_view(filters),
'most_queries': self._num_queries_by_view(filters),
'filters': raw_filters
}
c.update(csrf(request))
return c
@method_decorator(login_possibly_required)
@method_decorator(permissions_possibly_required)
def get(self, request):
c = self._create_context(request)
return render(request, 'silk/summary.html', c)
@method_decorator(login_possibly_required)
@method_decorator(permissions_possibly_required)
def post(self, request):
filters = filters_from_request(request)
request.session[self.filters_key] = {ident: f.as_dict() for ident, f in filters.items()}
return render(request, 'silk/summary.html', self._create_context(request))
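# Illustrative sketch (not part of the original module): how the filters
# stored by post() above are rebuilt on the next request; `session` stands
# in for a Django session object.
def _filters_roundtrip_example(session):
    raw_filters = session.get(SummaryView.filters_key, {})
    return [BaseFilter.from_dict(d) for d in raw_filters.values()]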
| jazzband/silk | silk/views/summary.py | Python | mit | 4,217 |
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print(f"X_train.shape = {X_train.shape}")
print(f"X_train.format = {X_train.format}")
print(f"X_train.dtype = {X_train.dtype}")
print(f"X_train density = {X_train.nnz / np.product(X_train.shape)}")
print(f"y_train {y_train.shape}")
print(f"X_test {X_test.shape}")
print(f"X_test.format = {X_test.format}")
print(f"X_test.dtype = {X_test.dtype}")
print(f"y_test {y_test.shape}")
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
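# Example invocation (estimator names as defined in ESTIMATORS above):
#   python bench_20newsgroups.py -e logistic_regression naive_bayes dummy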
| kevin-intel/scikit-learn | benchmarks/bench_20newsgroups.py | Python | bsd-3-clause | 3,292 |
#
# Copyright (c) 2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU Lesser General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (LGPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of LGPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt.
#
# Jeff Ortel <jortel@redhat.com>
#
from optparse import OptionParser
from gofer.messaging import Document
from gofer.agent.manager import Client, HOST, PORT
USAGE = '[options]'
parser = OptionParser(description='Management')
parser.add_option('-H', '--host', default=HOST, help='host')
parser.add_option('-p', '--port', default=PORT, type='int', help='port')
parser.add_option('-s', '--show', action='store_true', default=False, help='show loaded plugins')
parser.add_option('-l', '--load', help='load plugin: <path>')
parser.add_option('-r', '--reload', help='reload plugin: <path>')
parser.add_option('-u', '--unload', help='unload plugin: <path>')
def get_options():
options, _ = parser.parse_args()
return options
def display(reply):
if reply.result:
print(reply.result)
def main():
options = get_options()
client = Client(options.host, options.port)
# show
if options.show:
reply = Document(client.show())
display(reply)
return reply.code
# load
path = options.load
if path:
reply = Document(client.load(path))
display(reply)
return reply.code
    # reload
    path = options.reload
if path:
reply = Document(client.reload(path))
display(reply)
return reply.code
# unload
path = options.unload
if path:
reply = Document(client.unload(path))
display(reply)
return reply.code
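# The module as captured defines main() but no entry point; a conventional
# guard (an assumption, not taken from the original source) would be:
if __name__ == '__main__':
    import sys
    sys.exit(main())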
| jortel/gofer | src/gofer/tools/mgt.py | Python | lgpl-2.1 | 2,014 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
# Copyright(c)2008-2010 SIA "KN dati".(http://kndati.lv) All Rights Reserved.
# General contacts <info@kndati.lv>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Italian localization - Quotation / DDT report aeroo',
'version' : '0.1',
'category' : 'Localization/Italy/Reporting',
    'description' : """
    Base DDT in Aeroo:
    DDT report in Aeroo using the extra fields created by the Italian
    localization
    """,
'author': 'OpenERP Italian Community',
'website': 'http://www.openerp-italia.org',
'license': 'AGPL-3',
'depends' : [
'base',
'l10n_it_sale',
'l10n_it_fiscalcode',
'report_aeroo',
'report_aeroo_ooo',
],
'init_xml' : [],
'update_xml' : [
'ddt_view.xml',
'report/ddt_report.xml',
],
'demo_xml' : [],
'test': [],
'active' : False,
'installable' : True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-mx | l10n_it_ddt_report_aeroo/__openerp__.py | Python | agpl-3.0 | 1,959 |
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array, check_arrays, safe_asarray,
assert_all_finite, array2d, atleast2d_or_csc,
atleast2d_or_csr, warn_if_not_float,
check_random_state, column_or_1d)
from .class_weight import compute_class_weight
from sklearn.utils.sparsetools import minimum_spanning_tree
__all__ = ["murmurhash3_32", "as_float_array", "check_arrays", "safe_asarray",
"assert_all_finite", "array2d", "atleast2d_or_csc",
"atleast2d_or_csr",
"warn_if_not_float",
"check_random_state",
"compute_class_weight",
"minimum_spanning_tree",
"column_or_1d", "safe_indexing"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
    and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
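# Illustrative sketch (not in the original module): for dense inputs the
# boolean mask passes through; for sparse inputs it is converted to integer
# indices, since sparse matrices cannot be indexed with boolean masks.
def _safe_mask_example():
    from scipy.sparse import csr_matrix
    X = np.asarray([[1., 0.], [0., 2.], [3., 0.]])
    mask = np.asarray([True, False, True])
    assert safe_mask(X, mask).dtype == np.bool_
    assert safe_mask(csr_matrix(X), mask).tolist() == [0, 2]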
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "shape"):
return X[indices]
else:
return [X[idx] for idx in indices]
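# Illustrative sketch (not in the original module): works on both numpy
# arrays (fancy indexing) and plain lists (per-item lookup).
def _safe_indexing_example():
    assert safe_indexing(np.asarray([10, 20, 30]), [0, 2]).tolist() == [10, 30]
    assert safe_indexing(['a', 'b', 'c'], [2, 0]) == ['c', 'a']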
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:class:`sklearn.cross_validation.Bootstrap`
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
arrays = check_arrays(*arrays, sparse_format='csr')
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
resampled_arrays = []
for array in arrays:
array = array[indices]
resampled_arrays.append(array)
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
Returns
-------
X ** 2 : element wise square
"""
X = safe_asarray(X)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
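# Illustrative sketch (not in the original module): with copy=True (the
# default) the input is left untouched.
def _safe_sqr_example():
    a = np.asarray([-2., 3.])
    assert safe_sqr(a).tolist() == [4., 9.]
    assert a.tolist() == [-2., 3.]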
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
    The last slice may contain fewer than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(Warning):
"Custom warning to capture convergence problems"
| chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/utils/__init__.py | Python | apache-2.0 | 11,586 |
# This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
import unittest
import sys
import os
import cStringIO
import numpy as np
sys.path.insert(0, os.path.abspath('..'))
import query as qu
from ranker import DeterministicRankingFunction, ProbabilisticRankingFunction
from BalancedInterleave import BalancedInterleave
from TeamDraft import TeamDraft
from DocumentConstraints import DocumentConstraints
from ProbabilisticInterleave import ProbabilisticInterleave
from HistBalancedInterleave import HistBalancedInterleave
from HistTeamDraft import HistTeamDraft
from HistDocumentConstraints import HistDocumentConstraints
from HistProbabilisticInterleave import HistProbabilisticInterleave
from ExploitativeProbabilisticInterleave import \
ExploitativeProbabilisticInterleave
class TestEvaluation(unittest.TestCase):
def setUp(self):
self.test_num_features = 6
test_query = """
1 qid:1 1:2.6 2:1 3:2.1 4:0 5:2 6:1.4 # relevant
1 qid:1 1:1.2 2:1 3:2.9 4:0 5:2 6:1.9 # relevant
0 qid:1 1:0.5 2:1 3:2.3 4:0 5:2 6:5.6 # not relevant
0 qid:1 1:0.5 2:1 3:2.3 4:0 5:2.1 6:5.6 # not relevant
"""
self.query_fh = cStringIO.StringIO(test_query)
self.queries = qu.Queries(self.query_fh, self.test_num_features)
self.query = self.queries['1']
zero_weight_str = "0 0 0 0 0 0"
self.zero_weights = np.asarray([float(x) for x in
zero_weight_str.split()])
# results in ranking: 1, 3, 2, 0
weight_str_1 = "0 0 1 0 1 0"
self.weights_1 = np.asarray([float(x) for x in weight_str_1.split()])
weight_str_2 = "1 0 0 0 1 0"
self.weights_2 = np.asarray([float(x) for x in weight_str_2.split()])
def testBalancedInterleave(self):
bi = BalancedInterleave()
r1 = DeterministicRankingFunction(None, self.weights_1)
r2 = DeterministicRankingFunction(None, self.weights_2)
(interleaved_list, assignments) = bi.interleave(r1, r2, self.query, 10)
self.assertIn(interleaved_list.tolist(), [[0, 1, 2, 3], [1, 0, 2, 3],
[0, 1, 3, 2], [1, 0, 3, 2]])
self.assertEqual(assignments[0].tolist(), [1, 3, 2, 0])
self.assertEqual(assignments[1].tolist(), [0, 1, 3, 2])
o = bi.infer_outcome([1, 0, 3, 2], [[1, 3, 2, 0], [0, 1, 3, 2]],
[1, 0, 0, 0], self.query)
self.assertEqual(o, -1, "l1 should win (1), o = %g" % o)
o = bi.infer_outcome([1, 0, 3, 2], [[1, 3, 2, 0], [0, 1, 3, 2]],
[1, 0, 1, 0], self.query)
self.assertEqual(o, -1, "l1 should win (2), o = %g" % o)
o = bi.infer_outcome([1, 0, 3, 2], [[1, 2, 3, 0], [0, 1, 3, 2]],
[1, 0, 1, 0], self.query)
self.assertEqual(o, 0, "The rankers should tie (1), o = %g" % o)
o = bi.infer_outcome([0, 1, 2, 3], [[0, 1, 2, 3], [1, 2, 3, 0]],
[0, 1, 0, 1], self.query)
self.assertEqual(o, 1, "l1 should win, o = %g" % o)
o = bi.infer_outcome([1, 0, 2, 3], [[0, 1, 2, 3], [1, 2, 3, 0]],
[0, 1, 0, 1], self.query)
self.assertEqual(o, 0, "The rankers should tie (2), o = %g" % o)
o = bi.infer_outcome([0, 2, 1, 3], [[3, 0, 1, 2], [1, 3, 2, 0]],
[1, 0, 1, 0], self.query)
self.assertEqual(o, -1, "l1 should win (3), o = %g" % o)
o = bi.infer_outcome([0, 2, 1, 3], [[3, 0, 1, 2], [4, 3, 2, 0]],
[1, 0, 1, 0], self.query)
self.assertEqual(o, -1, "l1 should win (4), o = %g" % o)
def testHistBalancedInterleave(self):
hbi = HistBalancedInterleave()
r1 = DeterministicRankingFunction(None, self.weights_1)
r1_test = DeterministicRankingFunction(None, self.weights_1)
r2 = DeterministicRankingFunction(None, self.weights_2)
self.assertEqual(hbi._get_assignment(r1, r1_test, self.query,
4)[0].tolist(), [1, 3, 2, 0])
self.assertEqual(hbi._get_assignment(r1, r1_test, self.query,
4)[1].tolist(), [1, 3, 2, 0]) # r1
self.assertEqual(hbi._get_assignment(r1, r2, self.query,
4)[1].tolist(), [0, 1, 3, 2]) # r2
o = hbi.infer_outcome([1, 0, 3, 2], ["src a is ignored"], [1, 0, 0, 0],
r1, r2, self.query)
self.assertEqual(o, -1, "Same as original, l1 should win, o = %g" % o)
o = hbi.infer_outcome([1, 0, 3, 2], ["src a is ignored"], [1, 0, 0, 0],
r2, r1, self.query)
self.assertEqual(o, 1, "Different from original, l2 should win, "
"o = %g" % o)
o = hbi.infer_outcome([1, 0, 3, 2], ["src a is ignored"], [1, 0, 0, 0],
r1_test, r1, self.query)
self.assertEqual(o, 0, "Same ranking - tie (1), o = %g" % o)
o = hbi.infer_outcome([2, 0, 3, 1], ["src a is ignored"], [1, 1, 0, 0],
r1, r2, self.query)
self.assertEqual(o, 0, "Same ranking - tie (2), o = %g" % o)
o = hbi.infer_outcome([2, 0, 3, 4], ["src a is ignored"], [1, 1, 0, 0],
r1, r2, self.query)
self.assertEqual(o, 0, "Same ranking - tie (3), o = %g" % o)
def testDocumentConstraints(self):
dc = DocumentConstraints()
r1 = DeterministicRankingFunction(None, self.weights_1)
r2 = DeterministicRankingFunction(None, self.weights_2)
(interleaved_list, assignments) = dc.interleave(r1, r2, self.query, 10)
self.assertIn(interleaved_list.tolist(), [[0, 1, 2, 3], [1, 0, 2, 3],
[0, 1, 3, 2], [1, 0, 3, 2]])
self.assertIn(assignments[0].tolist(), [[1, 2, 3, 0], [1, 3, 2, 0]])
self.assertIn(assignments[1].tolist(), [[0, 1, 2, 3], [0, 1, 3, 2]])
o = dc.infer_outcome([1, 0, 3, 2], [[1, 3, 2, 0], [0, 1, 3, 2]],
[1, 0, 0, 0], self.query)
self.assertEqual(o, -1, "l1 should win (1), o = %g" % o)
o = dc.infer_outcome([1, 0, 3, 2], [[1, 3, 2, 0], [0, 1, 3, 2]],
[0, 0, 0, 1], self.query)
self.assertEqual(o, -1, "l1 should win (2), o = %g" % o)
o = dc.infer_outcome([1, 0, 3, 2], [[1, 3, 2, 0], [0, 1, 3, 2]],
[0, 1, 0, 0], self.query)
self.assertEqual(o, 1, "l2 should win (1), o = %g" % o)
o = dc.infer_outcome([1, 0, 3, 2], [[1, 0, 2, 3], [0, 1, 3, 2]],
[0, 1, 0, 0], self.query)
# constraints: 0 > 1, 0 > 3
self.assertEqual(o, 1, "l2 should win (2), o = %g" % o)
o = dc.infer_outcome([1, 0, 3, 2], [[1, 2, 0, 3], [1, 0, 2, 3]],
[0, 1, 1, 0], self.query)
# constraints: 0 > 1, 3 > 1, 0 > 2, 3 > 2
self.assertEqual(o, 1, "l2 should win (3), o = %g" % o)
o = dc.infer_outcome([1, 0, 3, 2], [[1, 3, 2, 0], [0, 1, 3, 2]],
[0, 0, 0, 0], self.query)
self.assertEqual(o, 0, "No winner when there are no clicks o = %g" % o)
o = dc.infer_outcome([1, 0, 3, 2], [[1, 3, 2, 0], [0, 1, 3, 2]],
[1, 1, 1, 1], self.query)
self.assertEqual(o, 0, "No winner when all are clicked o = %g" % o)
dc = DocumentConstraints("--constraints 1")
o = dc.infer_outcome([1, 0, 3, 2], [[1, 0, 2, 3], [3, 0, 1, 2]],
[0, 1, 0, 0], self.query)
# constraint: 0 > 1
self.assertEqual(o, 1, "l2 should win with one constraint, o = %g" % o)
dc = DocumentConstraints("--constraints 2")
o = dc.infer_outcome([1, 0, 3, 2], [[1, 0, 2, 3], [3, 0, 1, 2]],
[0, 1, 0, 0], self.query)
self.assertEqual(o, 0, "Tie with two constraint types (1), o = %g" % o)
o = dc.infer_outcome([1, 0, 3, 2], [[1, 0, 2, 3], [1, 2, 0, 3]],
[0, 1, 1, 0], self.query)
# constraints: 0 > 1, 3 > 1, 3 > 2
self.assertEqual(o, 0, "Tie with two constraint types (2), o = %g" % o)
o = dc.infer_outcome([1, 0, 3, 2], [[1, 0, 4, 3], [1, 0, 3, 2]],
[0, 1, 1, 0], self.query)
self.assertEqual(o, 0, "Tie with two constraint types (3), o = %g" % o)
o = dc.infer_outcome([1, 0, 3, 2], [[1, 0, 4, 3], [1, 0, 2, 3]],
[0, 1, 1, 0], self.query)
self.assertEqual(o, -1, "l1 should win with two constr., o = %g" % o)
def testHistDocumentConstraints(self):
hdc = HistDocumentConstraints()
r1 = DeterministicRankingFunction(None, self.weights_1)
r2 = DeterministicRankingFunction(None, self.weights_2)
# results in assignments l1 = [1, 2, 3, 0] or [1, 3, 2, 0]
# and l2 = [0, 1, 2, 3] or [0, 1, 3, 2]
o = hdc.infer_outcome([1, 0, 3, 2], None, [1, 0, 0, 0], r1, r2,
self.query)
self.assertEqual(o, -1, "l1 should win, o = %g" % o)
o = hdc.infer_outcome([2, 1, 3, 0], None, [1, 0, 0, 0], r1, r2,
self.query)
self.assertEqual(o, 0, "No winner, both have 1 > 2 (1), o = %g" % o)
o = hdc.infer_outcome([2, 1, 4, 0], None, [1, 0, 0, 0], r1, r2,
self.query)
self.assertEqual(o, 0, "No winner, both have 1 > 2 (2), o = %g" % o)
o = hdc.infer_outcome([2, 1, 3, 0], None, [0, 0, 0, 0], r1, r2,
self.query)
self.assertEqual(o, 0, "No winner when none are clicked, o = %g" % o)
o = hdc.infer_outcome([2, 1, 3, 0], None, [1, 1, 1, 1], r1, r2,
self.query)
self.assertEqual(o, 0, "No winner when all are clicked, o = %g" % o)
def testTeamDraftInterleave(self):
td = TeamDraft(None)
r1 = DeterministicRankingFunction(None, self.weights_1)
r2 = DeterministicRankingFunction(None, self.weights_2)
(interleaved_list, assignments) = td.interleave(r1, r2, self.query, 10)
self.assertIn(interleaved_list.tolist(), [[0, 1, 2, 3], [1, 0, 2, 3],
[0, 1, 3, 2], [1, 0, 3, 2]])
self.assertIn(assignments.tolist(), [[0, 1, 0, 1], [1, 0, 1, 0],
[1, 0, 0, 1], [0, 1, 1, 0]])
def testHistTeamDraft_getPossibleAssignment(self):
r1 = DeterministicRankingFunction(None, self.weights_1)
r2 = DeterministicRankingFunction(None, self.weights_2)
htd = HistTeamDraft(None)
l = [0, 1, 3, 2]
self.assertIn(htd._get_possible_assignment(l, r1, r2, self.query),
[[1, 0, 0, 1], [1, 0, 1, 0]])
l = [1, 0, 3, 2]
self.assertIn(htd._get_possible_assignment(l, r1, r2, self.query),
[[0, 1, 0, 1], [0, 1, 1, 0]])
l = [1, 0, 2, 3]
self.assertEquals(htd._get_possible_assignment(l, r1, r2, self.query),
None)
def testHistTeamDraft_getPossibleAssignment_randomization(self):
r1 = DeterministicRankingFunction(None, self.weights_1)
r2 = DeterministicRankingFunction(None, self.weights_2)
htd = HistTeamDraft(None)
l = [0, 1, 3, 2]
test_assignments = {"1,0,0,1": 0, "1,0,1,0": 0}
trials = 0
MAX_TRIALS = 1000
while trials < MAX_TRIALS and 0 in test_assignments.values():
trials += 1
observed_assignment = ",".join(str(a) for a in
htd._get_possible_assignment(l, r1, r2, self.query))
self.assertIn(observed_assignment, test_assignments.keys())
test_assignments[observed_assignment] += 1
for assignment, count in test_assignments.items():
self.assertNotEqual(0, count, "Test failed for: %s" % assignment)
def testHistTeamDraft(self):
r1 = DeterministicRankingFunction(None, self.weights_1)
r2 = DeterministicRankingFunction(None, self.weights_2)
interleaved_list = [0, 1, 3, 2]
htd = HistTeamDraft()
self.assertEqual(htd.infer_outcome(interleaved_list, None,
[0, 0, 0, 0], r1, r2, self.query), 0, "No clicks.")
self.assertEqual(htd.infer_outcome(interleaved_list, None,
[1, 0, 0, 0], r1, r2, self.query), 1, "Target rankers"
" are the same as the original rankers, so ranker 2 has to win.")
self.assertEqual(htd.infer_outcome(interleaved_list, None,
[1, 0, 0, 0], r2, r1, self.query), -1, "Target rankers"
" are switched, so ranker 1 has to win.")
def testProbabilisticInterleaveWithDeterministicRankers(self):
pi = ProbabilisticInterleave(None)
# test a few possible interleavings
r1 = DeterministicRankingFunction(None, self.weights_1)
r2 = DeterministicRankingFunction(None, self.weights_2)
test_lists = {"0,1,3,2": 0, "1,0,3,2": 0, "1,3,0,2": 0, "1,3,2,0": 0}
trials = 0
MAX_TRIALS = 10000
while trials < MAX_TRIALS and 0 in test_lists.values():
trials += 1
(l, a) = pi.interleave(r1, r2, self.query, 10)
list_str = ",".join(str(a) for a in l.tolist())
self.assertIn(list_str, test_lists.keys())
test_lists[list_str] += 1
for list_str, count in test_lists.items():
self.assertNotEqual(0, count,
"Interleave failed for: %s" % list_str)
# test interleaving outcomes
context = (None, r1, r2)
self.assertEqual(pi.infer_outcome([0, 1, 2, 3], context, [0, 0, 0, 0],
self.query), 0, "No clicks, outcome should be 0.")
self.assertEqual(pi.infer_outcome([0, 1, 2, 3], context, [1, 0, 0, 0],
self.query), 0, "No possible assignment, outcome should be 0.")
o = pi.infer_outcome([1, 0, 3, 2], context, [1, 0, 0, 0], self.query)
self.assertAlmostEquals(o, -0.0625, 4,
"Ranker 1 should win (o = %.4f)." % o)
o = pi.infer_outcome([0, 1, 3, 2], context, [1, 0, 0, 0], self.query)
self.assertAlmostEquals(o, 0.0625, 4,
"Ranker 2 should win (o = %.4f)." % o)
# test get_probability_of_list
p = pi.get_probability_of_list([1, 0, 3, 2], context, self.query)
self.assertEqual(p, 0.25, "Probability of the most "
"likely list. p = %g" % p)
def testProbabilisticInterleave(self):
pi = ProbabilisticInterleave(None)
r1 = ProbabilisticRankingFunction(3, self.weights_1)
r2 = ProbabilisticRankingFunction(3, self.weights_2)
context = (None, r1, r2)
# test get_probability_of_list
p = pi.get_probability_of_list([1, 0, 3, 2], context, self.query)
self.assertAlmostEquals(p, 0.182775, 6, "Probability of the most "
"likely list. p = %.6f" % p)
# test a few possible interleavings
test_lists = {"0,1,2,3": 0, "0,1,3,2": 0, "0,2,1,3": 0, "0,2,3,1": 0,
"0,3,1,2": 0, "0,3,2,1": 0, "1,0,2,3": 0, "1,0,3,2": 0,
"1,2,0,3": 0, "1,2,3,0": 0, "1,3,0,2": 0, "1,3,2,0": 0,
"2,0,1,3": 0, "2,0,3,1": 0, "2,1,0,3": 0, "2,1,3,0": 0,
"2,3,0,1": 0, "2,3,1,0": 0, "3,0,1,2": 0, "3,0,2,1": 0,
"3,1,0,2": 0, "3,1,2,0": 0, "3,2,0,1": 0, "3,2,1,0": 0}
trials = 0
MAX_TRIALS = 100000
while trials < MAX_TRIALS and 0 in test_lists.values():
trials += 1
(l, _) = pi.interleave(r1, r2, self.query, 10)
list_str = ",".join(str(a) for a in l.tolist())
self.assertIn(list_str, test_lists.keys())
test_lists[list_str] += 1
for list_str, count in test_lists.items():
self.assertNotEqual(0, count,
"Interleave failed for: %s" % list_str)
# test interleaving outcomes
self.assertEqual(pi.infer_outcome([0, 1, 2, 3], context, [0, 0, 0, 0],
self.query), 0, "No clicks, outcome should be 0.")
o = pi.infer_outcome([1, 0, 3, 2], context, [1, 0, 0, 0], self.query)
self.assertAlmostEquals(o, -0.0486, 4,
"Ranker 1 should win (o = %.4f)." % o)
o = pi.infer_outcome([0, 1, 3, 2], context, [1, 0, 0, 0], self.query)
self.assertAlmostEquals(o, 0.0606, 4,
"Ranker 2 should win (o = %.4f)." % o)
# from the example in CIKM 2011
weight_str_1 = "0 0 1 0 -1 0"
weights_1 = np.asarray([float(x) for x in weight_str_1.split()])
weight_str_2 = "1 0 0 0 -1 0"
weights_2 = np.asarray([float(x) for x in weight_str_2.split()])
r1 = ProbabilisticRankingFunction(3, weights_1)
r2 = ProbabilisticRankingFunction(3, weights_2)
context = (None, r2, r1)
o = pi.infer_outcome([0, 1, 2, 3], context, [0, 1, 1, 0], self.query)
self.assertAlmostEquals(o, 0.0046, 4,
"Ranker 2 should win again (o = %.4f)." % o)
# click on one before last document
o = pi.infer_outcome([3, 1, 0, 2], context, [0, 0, 1, 0], self.query)
self.assertAlmostEquals(o, -0.0496, 4,
"Ranker 1 should win with click on doc 0 (o = %.4f)." % o)
# click on last document
o = pi.infer_outcome([3, 1, 2, 0], context, [0, 0, 0, 1], self.query)
self.assertAlmostEquals(o, 0.0, 4,
"Tie for click on last doc (o = %.4f)." % o)
def testHistProbabilisticInterleave(self):
r1 = ProbabilisticRankingFunction(3, self.weights_1)
r2 = ProbabilisticRankingFunction(3, self.weights_2)
hpi = HistProbabilisticInterleave(None)
a = ([0, 1, 1, 0], r1, r2)
self.assertEqual(hpi.infer_outcome([0, 1, 2, 3], a, [0, 0, 0, 0],
r1, r2, self.query), 0, "No clicks, outcome should be 0.")
o = hpi.infer_outcome([1, 0, 3, 2], a, [1, 0, 0, 0], r1, r2,
self.query)
self.assertAlmostEquals(o, -0.0486, 4, "Same target as original "
"rankers. Ranker 1 should win (o = %.4f)." % o)
o = hpi.infer_outcome([1, 0, 3, 2], a, [1, 0, 0, 0], r2, r1,
self.query)
self.assertAlmostEquals(o, 0.0486, 4, "Target rankers switched. "
"Ranker 2 should win (o = %.4f)." % o)
test_r1 = ProbabilisticRankingFunction(3, self.weights_1)
a = ([0, 1, 1, 0], r1, test_r1)
o = hpi.infer_outcome([1, 0, 3, 2], a, [1, 0, 0, 0], r2, r1,
self.query)
self.assertAlmostEquals(o, 0.1542, 4, "Same original ranker. "
"Ranker 2 should win (o = %.4f)." % o)
def testHistProbabilisticInterleaveWithoutMarginalization(self):
r1 = ProbabilisticRankingFunction(3, self.weights_1)
r2 = ProbabilisticRankingFunction(3, self.weights_2)
hpiIs = HistProbabilisticInterleave("--biased False "
"--marginalize False")
# test get_probability_of_list_and_assignment
p = hpiIs._get_probability_of_list_and_assignment([1, 3, 2, 0],
[0, 0, 0, 0], r1, r2, self.query)
self.assertAlmostEqual(p, 0.026261, 6, "Most likely list for ranker 1."
" p = %e" % p)
# test overall outcomes
a = ([0, 1, 1, 0], r1, r2)
self.assertEqual(hpiIs.infer_outcome([0, 1, 2, 3], a, [0, 0, 0, 0],
r1, r2, self.query), 0, "No clicks, outcome should be 0.")
o = hpiIs.infer_outcome([1, 0, 3, 2], a, [1, 0, 0, 0], r1, r2,
self.query)
self.assertEquals(o, -1, "Same original and target pair. "
"Ranker 1 should win (o = %d)." % o)
test_r1 = ProbabilisticRankingFunction(3, self.weights_1)
a = ([0, 1, 1, 0], r1, test_r1)
o = hpiIs.infer_outcome([1, 0, 3, 2], a, [1, 0, 0, 0], r2, r1,
self.query)
self.assertAlmostEquals(o, -0.1250, 4, "Different original pair. "
"Ranker 1 should win (o = %.4f)." % o)
def testExploitativeProbabilisticInterleave(self):
r1 = ProbabilisticRankingFunction(1, self.weights_1)
r2 = ProbabilisticRankingFunction(1, self.weights_2)
r1.init_ranking(self.query)
r2.init_ranking(self.query)
epi = ExploitativeProbabilisticInterleave("--exploration_rate=0.5")
(docids, probs) = epi._get_document_distribution(r1, r2)
exp_docids = [1, 0, 3, 2]
exp_probs = [0.36, 0.3, 0.2, 0.14]
self._prob_doc_test_helper(docids, exp_docids, probs, exp_probs)
def testExploitativeProbabilisticInterleaveThreeDocs(self):
epi = ExploitativeProbabilisticInterleave("--exploration_rate=0.5")
# prepare rankers
r1 = ProbabilisticRankingFunction(1, self.weights_1)
r2 = ProbabilisticRankingFunction(1, self.weights_2)
r1.init_ranking(self.query)
r2.init_ranking(self.query)
r1.rm_document(0)
r2.rm_document(0)
# test after document 0 was removed
(docids, probs) = epi._get_document_distribution(r1, r2)
exp_docids = [1, 3, 2]
exp_probs = [0.5034965, 0.29020979, 0.20629371]
self._prob_doc_test_helper(docids, exp_docids, probs, exp_probs)
# prepare rankers
r1.init_ranking(self.query)
r2.init_ranking(self.query)
r1.rm_document(3)
r2.rm_document(3)
# test after document 3 was removed
(docids, probs) = epi._get_document_distribution(r1, r2)
exp_docids = [1, 0, 2]
exp_probs = [0.45864662, 0.36466165, 0.17669173]
self._prob_doc_test_helper(docids, exp_docids, probs, exp_probs)
def testExploitativeProbabilisticInterleaveTwoDocs(self):
# prepare rankers
r1 = ProbabilisticRankingFunction(1, self.weights_1)
r2 = ProbabilisticRankingFunction(1, self.weights_2)
r1.init_ranking(self.query)
r2.init_ranking(self.query)
r1.rm_document(1)
r2.rm_document(1)
r1.rm_document(3)
r2.rm_document(3)
# test after 1 and 3 were removed
epi = ExploitativeProbabilisticInterleave("--exploration_rate=0.5")
(docids, probs) = epi._get_document_distribution(r1, r2)
exp_docids = [0, 2]
exp_probs = [0.61428571, 0.38571429]
self._prob_doc_test_helper(docids, exp_docids, probs, exp_probs)
def testExploitativeProbabilisticInterleaveExploit(self):
r1 = ProbabilisticRankingFunction(1, self.weights_1)
r2 = ProbabilisticRankingFunction(1, self.weights_2)
# exploration rate = 0.1
epi = ExploitativeProbabilisticInterleave("--exploration_rate=0.1")
r1.init_ranking(self.query)
r2.init_ranking(self.query)
(docids, probs) = epi._get_document_distribution(r1, r2)
exp_docids = [1, 3, 2, 0]
exp_probs = [0.456, 0.232, 0.156, 0.156]
self._prob_doc_test_helper(docids, exp_docids, probs, exp_probs)
# exploration rate = 0.0
epi = ExploitativeProbabilisticInterleave("--exploration_rate=0.0")
r1.init_ranking(self.query)
r2.init_ranking(self.query)
(docids, probs) = epi._get_document_distribution(r1, r2)
exp_docids = [1, 3, 2, 0]
exp_probs = [0.48, 0.24, 0.16, 0.12]
self._prob_doc_test_helper(docids, exp_docids, probs, exp_probs)
def _prob_doc_test_helper(self, docids, exp_docids, probs, exp_probs):
for r, (docid, prob) in enumerate(zip(docids, probs)):
self.assertEquals(docid, exp_docids[r], "Docid %d did not match "
"expected %d at rank %d" % (docid, exp_docids[r], r))
self.assertAlmostEquals(prob, exp_probs[r], 6, "Prob %g did not "
"match expected %g at rank %d" % (prob, exp_probs[r], r))
def testExploitativeProbabilisticInterleaveInterleave(self):
r1 = ProbabilisticRankingFunction(1, self.weights_1)
r2 = ProbabilisticRankingFunction(1, self.weights_2)
epi = ExploitativeProbabilisticInterleave("--exploration_rate=0.5")
r1.init_ranking(self.query)
r2.init_ranking(self.query)
(l, (r1_ret, r2_ret)) = epi.interleave(r1, r2, self.query, 4)
self.assertEqual(r1, r1_ret, "r1 is just passed through.")
self.assertEqual(r2, r2_ret, "r2 is just passed through.")
self.assertEqual(len(l), 4, "interleave produces a list of length 4.")
self.assertTrue(0 in l, "document 0 is in l.")
        self.assertTrue(1 in l, "document 1 is in l.")
        self.assertTrue(2 in l, "document 2 is in l.")
        self.assertTrue(3 in l, "document 3 is in l.")
observed_l = {}
for _ in range(0, 100):
(l, (r1_ret, r2_ret)) = epi.interleave(r1, r2, self.query, 4)
l_str = " ".join([str(docid) for docid in l])
if not l_str in observed_l:
observed_l[l_str] = 1
else:
observed_l[l_str] += 1
self.assertIn("0 1 2 3", observed_l, "List was observed: 0 1 2 3.")
self.assertIn("1 0 3 2", observed_l, "List was observed: 0 1 2 3.")
self.assertIn("3 1 2 0", observed_l, "List was observed: 0 1 2 3.")
self.assertIn("2 1 0 3", observed_l, "List was observed: 0 1 2 3.")
def testGetSourceProbabilityOfList(self):
r1 = ProbabilisticRankingFunction(1, self.weights_1)
r2 = ProbabilisticRankingFunction(1, self.weights_2)
# with exploration rate 0.5
epi = ExploitativeProbabilisticInterleave("--exploration_rate=0.5")
p = epi._get_source_probability_of_list([1, 0, 3, 2], (None, r1, r2),
self.query)
        self.assertAlmostEquals(0.090916137, p, 8, "Obtained p = %g" % p)
# with exploration rate 0.1
epi = ExploitativeProbabilisticInterleave("--exploration_rate=0.1")
p = epi._get_source_probability_of_list([1, 0, 3, 2], (None, r1, r2),
self.query)
        self.assertAlmostEquals(0.073751736, p, 8, "Obtained p = %g" % p)
def testInferOutcomeBiased(self):
r1 = ProbabilisticRankingFunction(1, self.weights_1)
r2 = ProbabilisticRankingFunction(1, self.weights_2)
epi = ExploitativeProbabilisticInterleave("--exploration_rate=0.1 "
"--biased=True")
outcome = epi.infer_outcome([1, 0, 3, 2], (None, r1, r2), [0, 1, 0, 0],
self.query)
self.assertAlmostEquals(0.029049296, outcome, 8,
"Obtained outcome = %.8f" % outcome)
def testInferOutcomeUnbiased(self):
r1 = ProbabilisticRankingFunction(1, self.weights_1)
r2 = ProbabilisticRankingFunction(1, self.weights_2)
epi = ExploitativeProbabilisticInterleave("--exploration_rate=0.1")
outcome = epi.infer_outcome([1, 0, 3, 2], (None, r1, r2), [0, 1, 0, 0],
self.query)
self.assertAlmostEquals(0.03581, outcome, 8,
"Obtained outcome = %.8f" % outcome)
if __name__ == '__main__':
unittest.main()
| hubert667/AIR | src/python/comparison/test.py | Python | gpl-3.0 | 26,919 |
__author__ = 'Sulantha'
| sulantha2006/Conversion | Python/RUSRandomForest/__init__.py | Python | mit | 24 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-08 23:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('plugins', '0004_auto_20170508_2027'),
]
operations = [
migrations.AddField(
model_name='downloadrelease',
name='is_used',
field=models.BooleanField(default=False),
),
]
| ava-project/ava-website | website/apps/plugins/migrations/0005_downloadrelease_is_used.py | Python | mit | 460 |
"""Misc. utilities."""
from modoboa.lib.web_utils import NavigationParameters
def decode_payload(encoding, payload):
"""Decode the payload according to the given encoding
Supported encodings: base64, quoted-printable.
:param encoding: the encoding's name
:param payload: the value to decode
:return: a string
"""
encoding = encoding.lower()
if encoding == "base64":
import base64
return base64.b64decode(payload)
elif encoding == "quoted-printable":
import quopri
return quopri.decodestring(payload)
return payload
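# Illustrative sketch (hypothetical values; on Python 3 both branches
# return bytes rather than str):
def _decode_payload_example():
    assert decode_payload("base64", "aGVsbG8=") in ("hello", b"hello")
    assert decode_payload("quoted-printable", "caf=C3=A9") in (
        "caf\xc3\xa9", b"caf\xc3\xa9")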
class WebmailNavigationParameters(NavigationParameters):
"""Specific NavigationParameters subclass for the webmail."""
def __init__(self, request, defmailbox=None):
super(WebmailNavigationParameters, self).__init__(
request, 'webmail_navparams'
)
if defmailbox is not None:
self.parameters += [('mbox', defmailbox, False)]
def _store_page(self):
"""Specific method to store the current page."""
if self.request.GET.get("reset_page", None) or "page" not in self:
self["page"] = 1
else:
page = self.request.GET.get("page", None)
if page is not None:
self["page"] = int(page)
| modoboa/modoboa-webmail | modoboa_webmail/lib/utils.py | Python | mit | 1,305 |
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a TOC file from a Java jar.
The TOC file contains the non-package API of the jar. This includes all
public/protected/package classes/functions/members and the values of static
final variables (members with package access are kept because in some cases we
have multiple libraries with the same package, particularly test+non-test). Some
other information (major/minor javac version) is also included.
This TOC file then can be used to determine if a dependent library should be
rebuilt when this jar changes. I.e. any change to the jar that would require a
rebuild, will have a corresponding change in the TOC file.
"""
import optparse
import os
import re
import sys
import zipfile
from util import build_utils
from util import md5_check
def GetClassesInZipFile(zip_file):
classes = []
files = zip_file.namelist()
for f in files:
if f.endswith('.class'):
# f is of the form org/chromium/base/Class$Inner.class
classes.append(f.replace('/', '.')[:-6])
return classes
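# Illustrative check (not in the original): the replace/slice above maps a
# zip entry name to a fully-qualified class name.
def _classname_example():
    entry = 'org/chromium/base/Class$Inner.class'
    assert entry.replace('/', '.')[:-6] == 'org.chromium.base.Class$Inner'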
def CallJavap(classpath, classes):
javap_cmd = [
'javap',
'-package', # Show public/protected/package.
# -verbose is required to get constant values (which can be inlined in
# dependents).
'-verbose',
'-classpath', classpath
] + classes
return build_utils.CheckOutput(javap_cmd)
def ExtractToc(disassembled_classes):
# javap output is structured by indent (2-space) levels.
good_patterns = [
'^[^ ]', # This includes all class/function/member signatures.
'^ SourceFile:',
'^ minor version:',
'^ major version:',
'^ Constant value:',
]
bad_patterns = [
'^const #', # Matches the constant pool (i.e. literals used in the class).
]
def JavapFilter(line):
return (re.match('|'.join(good_patterns), line) and
not re.match('|'.join(bad_patterns), line))
toc = filter(JavapFilter, disassembled_classes.split('\n'))
return '\n'.join(toc)
def UpdateToc(jar_path, toc_path):
classes = GetClassesInZipFile(zipfile.ZipFile(jar_path))
toc = ''
if len(classes) != 0:
javap_output = CallJavap(classpath=jar_path, classes=classes)
toc = ExtractToc(javap_output)
with open(toc_path, 'w') as tocfile:
tocfile.write(toc)
def DoJarToc(options):
jar_path = options.jar_path
toc_path = options.toc_path
record_path = '%s.md5.stamp' % toc_path
md5_check.CallAndRecordIfStale(
lambda: UpdateToc(jar_path, toc_path),
record_path=record_path,
input_paths=[jar_path],
force=not os.path.exists(toc_path),
)
build_utils.Touch(toc_path, fail_if_missing=True)
def main():
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--jar-path', help='Input .jar path.')
parser.add_option('--toc-path', help='Output .jar.TOC path.')
parser.add_option('--stamp', help='Path to touch on success.')
options, _ = parser.parse_args()
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
build_utils.GetPythonDependencies())
DoJarToc(options)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
| guorendong/iridium-browser-ubuntu | build/android/gyp/jar_toc.py | Python | bsd-3-clause | 3,500 |
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.dataset.run import dataset
from starthinker.task.drive.run import drive
from starthinker.task.dv_targeter.run import dv_targeter
def recipe_dv360_targeter(config, auth_dv, auth_sheet, auth_bigquery, recipe_name, recipe_slug, command, first_and_third):
"""Allows bulk targeting DV360 through Sheets and BigQuery.
Args:
auth_dv (authentication) - Credentials used for dv.
auth_sheet (authentication) - Credentials used for sheet.
auth_bigquery (authentication) - Credentials used for bigquery.
recipe_name (string) - Name of Google Sheet to create.
recipe_slug (string) - Name of Google BigQuery dataset to create.
command (choice) - Action to take.
first_and_third (boolean) - Load first and third party data (may be slow). If not selected, enter audience identifiers into sheet manually.
"""
dataset(config, {
'__comment__':'Ensure dataset exists.',
'auth':auth_bigquery,
'dataset':recipe_slug
})
drive(config, {
'__comment__':'Copy the default template to sheet with the recipe name',
'auth':auth_sheet,
'copy':{
'source':'https://docs.google.com/spreadsheets/d/1ARkIvh0D-gltZeiwniUonMNrm0Mi1s2meZ9FUjutXOE/',
'destination':recipe_name
}
})
dv_targeter(config, {
'__comment':'Depending on users choice, execute a different part of the solution.',
'auth_dv':auth_dv,
'auth_sheets':auth_sheet,
'auth_bigquery':auth_bigquery,
'sheet':recipe_name,
'dataset':recipe_slug,
'command':command,
'first_and_third':first_and_third
})
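# Illustrative programmatic call (all values are placeholders; normally the
# CLI below builds the Configuration):
#
#   config = Configuration(user='user.json', service='service.json')
#   recipe_dv360_targeter(config, 'user', 'user', 'service',
#                         'DV Targeter', 'dv_targeter', 'Load', False)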
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Allows bulk targeting DV360 through Sheets and BigQuery.
1. Select Load, click Save + Run, a sheet called DV Targeter will be created.
2. In the Partners sheet tab, fill in Filter column then select Load, click Save + Run.
        3. In the Advertisers sheet tab, fill in Filter column, then select Load, click Save + Run.
4. Check the First And Third Party option to load audiences, which may be slow. If not loaded, user will enter audience ids into the sheet manually.
5. On the Line Items sheet tab, the Filter is used only to limit drop down choices in the rest of the tool.
6. Optionally edit or filter the Targeting Options or Inventory Sources sheets to limit choices.
7. Make targeting updates, fill in changes on all tabs with colored fields (RED FIELDS ARE NOT IMPLEMENTED, IGNORE).
8. Select Preview, click Save + Run then check the Preview tabs.
9. Select Update, click Save + Run then check the Success and Error tabs.
10. Load and Update can be run multiple times.
11. If an update fails, all parts of the update failed, break it up into multiple updates.
12. To refresh the Partner, Advertiser, or Line Item list, remove the filters and run load.
"""))
parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
parser.add_argument("-auth_dv", help="Credentials used for dv.", default='user')
parser.add_argument("-auth_sheet", help="Credentials used for sheet.", default='user')
parser.add_argument("-auth_bigquery", help="Credentials used for bigquery.", default='service')
parser.add_argument("-recipe_name", help="Name of Google Sheet to create.", default='')
parser.add_argument("-recipe_slug", help="Name of Google BigQuery dataset to create.", default='')
parser.add_argument("-command", help="Action to take.", default='Load')
parser.add_argument("-first_and_third", help="Load first and third party data (may be slow). If not selected, enter audience identifiers into sheet manually.", default=False)
args = parser.parse_args()
config = Configuration(
project=args.project,
user=args.user,
service=args.service,
client=args.client,
key=args.key,
verbose=args.verbose
)
recipe_dv360_targeter(config, args.auth_dv, args.auth_sheet, args.auth_bigquery, args.recipe_name, args.recipe_slug, args.command, args.first_and_third)
| google/starthinker | examples/dv360_targeter_example.py | Python | apache-2.0 | 5,655 |
'''
#----- Specific Databases ----
if __name__ == '__main__':
from multiprocessing import freeze_support
freeze_support()
helpers.PRINT_CHECKS = True
if 'paris' in sys.argv:
convert_from_oxford_style(params.PARIS)
if 'oxford' in sys.argv:
convert_from_oxford_style(params.OXFORD)
if 'wildebeast' in sys.argv:
wildid_xlsx_to_tables(params.WILDEBEAST)
if 'toads' in sys.argv:
wildid_csv_to_tables(params.TOADS)
'''
| SU-ECE-17-7/hotspotter | hstest/test_convertdb.py | Python | apache-2.0 | 474 |
from __future__ import unicode_literals
import unittest
import os
from flask import Flask, render_template_string
from flask.ext.upcdn import UPCDN
class DefaultsTest(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.testing = True
UPCDN(self.app)
def test_domain_default(self):
""" Tests CDN_DOMAIN default value is correctly set. """
self.assertEquals(self.app.config['CDN_DOMAIN'], None)
def test_https_default(self):
""" Tests CDN_HTTPS default value is correctly set. """
self.assertEquals(self.app.config['CDN_HTTPS'], None)
def test_timestamp_default(self):
""" Tests CDN_TIMESTAMP default value is correctly set. """
self.assertEquals(self.app.config['CDN_TIMESTAMP'], None)
class UrlTests(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.testing = True
self.app.config['CDN_DOMAIN'] = 'mycdnname.cloudfront.net'
self.app.config['CDN_TIMESTAMP'] = None
@self.app.route('/<url_for_string>')
def a(url_for_string):
return render_template_string(url_for_string)
@self.app.route('/')
def b():
return render_template_string("{{ url_for('b') }}")
def client_get(self, ufs, secure=False):
UPCDN(self.app)
client = self.app.test_client()
if secure:
return client.get('/%s' % ufs, base_url='https://localhost')
else:
return client.get('/%s' % ufs)
def test_url_for(self):
""" Tests static endpoint correctly affects generated URLs. """
# non static endpoint url_for in template
self.assertEquals(self.client_get('').get_data(True), '/')
# static endpoint url_for in template
ufs = "{{ url_for('static', filename='bah.js') }}"
exp = 'http://mycdnname.cloudfront.net/static/bah.js'
self.assertEquals(self.client_get(ufs).get_data(True), exp)
def test_url_for_debug(self):
""" Tests app.debug correctly affects generated URLs. """
self.app.debug = True
ufs = "{{ url_for('static', filename='bah.js') }}"
exp = '/static/bah.js'
self.assertEquals(self.client_get(ufs).get_data(True), exp)
def test_url_for_https(self):
""" Tests CDN_HTTPS correctly affects generated URLs. """
ufs = "{{ url_for('static', filename='bah.js') }}"
https_exp = 'https://mycdnname.cloudfront.net/static/bah.js'
http_exp = 'http://mycdnname.cloudfront.net/static/bah.js'
self.app.config['CDN_HTTPS'] = True
self.assertEquals(self.client_get(ufs, secure=True).get_data(True),
https_exp)
self.assertEquals(self.client_get(ufs).get_data(True), https_exp)
self.app.config['CDN_HTTPS'] = False
self.assertEquals(self.client_get(ufs, secure=True).get_data(True),
http_exp)
self.assertEquals(self.client_get(ufs).get_data(True), http_exp)
self.app.config['CDN_HTTPS'] = None
self.assertEquals(self.client_get(ufs, secure=True).get_data(True),
https_exp)
self.assertEquals(self.client_get(ufs).get_data(True), http_exp)
def test_url_for_timestamp(self):
""" Tests CDN_TIMESTAMP correctly affects generated URLs. """
ufs = "{{ url_for('static', filename='bah.js') }}"
self.app.config['CDN_TIMESTAMP'] = "1234"
path = os.path.join(self.app.static_folder, 'bah.js')
exp = 'http://mycdnname.cloudfront.net/{0}/static/bah.js'.format(self.app.config['CDN_TIMESTAMP'])
self.assertEquals(self.client_get(ufs).get_data(True), exp)
self.app.config['CDN_TIMESTAMP'] = None
exp = 'http://mycdnname.cloudfront.net/static/bah.js'
self.assertEquals(self.client_get(ufs).get_data(True), exp)
if __name__ == '__main__':
unittest.main()
| stuartgc/flask-upcdn | tests/test_flask_cdn.py | Python | mit | 3,968 |
################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# Read the zproject/README.md for information about making permanent changes. #
################################################################################
import re
malamute_cdefs = list ()
# Custom setup for malamute
#Import definitions from dependent projects
################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# Read the zproject/README.md for information about making permanent changes. #
################################################################################
# Python cffi compatible file slurp
czmq_cdefs = list ()
# Custom setup for czmq
czmq_cdefs.append ('''
typedef int time_t;
typedef int off_t;
typedef unsigned char byte; // Single unsigned byte = 8 bits
typedef unsigned short dbyte; // Double byte = 16 bits
typedef unsigned int qbyte; // Quad byte = 32 bits
typedef int SOCKET;
typedef struct sockaddr_in inaddr_t;
// -- destroy an item
typedef void (czmq_destructor) (void **item);
// -- duplicate an item
typedef void *(czmq_duplicator) (const void *item);
// - compare two items, for sorting
typedef int (czmq_comparator) (const void *item1, const void *item2);
''')
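# Editorial sketch (not generated by zproject): once every fragment has been
# appended, these cdef strings are typically fed to cffi one by one, e.g.:
#
#     from cffi import FFI
#     ffi = FFI()
#     for fragment in czmq_cdefs:
#         ffi.cdef(fragment)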
czmq_cdefs.append ('''
typedef struct _zsock_t zsock_t;
typedef struct _zactor_t zactor_t;
typedef struct _zmsg_t zmsg_t;
typedef struct _zargs_t zargs_t;
typedef struct _zarmour_t zarmour_t;
typedef struct _zchunk_t zchunk_t;
typedef struct _char_t char_t;
typedef struct _zcert_t zcert_t;
typedef struct _zlist_t zlist_t;
typedef struct _zcertstore_t zcertstore_t;
typedef struct _zlistx_t zlistx_t;
typedef struct _zframe_t zframe_t;
typedef struct _msecs_t msecs_t;
typedef struct _zclock_t zclock_t;
typedef struct _zconfig_t zconfig_t;
typedef struct _zdigest_t zdigest_t;
typedef struct _zdir_t zdir_t;
typedef struct _zhash_t zhash_t;
typedef struct _zfile_t zfile_t;
typedef struct _zdir_patch_t zdir_patch_t;
typedef struct _zhashx_t zhashx_t;
typedef struct _ziflist_t ziflist_t;
typedef struct _zloop_t zloop_t;
typedef struct _zmq_pollitem_t zmq_pollitem_t;
typedef struct _zpoller_t zpoller_t;
typedef struct _zproc_t zproc_t;
typedef struct _va_list_t va_list_t;
typedef struct _socket_t socket_t;
typedef struct _zstr_t zstr_t;
typedef struct _zsys_t zsys_t;
typedef struct _ztimerset_t ztimerset_t;
typedef struct _ztrie_t ztrie_t;
typedef struct _zuuid_t zuuid_t;
typedef struct _zhttp_client_t zhttp_client_t;
typedef struct _zhttp_server_options_t zhttp_server_options_t;
typedef struct _zhttp_server_t zhttp_server_t;
typedef struct _zhttp_request_t zhttp_request_t;
typedef struct _zhttp_response_t zhttp_response_t;
typedef struct _zosc_t zosc_t;
// Actors get a pipe and arguments from caller
typedef void (zactor_fn) (
zsock_t *pipe, void *args);
// Function to be called on zactor_destroy. Default behavior is to send zmsg_t with string "$TERM" in a first frame.
//
// An example - to send $KTHXBAI string
//
// if (zstr_send (self, "$KTHXBAI") == 0)
// zsock_wait (self);
typedef void (zactor_destructor_fn) (
zactor_t *self);
// Loaders retrieve certificates from an arbitrary source.
typedef void (zcertstore_loader) (
zcertstore_t *self);
// Destructor for loader state.
typedef void (zcertstore_destructor) (
void **self_p);
// Destroy an item
typedef void (zchunk_destructor_fn) (
void **hint);
//
typedef int (zconfig_fct) (
zconfig_t *self, void *arg, int level);
// Destroy an item
typedef void (zframe_destructor_fn) (
void **hint);
// Callback function for zhash_freefn method
typedef void (zhash_free_fn) (
void *data);
// Destroy an item
typedef void (zhashx_destructor_fn) (
void **item);
// Duplicate an item
typedef void * (zhashx_duplicator_fn) (
const void *item);
// Compare two items, for sorting
typedef int (zhashx_comparator_fn) (
const void *item1, const void *item2);
// Destroy an item.
typedef void (zhashx_free_fn) (
void *data);
// Hash function for keys.
typedef size_t (zhashx_hash_fn) (
const void *key);
// Serializes an item to a longstr.
// The caller takes ownership of the newly created object.
typedef char * (zhashx_serializer_fn) (
const void *item);
// Deserializes a longstr into an item.
// The caller takes ownership of the newly created object.
typedef void * (zhashx_deserializer_fn) (
const char *item_str);
// Comparison function e.g. for sorting and removing.
typedef int (zlist_compare_fn) (
void *item1, void *item2);
// Callback function for zlist_freefn method
typedef void (zlist_free_fn) (
void *data);
// Destroy an item
typedef void (zlistx_destructor_fn) (
void **item);
// Duplicate an item
typedef void * (zlistx_duplicator_fn) (
const void *item);
// Compare two items, for sorting
typedef int (zlistx_comparator_fn) (
const void *item1, const void *item2);
// Callback function for reactor socket activity
typedef int (zloop_reader_fn) (
zloop_t *loop, zsock_t *reader, void *arg);
// Callback function for reactor events (low-level)
typedef int (zloop_fn) (
zloop_t *loop, zmq_pollitem_t *item, void *arg);
// Callback for reactor timer events
typedef int (zloop_timer_fn) (
zloop_t *loop, int timer_id, void *arg);
// Callback for interrupt signal handler
typedef void (zsys_handler_fn) (
int signal_value);
// Callback function for timer event.
typedef void (ztimerset_fn) (
int timer_id, void *arg);
// Callback function for ztrie_node to destroy node data.
typedef void (ztrie_destroy_data_fn) (
void **data);
// CLASS: zactor
// Create a new actor passing arbitrary arguments reference.
zactor_t *
zactor_new (zactor_fn task, void *args);
// Destroy an actor.
void
zactor_destroy (zactor_t **self_p);
// Send a zmsg message to the actor, take ownership of the message
// and destroy when it has been sent.
int
zactor_send (zactor_t *self, zmsg_t **msg_p);
// Receive a zmsg message from the actor. Returns NULL if the actor
// was interrupted before the message could be received, or if there
// was a timeout on the actor.
zmsg_t *
zactor_recv (zactor_t *self);
// Probe the supplied object, and report if it looks like a zactor_t.
bool
zactor_is (void *self);
// Probe the supplied reference. If it looks like a zactor_t instance,
// return the underlying libzmq actor handle; else if it looks like
// a libzmq actor handle, return the supplied value.
void *
zactor_resolve (void *self);
// Return the actor's zsock handle. Use this when you absolutely need
// to work with the zsock instance rather than the actor.
zsock_t *
zactor_sock (zactor_t *self);
// Replace the default destructor with a custom function. The actor MUST be able to handle the new message instead of the default $TERM.
void
zactor_set_destructor (zactor_t *self, zactor_destructor_fn destructor);
// Self test of this class.
void
zactor_test (bool verbose);
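// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated; "echo_task" is a hypothetical
// zactor_fn that replies to each message received on its pipe):
//
//     zactor_t *actor = zactor_new (echo_task, NULL);
//     zmsg_t *msg = zmsg_new ();                 // zmsg API declared elsewhere
//     zmsg_addstr (msg, "HELLO");
//     zactor_send (actor, &msg);                 // actor takes ownership
//     zmsg_t *reply = zactor_recv (actor);
//     zmsg_destroy (&reply);
//     zactor_destroy (&actor);                   // sends $TERM, then joins
// ---------------------------------------------------------------------------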
// CLASS: zargs
// Create a new zargs from command line arguments.
zargs_t *
zargs_new (int argc, char **argv);
// Destroy zargs instance.
void
zargs_destroy (zargs_t **self_p);
// Return program name (argv[0])
const char *
zargs_progname (zargs_t *self);
// Return number of positional arguments
size_t
zargs_arguments (zargs_t *self);
// Return first positional argument or NULL
const char *
zargs_first (zargs_t *self);
// Return next positional argument or NULL
const char *
zargs_next (zargs_t *self);
// Return first named parameter value, or NULL if there are no named
// parameters, or value for which zargs_param_empty (arg) returns true.
const char *
zargs_param_first (zargs_t *self);
// Return next named parameter value, or NULL if there are no named
// parameters, or value for which zargs_param_empty (arg) returns true.
const char *
zargs_param_next (zargs_t *self);
// Return current parameter name, or NULL if there are no named parameters.
const char *
zargs_param_name (zargs_t *self);
// Return value of named parameter or NULL if it has no value (or was not specified)
const char *
zargs_get (zargs_t *self, const char *name);
// Return value of one of the parameter(s) or NULL if it has no value (or was not specified)
const char *
zargs_getx (zargs_t *self, const char *name, ...);
// Returns true if named parameter was specified on command line
bool
zargs_has (zargs_t *self, const char *name);
// Returns true if named parameter(s) was specified on command line
bool
zargs_hasx (zargs_t *self, const char *name, ...);
// Print an instance of zargs.
void
zargs_print (zargs_t *self);
// Self test of this class.
void
zargs_test (bool verbose);
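// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated; the flag names are hypothetical):
//
//     zargs_t *args = zargs_new (argc, argv);
//     bool verbose = zargs_has (args, "--verbose");
//     const char *endpoint = zargs_get (args, "--endpoint");
//     const char *filename = zargs_first (args);   // first positional argument
//     zargs_destroy (&args);
// ---------------------------------------------------------------------------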
// CLASS: zarmour
// Create a new zarmour
zarmour_t *
zarmour_new (void);
// Destroy the zarmour
void
zarmour_destroy (zarmour_t **self_p);
// Encode a stream of bytes into an armoured string. Returns the armoured
// string, or NULL if there was insufficient memory available to allocate
// a new string.
char *
zarmour_encode (zarmour_t *self, const byte *data, size_t size);
// Decode an armoured string into a chunk. The decoded output is
// null-terminated, so it may be treated as a string, if that's what
// it was prior to encoding.
zchunk_t *
zarmour_decode (zarmour_t *self, const char *data);
// Get the mode property.
int
zarmour_mode (zarmour_t *self);
// Get printable string for mode.
const char *
zarmour_mode_str (zarmour_t *self);
// Set the mode property.
void
zarmour_set_mode (zarmour_t *self, int mode);
// Return true if padding is turned on.
bool
zarmour_pad (zarmour_t *self);
// Turn padding on or off. Default is on.
void
zarmour_set_pad (zarmour_t *self, bool pad);
// Get the padding character.
char
zarmour_pad_char (zarmour_t *self);
// Set the padding character.
void
zarmour_set_pad_char (zarmour_t *self, char pad_char);
// Return true if splitting output into lines is turned on. Default is off.
bool
zarmour_line_breaks (zarmour_t *self);
// Turn splitting output into lines on or off.
void
zarmour_set_line_breaks (zarmour_t *self, bool line_breaks);
// Get the line length used for splitting lines.
size_t
zarmour_line_length (zarmour_t *self);
// Set the line length used for splitting lines.
void
zarmour_set_line_length (zarmour_t *self, size_t line_length);
// Print properties of object
void
zarmour_print (zarmour_t *self);
// Self test of this class.
void
zarmour_test (bool verbose);
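// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated): round-trip a few bytes through the
// armouring, then release everything:
//
//     zarmour_t *armour = zarmour_new ();
//     char *encoded = zarmour_encode (armour, (const byte *) "data", 4);
//     zchunk_t *decoded = zarmour_decode (armour, encoded);
//     zstr_free (&encoded);                  // zstr API declared elsewhere
//     zchunk_destroy (&decoded);
//     zarmour_destroy (&armour);
// ---------------------------------------------------------------------------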
// CLASS: zcert
// Create and initialize a new certificate in memory
zcert_t *
zcert_new (void);
// Accepts public/secret key pair from caller
zcert_t *
zcert_new_from (const byte *public_key, const byte *secret_key);
// Accepts public/secret key text pair from caller
zcert_t *
zcert_new_from_txt (const char *public_txt, const char *secret_txt);
// Load certificate from file
zcert_t *
zcert_load (const char *filename);
// Destroy a certificate in memory
void
zcert_destroy (zcert_t **self_p);
// Return public part of key pair as 32-byte binary string
const byte *
zcert_public_key (zcert_t *self);
// Return secret part of key pair as 32-byte binary string
const byte *
zcert_secret_key (zcert_t *self);
// Return public part of key pair as Z85 armored string
const char *
zcert_public_txt (zcert_t *self);
// Return secret part of key pair as Z85 armored string
const char *
zcert_secret_txt (zcert_t *self);
// Set certificate metadata from formatted string.
void
zcert_set_meta (zcert_t *self, const char *name, const char *format, ...);
// Unset certificate metadata.
void
zcert_unset_meta (zcert_t *self, const char *name);
// Get metadata value from certificate; if the metadata value doesn't
// exist, returns NULL.
const char *
zcert_meta (zcert_t *self, const char *name);
// Get list of metadata fields from certificate. Caller is responsible for
// destroying list. Caller should not modify the values of list items.
zlist_t *
zcert_meta_keys (zcert_t *self);
// Save full certificate (public + secret) to file for persistent storage
// This creates one public file and one secret file (filename + "_secret").
int
zcert_save (zcert_t *self, const char *filename);
// Save public certificate only to file for persistent storage
int
zcert_save_public (zcert_t *self, const char *filename);
// Save secret certificate only to file for persistent storage
int
zcert_save_secret (zcert_t *self, const char *filename);
// Apply certificate to socket, i.e. use for CURVE security on socket.
// If certificate was loaded from public file, the secret key will be
// undefined, and this certificate will not work successfully.
void
zcert_apply (zcert_t *self, void *socket);
// Return copy of certificate; if certificate is NULL or we exhausted
// heap memory, returns NULL.
zcert_t *
zcert_dup (zcert_t *self);
// Return true if two certificates have the same keys
bool
zcert_eq (zcert_t *self, zcert_t *compare);
// Print certificate contents to stdout
void
zcert_print (zcert_t *self);
// Self test of this class
void
zcert_test (bool verbose);
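// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated; "mycert" is a hypothetical filename;
// assumes the library was built with CURVE crypto support):
//
//     zcert_t *cert = zcert_new ();
//     zcert_set_meta (cert, "name", "%s", "my-service");
//     zcert_save (cert, "mycert");       // writes "mycert" and "mycert_secret"
//     zcert_t *loaded = zcert_load ("mycert");
//     assert (zcert_eq (cert, loaded));
//     zcert_destroy (&loaded);
//     zcert_destroy (&cert);
// ---------------------------------------------------------------------------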
// CLASS: zcertstore
// Create a new certificate store from a disk directory, loading and
// indexing all certificates in that location. The directory itself may be
// absent, and created later, or modified at any time. The certificate store
// is automatically refreshed on any zcertstore_lookup() call. If the
// location is specified as NULL, creates a pure-memory store, which you
// can work with by inserting certificates at runtime.
zcertstore_t *
zcertstore_new (const char *location);
// Destroy a certificate store object in memory. Does not affect anything
// stored on disk.
void
zcertstore_destroy (zcertstore_t **self_p);
// Override the default disk loader with a custom loader fn.
void
zcertstore_set_loader (zcertstore_t *self, zcertstore_loader loader, zcertstore_destructor destructor, void *state);
// Look up certificate by public key, returns zcert_t object if found,
// else returns NULL. The public key is provided in Z85 text format.
zcert_t *
zcertstore_lookup (zcertstore_t *self, const char *public_key);
// Insert certificate into certificate store in memory. Note that this
// does not save the certificate to disk. To do that, use zcert_save()
// directly on the certificate. Takes ownership of zcert_t object.
void
zcertstore_insert (zcertstore_t *self, zcert_t **cert_p);
// Empty certificate hashtable. This wrapper exists to be friendly to bindings,
// which don't usually have access to struct internals.
void
zcertstore_empty (zcertstore_t *self);
// Print list of certificates in store to logging facility
void
zcertstore_print (zcertstore_t *self);
// Return a list of all the certificates in the store.
// The caller takes ownership of the zlistx_t object and is responsible
// for destroying it. The caller does not take ownership of the zcert_t
// objects.
zlistx_t *
zcertstore_certs (zcertstore_t *self);
// Self test of this class
void
zcertstore_test (bool verbose);
// CLASS: zchunk
// Create a new chunk of the specified size. If you specify the data, it
// is copied into the chunk. If you do not specify the data, the chunk is
// allocated and left empty, and you can then add data using zchunk_append.
zchunk_t *
zchunk_new (const void *data, size_t size);
// Create a new chunk from memory. Takes ownership of the memory and calls
// the destructor on destroy.
zchunk_t *
zchunk_frommem (void *data, size_t size, zchunk_destructor_fn destructor, void *hint);
// Destroy a chunk
void
zchunk_destroy (zchunk_t **self_p);
// Resizes chunk max_size as requested; the chunk's current size is set to zero
void
zchunk_resize (zchunk_t *self, size_t size);
// Return chunk cur size
size_t
zchunk_size (zchunk_t *self);
// Return chunk max size
size_t
zchunk_max_size (zchunk_t *self);
// Return chunk data
byte *
zchunk_data (zchunk_t *self);
// Set chunk data from user-supplied data; truncate if too large. Data may
// be null. Returns actual size of chunk
size_t
zchunk_set (zchunk_t *self, const void *data, size_t size);
// Fill chunk data from user-supplied octet
size_t
zchunk_fill (zchunk_t *self, byte filler, size_t size);
// Append user-supplied data to chunk, return resulting chunk size. If the
// data would exceed the available space, it is truncated. If you want to
// grow the chunk to accommodate new data, use the zchunk_extend method.
size_t
zchunk_append (zchunk_t *self, const void *data, size_t size);
// Append user-supplied data to chunk, return resulting chunk size. If the
// data would exceed the available space, the chunk grows in size.
size_t
zchunk_extend (zchunk_t *self, const void *data, size_t size);
// Copy as much data from 'source' into the chunk as possible; returns the
// new size of chunk. If all data from 'source' is used, the source chunk is
// marked exhausted. Source can be consumed as many times as needed until
// it is exhausted. If source was already exhausted, does not change chunk.
size_t
zchunk_consume (zchunk_t *self, zchunk_t *source);
// Returns true if the chunk was exhausted by consume methods, or if the
// chunk has a size of zero.
bool
zchunk_exhausted (zchunk_t *self);
// Read chunk from an open file descriptor
zchunk_t *
zchunk_read (FILE *handle, size_t bytes);
// Write chunk to an open file descriptor
int
zchunk_write (zchunk_t *self, FILE *handle);
// Try to slurp an entire file into a chunk. Will read up to maxsize of
// the file. If maxsize is 0, will attempt to read the entire file and
// fail with an assertion if that cannot fit into memory. Returns a new
// chunk containing the file data, or NULL if the file could not be read.
zchunk_t *
zchunk_slurp (const char *filename, size_t maxsize);
// Create copy of chunk, as new chunk object. Returns a fresh zchunk_t
// object, or null if there was not enough heap memory. If chunk is null,
// returns null.
zchunk_t *
zchunk_dup (zchunk_t *self);
// Return chunk data encoded as printable hex string. Caller must free
// string when finished with it.
char *
zchunk_strhex (zchunk_t *self);
// Return chunk data copied into freshly allocated string
// Caller must free string when finished with it.
char *
zchunk_strdup (zchunk_t *self);
// Return TRUE if chunk body is equal to string, excluding terminator
bool
zchunk_streq (zchunk_t *self, const char *string);
// Transform zchunk into a zframe that can be sent in a message.
zframe_t *
zchunk_pack (zchunk_t *self);
// Transform zchunk into a zframe that can be sent in a message.
// Take ownership of the chunk.
zframe_t *
zchunk_packx (zchunk_t **self_p);
// Transform a zframe into a zchunk.
zchunk_t *
zchunk_unpack (zframe_t *frame);
// Calculate SHA1 digest for chunk, using zdigest class.
const char *
zchunk_digest (zchunk_t *self);
// Dump chunk to FILE stream, for debugging and tracing.
void
zchunk_fprint (zchunk_t *self, FILE *file);
// Dump message to stderr, for debugging and tracing.
// See zchunk_fprint for details
void
zchunk_print (zchunk_t *self);
// Probe the supplied object, and report if it looks like a zchunk_t.
bool
zchunk_is (void *self);
// Self test of this class.
void
zchunk_test (bool verbose);
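// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated):
//
//     zchunk_t *chunk = zchunk_new (NULL, 64);     // empty, capacity 64
//     zchunk_append (chunk, "Hello", 5);
//     char *hex = zchunk_strhex (chunk);           // "48656C6C6F"
//     zstr_free (&hex);
//     zchunk_destroy (&chunk);
// ---------------------------------------------------------------------------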
// CLASS: zclock
// Sleep for a number of milliseconds
void
zclock_sleep (int msecs);
// Return current system clock as milliseconds. Note that this clock can
// jump backwards (if the system clock is changed) so is unsafe to use for
// timers and time offsets. Use zclock_mono for that instead.
int64_t
zclock_time (void);
// Return current monotonic clock in milliseconds. Use this when you compute
// time offsets. The monotonic clock is not affected by system changes and
// so will never be reset backwards, unlike a system clock.
int64_t
zclock_mono (void);
// Return current monotonic clock in microseconds. Use this when you compute
// time offsets. The monotonic clock is not affected by system changes and
// so will never be reset backwards, unlike a system clock.
int64_t
zclock_usecs (void);
// Return formatted date/time as fresh string. Free using zstr_free().
char *
zclock_timestr (void);
// Self test of this class.
void
zclock_test (bool verbose);
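// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated): measure an elapsed interval with
// the monotonic clock, which is safe against system clock changes:
//
//     int64_t started = zclock_mono ();
//     zclock_sleep (250);
//     int64_t elapsed = zclock_mono () - started;  // roughly 250 ms
// ---------------------------------------------------------------------------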
// CLASS: zconfig
// Create new config item
zconfig_t *
zconfig_new (const char *name, zconfig_t *parent);
// Destroy a config item and all its children
void
zconfig_destroy (zconfig_t **self_p);
// Load a config tree from a specified ZPL text file; returns a zconfig_t
// reference for the root, if the file exists and is readable. Returns NULL
// if the file does not exist.
zconfig_t *
zconfig_load (const char *filename);
// Equivalent to zconfig_load, taking a format string instead of a fixed
// filename.
zconfig_t *
zconfig_loadf (const char *format, ...);
// Create copy of config, as new zconfig object; the caller MUST destroy it.
// Returns a fresh zconfig_t object. If config is null, or memory was
// exhausted, returns null.
zconfig_t *
zconfig_dup (zconfig_t *self);
// Return name of config item
char *
zconfig_name (zconfig_t *self);
// Return value of config item
char *
zconfig_value (zconfig_t *self);
// Insert or update configuration key with value
void
zconfig_put (zconfig_t *self, const char *path, const char *value);
// Equivalent to zconfig_put, accepting a format specifier and variable
// argument list, instead of a single string value.
void
zconfig_putf (zconfig_t *self, const char *path, const char *format, ...);
// Get value for config item into a string value; leading slash is optional
// and ignored.
char *
zconfig_get (zconfig_t *self, const char *path, const char *default_value);
// Set config item name, name may be NULL
void
zconfig_set_name (zconfig_t *self, const char *name);
// Set new value for config item. The new value may be a string, a printf
// format, or NULL. Note that if the string may contain '%', or if it
// comes from an insecure source, you must use '%s' as the format, followed
// by the string.
void
zconfig_set_value (zconfig_t *self, const char *format, ...);
// Find our first child, if any
zconfig_t *
zconfig_child (zconfig_t *self);
// Find our first sibling, if any
zconfig_t *
zconfig_next (zconfig_t *self);
// Find a config item along a path; leading slash is optional and ignored.
zconfig_t *
zconfig_locate (zconfig_t *self, const char *path);
// Locate the last config item at a specified depth
zconfig_t *
zconfig_at_depth (zconfig_t *self, int level);
// Execute a callback for each config item in the tree; returns zero if
// successful, else -1.
int
zconfig_execute (zconfig_t *self, zconfig_fct handler, void *arg);
// Add comment to config item before saving to disk. You can add as many
// comment lines as you like. If you use a null format, all comments are
// deleted.
void
zconfig_set_comment (zconfig_t *self, const char *format, ...);
// Return comments of config item, as zlist.
zlist_t *
zconfig_comments (zconfig_t *self);
// Save a config tree to a specified ZPL text file, where a filename
// "-" means dump to standard output.
int
zconfig_save (zconfig_t *self, const char *filename);
// Equivalent to zconfig_save, taking a format string instead of a fixed
// filename.
int
zconfig_savef (zconfig_t *self, const char *format, ...);
// Report filename used during zconfig_load, or NULL if none
const char *
zconfig_filename (zconfig_t *self);
// Reload config tree from same file that it was previously loaded from.
// Returns 0 if OK, -1 if there was an error (and then does not change
// existing data).
int
zconfig_reload (zconfig_t **self_p);
// Load a config tree from a memory chunk
zconfig_t *
zconfig_chunk_load (zchunk_t *chunk);
// Save a config tree to a new memory chunk
zchunk_t *
zconfig_chunk_save (zconfig_t *self);
// Load a config tree from a null-terminated string
zconfig_t *
zconfig_str_load (const char *string);
// Save a config tree to a new null terminated string
char *
zconfig_str_save (zconfig_t *self);
// Return true if a configuration tree was loaded from a file and that
// file has changed since the tree was loaded.
bool
zconfig_has_changed (zconfig_t *self);
// Destroy subtree (all children)
void
zconfig_remove_subtree (zconfig_t *self);
// Destroy node and subtree (all children)
void
zconfig_remove (zconfig_t **self_p);
// Print the config file to open stream
void
zconfig_fprint (zconfig_t *self, FILE *file);
// Print properties of object
void
zconfig_print (zconfig_t *self);
// Self test of this class
void
zconfig_test (bool verbose);
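// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated; "myapp.cfg" and the config paths
// are hypothetical):
//
//     zconfig_t *root = zconfig_load ("myapp.cfg");
//     if (root) {
//         char *endpoint = zconfig_get (root, "server/endpoint", "tcp://*:5555");
//         zconfig_put (root, "server/verbose", "1");
//         zconfig_save (root, "myapp.cfg");
//         zconfig_destroy (&root);
//     }
// ---------------------------------------------------------------------------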
// CLASS: zdigest
// Constructor - creates new digest object, which you use to build up a
// digest by repeatedly calling zdigest_update() on chunks of data.
zdigest_t *
zdigest_new (void);
// Destroy a digest object
void
zdigest_destroy (zdigest_t **self_p);
// Add buffer into digest calculation
void
zdigest_update (zdigest_t *self, const byte *buffer, size_t length);
// Return final digest hash data. If built without crypto support,
// returns NULL.
const byte *
zdigest_data (zdigest_t *self);
// Return final digest hash size
size_t
zdigest_size (zdigest_t *self);
// Return digest as printable hex string; caller should not modify nor
// free this string. After calling this, you may not use zdigest_update()
// on the same digest. If built without crypto support, returns NULL.
char *
zdigest_string (zdigest_t *self);
// Self test of this class.
void
zdigest_test (bool verbose);
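// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated):
//
//     zdigest_t *digest = zdigest_new ();
//     zdigest_update (digest, (const byte *) "abc", 3);
//     char *hash = zdigest_string (digest);  // hex SHA-1; NULL without crypto
//     zdigest_destroy (&digest);             // hash is owned by the digest
// ---------------------------------------------------------------------------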
// CLASS: zdir
// Create a new directory item that loads in the full tree of the specified
// path, optionally located under some parent path. If parent is "-", then
// loads only the top-level directory, and does not use parent as a path.
zdir_t *
zdir_new (const char *path, const char *parent);
// Destroy a directory tree and all children it contains.
void
zdir_destroy (zdir_t **self_p);
// Return directory path
const char *
zdir_path (zdir_t *self);
// Return last modification time for directory.
time_t
zdir_modified (zdir_t *self);
// Return total hierarchy size, in bytes of data contained in all files
// in the directory tree.
off_t
zdir_cursize (zdir_t *self);
// Return directory count
size_t
zdir_count (zdir_t *self);
// Returns a sorted list of zfile objects; each entry in the list is a pointer
// to a zfile_t item already allocated in the zdir tree. Do not destroy the
// original zdir tree until you are done with this list.
zlist_t *
zdir_list (zdir_t *self);
// Remove directory, optionally including all files that it contains, at
// all levels. If force is false, will only remove the directory if empty.
// If force is true, will remove all files and all subdirectories.
void
zdir_remove (zdir_t *self, bool force);
// Calculate differences between two versions of a directory tree.
// Returns a list of zdir_patch_t patches. Either older or newer may
// be null, indicating the directory is empty/absent. If alias is set,
// generates virtual filename (minus path, plus alias).
zlist_t *
zdir_diff (zdir_t *older, zdir_t *newer, const char *alias);
// Return full contents of directory as a zdir_patch list.
zlist_t *
zdir_resync (zdir_t *self, const char *alias);
// Load directory cache; returns a hash table containing the SHA-1 digests
// of every file in the tree. The cache is saved between runs in .cache.
zhash_t *
zdir_cache (zdir_t *self);
// Print contents of directory to open stream
void
zdir_fprint (zdir_t *self, FILE *file, int indent);
// Print contents of directory to stdout
void
zdir_print (zdir_t *self, int indent);
// Create a new zdir_watch actor instance:
//
// zactor_t *watch = zactor_new (zdir_watch, NULL);
//
// Destroy zdir_watch instance:
//
// zactor_destroy (&watch);
//
// Enable verbose logging of commands and activity:
//
// zstr_send (watch, "VERBOSE");
//
// Subscribe to changes to a directory path:
//
// zsock_send (watch, "ss", "SUBSCRIBE", "directory_path");
//
// Unsubscribe from changes to a directory path:
//
// zsock_send (watch, "ss", "UNSUBSCRIBE", "directory_path");
//
// Receive directory changes:
// zsock_recv (watch, "sp", &path, &patches);
//
// // Delete the received data.
// free (path);
// zlist_destroy (&patches);
void
zdir_watch (zsock_t *pipe, void *unused);
// Self test of this class.
void
zdir_test (bool verbose);
// CLASS: zdir_patch
// Create new patch
zdir_patch_t *
zdir_patch_new (const char *path, zfile_t *file, int op, const char *alias);
// Destroy a patch
void
zdir_patch_destroy (zdir_patch_t **self_p);
// Create copy of a patch. If the patch is null, or memory was exhausted,
// returns null.
zdir_patch_t *
zdir_patch_dup (zdir_patch_t *self);
// Return patch file directory path
const char *
zdir_patch_path (zdir_patch_t *self);
// Return patch file item
zfile_t *
zdir_patch_file (zdir_patch_t *self);
// Return operation
int
zdir_patch_op (zdir_patch_t *self);
// Return patch virtual file path
const char *
zdir_patch_vpath (zdir_patch_t *self);
// Calculate hash digest for file (create only)
void
zdir_patch_digest_set (zdir_patch_t *self);
// Return hash digest for patch file
const char *
zdir_patch_digest (zdir_patch_t *self);
// Self test of this class.
void
zdir_patch_test (bool verbose);
// CLASS: zfile
// If file exists, populates properties. CZMQ supports portable symbolic
// links, which are files with the extension ".ln". A symbolic link is a
// text file containing one line, the filename of a target file. Reading
// data from the symbolic link actually reads from the target file. Path
// may be NULL, in which case it is not used.
zfile_t *
zfile_new (const char *path, const char *name);
// Create new temporary file for writing via tmpfile. File is automatically
// deleted on destroy
zfile_t *
zfile_tmp (void);
// Destroy a file item
void
zfile_destroy (zfile_t **self_p);
// Duplicate a file item, returns a newly constructed item. If the file
// is null, or memory was exhausted, returns null.
zfile_t *
zfile_dup (zfile_t *self);
// Return file name, remove path if provided
const char *
zfile_filename (zfile_t *self, const char *path);
// Refresh file properties from disk; this is not done automatically
// on access methods, otherwise it is not possible to compare directory
// snapshots.
void
zfile_restat (zfile_t *self);
// Return when the file was last modified. If you want this to reflect the
// current situation, call zfile_restat before checking this property.
time_t
zfile_modified (zfile_t *self);
// Return the last-known size of the file. If you want this to reflect the
// current situation, call zfile_restat before checking this property.
off_t
zfile_cursize (zfile_t *self);
// Return true if the file is a directory. If you want this to reflect
// any external changes, call zfile_restat before checking this property.
bool
zfile_is_directory (zfile_t *self);
// Return true if the file is a regular file. If you want this to reflect
// any external changes, call zfile_restat before checking this property.
bool
zfile_is_regular (zfile_t *self);
// Return true if the file is readable by this process. If you want this to
// reflect any external changes, call zfile_restat before checking this
// property.
bool
zfile_is_readable (zfile_t *self);
// Return true if the file is writeable by this process. If you want this
// to reflect any external changes, call zfile_restat before checking this
// property.
bool
zfile_is_writeable (zfile_t *self);
// Check if file has stopped changing and can be safely processed.
// Updates the file statistics from disk at every call.
bool
zfile_is_stable (zfile_t *self);
// Return true if the file was changed on disk since the zfile_t object
// was created, or the last zfile_restat() call made on it.
bool
zfile_has_changed (zfile_t *self);
// Remove the file from disk
void
zfile_remove (zfile_t *self);
// Open file for reading
// Returns 0 if OK, -1 if not found or not accessible
int
zfile_input (zfile_t *self);
// Open file for writing, creating directory if needed
// File is created if necessary; chunks can be written to file at any
// location. Returns 0 if OK, -1 if error.
int
zfile_output (zfile_t *self);
// Read chunk from file at specified position. If this was the last chunk,
// sets the eof property. Returns a null chunk in case of error.
zchunk_t *
zfile_read (zfile_t *self, size_t bytes, off_t offset);
// Returns true if zfile_read() just read the last chunk in the file.
bool
zfile_eof (zfile_t *self);
// Write chunk to file at specified position
// Return 0 if OK, else -1
int
zfile_write (zfile_t *self, zchunk_t *chunk, off_t offset);
// Read next line of text from file. Returns a pointer to the text line,
// or NULL if there was nothing more to read from the file.
const char *
zfile_readln (zfile_t *self);
// Close file, if open
void
zfile_close (zfile_t *self);
// Return file handle, if opened
FILE *
zfile_handle (zfile_t *self);
// Calculate SHA1 digest for file, using zdigest class.
const char *
zfile_digest (zfile_t *self);
// Self test of this class.
void
zfile_test (bool verbose);
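// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated; the path and name are hypothetical):
//
//     zfile_t *file = zfile_new ("./data", "input.txt");
//     if (zfile_input (file) == 0) {
//         zchunk_t *chunk = zfile_read (file, 1024, 0);
//         //  ... use zchunk_data (chunk) / zchunk_size (chunk) ...
//         zchunk_destroy (&chunk);
//         zfile_close (file);
//     }
//     zfile_destroy (&file);
// ---------------------------------------------------------------------------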
// CLASS: zframe
// Create a new frame. If size is not null, allocates the frame data
// to the specified size. If additionally, data is not null, copies
// size octets from the specified data into the frame body.
zframe_t *
zframe_new (const void *data, size_t size);
// Destroy a frame
void
zframe_destroy (zframe_t **self_p);
// Create an empty (zero-sized) frame
zframe_t *
zframe_new_empty (void);
// Create a frame with a specified string content.
zframe_t *
zframe_from (const char *string);
// Create a new frame from memory. Takes ownership of the memory and calls
// the destructor on destroy.
zframe_t *
zframe_frommem (void *data, size_t size, zframe_destructor_fn destructor, void *hint);
// Receive frame from socket, returns zframe_t object or NULL if the recv
// was interrupted. Does a blocking recv, if you want to not block then use
// zpoller or zloop.
zframe_t *
zframe_recv (void *source);
// Send a frame to a socket, destroy frame after sending.
// Return -1 on error, 0 on success.
int
zframe_send (zframe_t **self_p, void *dest, int flags);
// Return number of bytes in frame data
size_t
zframe_size (zframe_t *self);
// Return address of frame data
byte *
zframe_data (zframe_t *self);
// Return meta data property for frame
// The caller shall not modify or free the returned value, which shall be
// owned by the message.
const char *
zframe_meta (zframe_t *self, const char *property);
// Create a new frame that duplicates an existing frame. If frame is null,
// or memory was exhausted, returns null.
zframe_t *
zframe_dup (zframe_t *self);
// Return frame data encoded as printable hex string, useful for 0MQ UUIDs.
// Caller must free string when finished with it.
char *
zframe_strhex (zframe_t *self);
// Return frame data copied into freshly allocated string
// Caller must free string when finished with it.
char *
zframe_strdup (zframe_t *self);
// Return TRUE if frame body is equal to string, excluding terminator
bool
zframe_streq (zframe_t *self, const char *string);
// Return frame MORE indicator (1 or 0), set when reading frame from socket
// or by the zframe_set_more() method
int
zframe_more (zframe_t *self);
// Set frame MORE indicator (1 or 0). Note this is NOT used when sending
// frame to socket, you have to specify flag explicitly.
void
zframe_set_more (zframe_t *self, int more);
// Return frame routing ID, if the frame came from a ZMQ_SERVER socket.
// Else returns zero.
uint32_t
zframe_routing_id (zframe_t *self);
// Set routing ID on frame. This is used if/when the frame is sent to a
// ZMQ_SERVER socket.
void
zframe_set_routing_id (zframe_t *self, uint32_t routing_id);
// Return frame group of radio-dish pattern.
const char *
zframe_group (zframe_t *self);
// Set group on frame. This is used if/when the frame is sent to a
// ZMQ_RADIO socket.
// Return -1 on error, 0 on success.
int
zframe_set_group (zframe_t *self, const char *group);
// Return TRUE if two frames have identical size and data
// If either frame is NULL, equality is always false.
bool
zframe_eq (zframe_t *self, zframe_t *other);
// Set new contents for frame
void
zframe_reset (zframe_t *self, const void *data, size_t size);
// Send message to zsys log sink (may be stdout, or system facility as
// configured by zsys_set_logstream). Prefix shows before frame, if not null.
// Long messages are truncated.
void
zframe_print (zframe_t *self, const char *prefix);
// Send message to zsys log sink (may be stdout, or system facility as
// configured by zsys_set_logstream). Prefix shows before frame, if not null.
// Message length is specified; no truncation unless length is zero.
// Backwards compatible with zframe_print when length is zero.
void
zframe_print_n (zframe_t *self, const char *prefix, size_t length);
// Probe the supplied object, and report if it looks like a zframe_t.
bool
zframe_is (void *self);
// Self test of this class.
void
zframe_test (bool verbose);
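// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated):
//
//     zframe_t *frame = zframe_from ("WORLD");
//     zframe_t *copy = zframe_dup (frame);
//     assert (zframe_eq (frame, copy));
//     assert (zframe_streq (frame, "WORLD"));
//     zframe_destroy (&copy);
//     zframe_destroy (&frame);
// ---------------------------------------------------------------------------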
// CLASS: zhash
// Create a new, empty hash container
zhash_t *
zhash_new (void);
// Destroy a hash container and all items in it
void
zhash_destroy (zhash_t **self_p);
// Unpack binary frame into a new hash table. Packed data must follow format
// defined by zhash_pack. Hash table is set to autofree. An empty frame
// unpacks to an empty hash table.
zhash_t *
zhash_unpack (zframe_t *frame);
// Insert item into hash table with specified key and item.
// If key is already present returns -1 and leaves existing item unchanged
// Returns 0 on success.
int
zhash_insert (zhash_t *self, const char *key, void *item);
// Update item into hash table with specified key and item.
// If key is already present, destroys old item and inserts new one.
// Use free_fn method to ensure deallocator is properly called on item.
void
zhash_update (zhash_t *self, const char *key, void *item);
// Remove an item specified by key from the hash table. If there was no such
// item, this function does nothing.
void
zhash_delete (zhash_t *self, const char *key);
// Return the item at the specified key, or null
void *
zhash_lookup (zhash_t *self, const char *key);
// Reindexes an item from an old key to a new key. If there was no such
// item, does nothing. Returns 0 if successful, else -1.
int
zhash_rename (zhash_t *self, const char *old_key, const char *new_key);
// Set a free function for the specified hash table item. When the item is
// destroyed, the free function, if any, is called on that item.
// Use this when hash items are dynamically allocated, to ensure that
// you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
// Returns the item, or NULL if there is no such item.
void *
zhash_freefn (zhash_t *self, const char *key, zhash_free_fn free_fn);
// Return the number of keys/items in the hash table
size_t
zhash_size (zhash_t *self);
// Make copy of hash table; if supplied table is null, returns null.
// Does not copy items themselves. Rebuilds new table so may be slow on
// very large tables. NOTE: only works with item values that are strings
// since there's no other way to know how to duplicate the item value.
zhash_t *
zhash_dup (zhash_t *self);
// Return keys for items in table
zlist_t *
zhash_keys (zhash_t *self);
// Simple iterator; returns first item in hash table, in no given order,
// or NULL if the table is empty. This method is simpler to use than the
// foreach() method, which is deprecated. To access the key for this item
// use zhash_cursor(). NOTE: do NOT modify the table while iterating.
void *
zhash_first (zhash_t *self);
// Simple iterator; returns next item in hash table, in no given order,
// or NULL if the last item was already returned. Use this together with
// zhash_first() to process all items in a hash table. If you need the
// items in sorted order, use zhash_keys() and then zlist_sort(). To
// access the key for this item use zhash_cursor(). NOTE: do NOT modify
// the table while iterating.
void *
zhash_next (zhash_t *self);
// After a successful first/next method, returns the key for the item that
// was returned. This is a constant string that you may not modify or
// deallocate, and which lasts as long as the item in the hash. After an
// unsuccessful first/next, returns NULL.
const char *
zhash_cursor (zhash_t *self);
// Add a comment to hash table before saving to disk. You can add as many
// comment lines as you like. These comment lines are discarded when loading
// the file. If you use a null format, all comments are deleted.
void
zhash_comment (zhash_t *self, const char *format, ...);
// Serialize hash table to a binary frame that can be sent in a message.
// The packed format is compatible with the 'dictionary' type defined in
// http://rfc.zeromq.org/spec:35/FILEMQ, and implemented by zproto:
//
// ; A list of name/value pairs
// dictionary = dict-count *( dict-name dict-value )
// dict-count = number-4
// dict-value = longstr
// dict-name = string
//
// ; Strings are always length + text contents
// longstr = number-4 *VCHAR
// string = number-1 *VCHAR
//
// ; Numbers are unsigned integers in network byte order
// number-1 = 1OCTET
// number-4 = 4OCTET
//
// Comments are not included in the packed data. Item values MUST be
// strings.
zframe_t *
zhash_pack (zhash_t *self);
// Save hash table to a text file in name=value format. Hash values must be
// printable strings; keys may not contain '=' character. Returns 0 if OK,
// else -1 if a file error occurred.
int
zhash_save (zhash_t *self, const char *filename);
// Load hash table from a text file in name=value format; hash table must
// already exist. Hash values must printable strings; keys may not contain
// '=' character. Returns 0 if OK, else -1 if a file was not readable.
int
zhash_load (zhash_t *self, const char *filename);
// When a hash table was loaded from a file by zhash_load, this method will
// reload the file if it has been modified since, and is "stable", i.e. not
// still changing. Returns 0 if OK, -1 if there was an error reloading the
// file.
int
zhash_refresh (zhash_t *self);
// Set hash for automatic value destruction. Note that this assumes that
// values are NULL-terminated strings. Do not use with different types.
void
zhash_autofree (zhash_t *self);
// Self test of this class.
void
zhash_test (bool verbose);
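// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated): a string-valued table with
// automatic value destruction:
//
//     zhash_t *hash = zhash_new ();
//     zhash_autofree (hash);                       // values copied and freed
//     zhash_insert (hash, "endpoint", "tcp://*:5555");
//     char *value = (char *) zhash_lookup (hash, "endpoint");
//     zhash_delete (hash, "endpoint");
//     zhash_destroy (&hash);
// ---------------------------------------------------------------------------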
// CLASS: zhashx
// Create a new, empty hash container
zhashx_t *
zhashx_new (void);
// Destroy a hash container and all items in it
void
zhashx_destroy (zhashx_t **self_p);
// Unpack binary frame into a new hash table. Packed data must follow format
// defined by zhashx_pack. Hash table is set to autofree. An empty frame
// unpacks to an empty hash table.
zhashx_t *
zhashx_unpack (zframe_t *frame);
// Same as unpack but uses a user-defined deserializer function to convert
// a longstr back into item format.
zhashx_t *
zhashx_unpack_own (zframe_t *frame, zhashx_deserializer_fn deserializer);
// Insert item into hash table with specified key and item.
// If key is already present returns -1 and leaves existing item unchanged
// Returns 0 on success.
int
zhashx_insert (zhashx_t *self, const void *key, void *item);
// Update or insert item into hash table with specified key and item. If the
// key is already present, destroys old item and inserts new one. If you set
// a container item destructor, this is called on the old value. If the key
// was not already present, inserts a new item. Sets the hash cursor to the
// new item.
void
zhashx_update (zhashx_t *self, const void *key, void *item);
// Remove an item specified by key from the hash table. If there was no such
// item, this function does nothing.
void
zhashx_delete (zhashx_t *self, const void *key);
// Delete all items from the hash table. If the key destructor is
// set, calls it on every key. If the item destructor is set, calls
// it on every item.
void
zhashx_purge (zhashx_t *self);
// Return the item at the specified key, or null
void *
zhashx_lookup (zhashx_t *self, const void *key);
// Reindexes an item from an old key to a new key. If there was no such
// item, does nothing. Returns 0 if successful, else -1.
int
zhashx_rename (zhashx_t *self, const void *old_key, const void *new_key);
// Set a free function for the specified hash table item. When the item is
// destroyed, the free function, if any, is called on that item.
// Use this when hash items are dynamically allocated, to ensure that
// you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
// Returns the item, or NULL if there is no such item.
void *
zhashx_freefn (zhashx_t *self, const void *key, zhashx_free_fn free_fn);
// Return the number of keys/items in the hash table
size_t
zhashx_size (zhashx_t *self);
// Return a zlistx_t containing the keys for the items in the
// table. Uses the key_duplicator to duplicate all keys and sets the
// key_destructor as destructor for the list.
zlistx_t *
zhashx_keys (zhashx_t *self);
// Return a zlistx_t containing the values for the items in the
// table. Uses the duplicator to duplicate all items and sets the
// destructor as destructor for the list.
zlistx_t *
zhashx_values (zhashx_t *self);
// Simple iterator; returns first item in hash table, in no given order,
// or NULL if the table is empty. This method is simpler to use than the
// foreach() method, which is deprecated. To access the key for this item
// use zhashx_cursor(). NOTE: do NOT modify the table while iterating.
void *
zhashx_first (zhashx_t *self);
// Simple iterator; returns next item in hash table, in no given order,
// or NULL if the last item was already returned. Use this together with
// zhashx_first() to process all items in a hash table. If you need the
// items in sorted order, use zhashx_keys() and then zlistx_sort(). To
// access the key for this item use zhashx_cursor(). NOTE: do NOT modify
// the table while iterating.
void *
zhashx_next (zhashx_t *self);
// After a successful first/next method, returns the key for the item that
// was returned. This is a constant string that you may not modify or
// deallocate, and which lasts as long as the item in the hash. After an
// unsuccessful first/next, returns NULL.
const void *
zhashx_cursor (zhashx_t *self);
// Add a comment to hash table before saving to disk. You can add as many
// comment lines as you like. These comment lines are discarded when loading
// the file. If you use a null format, all comments are deleted.
void
zhashx_comment (zhashx_t *self, const char *format, ...);
// Save hash table to a text file in name=value format. Hash values must be
// printable strings; keys may not contain '=' character. Returns 0 if OK,
// else -1 if a file error occurred.
int
zhashx_save (zhashx_t *self, const char *filename);
// Load hash table from a text file in name=value format; hash table must
// already exist. Hash values must be printable strings; keys may not contain
// '=' character. Returns 0 if OK, else -1 if a file was not readable.
int
zhashx_load (zhashx_t *self, const char *filename);
// When a hash table was loaded from a file by zhashx_load, this method will
// reload the file if it has been modified since, and is "stable", i.e. not
// still changing. Returns 0 if OK, -1 if there was an error reloading the
// file.
int
zhashx_refresh (zhashx_t *self);
// Serialize hash table to a binary frame that can be sent in a message.
// The packed format is compatible with the 'dictionary' type defined in
// http://rfc.zeromq.org/spec:35/FILEMQ, and implemented by zproto:
//
// ; A list of name/value pairs
// dictionary = dict-count *( dict-name dict-value )
// dict-count = number-4
// dict-value = longstr
// dict-name = string
//
// ; Strings are always length + text contents
// longstr = number-4 *VCHAR
// string = number-1 *VCHAR
//
// ; Numbers are unsigned integers in network byte order
// number-1 = 1OCTET
// number-4 = 4OCTET
//
// Comments are not included in the packed data. Item values MUST be
// strings.
zframe_t *
zhashx_pack (zhashx_t *self);
// Same as pack but uses a user-defined serializer function to convert items
// into longstr.
zframe_t *
zhashx_pack_own (zhashx_t *self, zhashx_serializer_fn serializer);
// Make a copy of the list; items are duplicated if you set a duplicator
// for the list, otherwise not. Copying a null reference returns a null
// reference. Note that this method's behavior changed slightly for CZMQ
// v3.x, as it does not set nor respect autofree. It does however let you
// duplicate any hash table safely. The old behavior is in zhashx_dup_v2.
zhashx_t *
zhashx_dup (zhashx_t *self);
// Set a user-defined deallocator for hash items; by default items are not
// freed when the hash is destroyed.
void
zhashx_set_destructor (zhashx_t *self, zhashx_destructor_fn destructor);
// Set a user-defined duplicator for hash items; by default items are not
// copied when the hash is duplicated.
void
zhashx_set_duplicator (zhashx_t *self, zhashx_duplicator_fn duplicator);
// Set a user-defined deallocator for keys; by default keys are freed
// when the hash is destroyed using free().
void
zhashx_set_key_destructor (zhashx_t *self, zhashx_destructor_fn destructor);
// Set a user-defined duplicator for keys; by default keys are duplicated
// using strdup.
void
zhashx_set_key_duplicator (zhashx_t *self, zhashx_duplicator_fn duplicator);
// Set a user-defined comparator for keys; by default keys are
// compared using strcmp.
// The callback function should return zero (0) on matching
// items.
void
zhashx_set_key_comparator (zhashx_t *self, zhashx_comparator_fn comparator);
// Set a user-defined hash function for keys; by default keys are
// hashed by a modified Bernstein hashing function.
void
zhashx_set_key_hasher (zhashx_t *self, zhashx_hash_fn hasher);
// Make copy of hash table; if supplied table is null, returns null.
// Does not copy items themselves. Rebuilds new table so may be slow on
// very large tables. NOTE: only works with item values that are strings
// since there's no other way to know how to duplicate the item value.
zhashx_t *
zhashx_dup_v2 (zhashx_t *self);
// Self test of this class.
void
zhashx_test (bool verbose);
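// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated): attach a destructor so the table
// owns its items; here the items are zframe_t objects:
//
//     zhashx_t *hash = zhashx_new ();
//     zhashx_set_destructor (hash, (zhashx_destructor_fn *) zframe_destroy);
//     zhashx_insert (hash, "key", zframe_from ("value"));
//     zhashx_purge (hash);                         // destroys the frame too
//     zhashx_destroy (&hash);
// ---------------------------------------------------------------------------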
// CLASS: ziflist
// Get a list of network interfaces currently defined on the system
ziflist_t *
ziflist_new (void);
// Destroy a ziflist instance
void
ziflist_destroy (ziflist_t **self_p);
// Reload network interfaces from system
void
ziflist_reload (ziflist_t *self);
// Return the number of network interfaces on system
size_t
ziflist_size (ziflist_t *self);
// Get first network interface, return NULL if there are none
const char *
ziflist_first (ziflist_t *self);
// Get next network interface, return NULL if we hit the last one
const char *
ziflist_next (ziflist_t *self);
// Return the current interface IP address as a printable string
const char *
ziflist_address (ziflist_t *self);
// Return the current interface broadcast address as a printable string
const char *
ziflist_broadcast (ziflist_t *self);
// Return the current interface network mask as a printable string
const char *
ziflist_netmask (ziflist_t *self);
// Print the list of interfaces to stdout.
void
ziflist_print (ziflist_t *self);
// Get a list of network interfaces currently defined on the system
// Includes IPv6 interfaces
ziflist_t *
ziflist_new_ipv6 (void);
// Reload network interfaces from system, including IPv6
void
ziflist_reload_ipv6 (ziflist_t *self);
// Return true if the current interface uses IPv6
bool
ziflist_is_ipv6 (ziflist_t *self);
// Self test of this class.
void
ziflist_test (bool verbose);
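// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated): walk the interfaces and print each
// address:
//
//     ziflist_t *iflist = ziflist_new ();
//     const char *name = ziflist_first (iflist);
//     while (name) {
//         printf ("%s: %s\n", name, ziflist_address (iflist));
//         name = ziflist_next (iflist);
//     }
//     ziflist_destroy (&iflist);
// ---------------------------------------------------------------------------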
// CLASS: zlist
// Create a new list container
zlist_t *
zlist_new (void);
// Destroy a list container
void
zlist_destroy (zlist_t **self_p);
// Return the item at the head of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the head item, or NULL if the list is empty.
void *
zlist_first (zlist_t *self);
// Return the next item. If the list is empty, returns NULL. To move to
// the start of the list call zlist_first (). Advances the cursor.
void *
zlist_next (zlist_t *self);
// Return the item at the tail of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the tail item, or NULL if the list is empty.
void *
zlist_last (zlist_t *self);
// Return first item in the list, or null, leaves the cursor
void *
zlist_head (zlist_t *self);
// Return last item in the list, or null, leaves the cursor
void *
zlist_tail (zlist_t *self);
// Return the current item of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the current item, or NULL if the list is empty.
void *
zlist_item (zlist_t *self);
// Append an item to the end of the list, return 0 if OK or -1 if this
// failed for some reason (out of memory). Note that if a duplicator has
// been set, this method will also duplicate the item.
int
zlist_append (zlist_t *self, void *item);
// Push an item to the start of the list, return 0 if OK or -1 if this
// failed for some reason (out of memory). Note that if a duplicator has
// been set, this method will also duplicate the item.
int
zlist_push (zlist_t *self, void *item);
// Pop the item off the start of the list, if any
void *
zlist_pop (zlist_t *self);
// Checks if an item already is present. Uses compare method to determine if
// items are equal. If the compare method is NULL the check will only compare
// pointers. Returns true if item is present else false.
bool
zlist_exists (zlist_t *self, void *item);
// Remove the specified item from the list if present
void
zlist_remove (zlist_t *self, void *item);
// Make a copy of list. If the list has autofree set, the copied list will
// duplicate all items, which must be strings. Otherwise, the list will hold
// pointers back to the items in the original list. If list is null, returns
// NULL.
zlist_t *
zlist_dup (zlist_t *self);
// Purge all items from list
void
zlist_purge (zlist_t *self);
// Return number of items in the list
size_t
zlist_size (zlist_t *self);
// Sort the list. If the compare function is null, sorts the list by
// ascending key value using a straight ASCII comparison. If you specify
// a compare function, this decides how items are sorted. The sort is not
// stable, so may reorder items with the same keys. The algorithm used is
// combsort, a compromise between performance and simplicity.
void
zlist_sort (zlist_t *self, zlist_compare_fn compare);
// Set list for automatic item destruction; item values MUST be strings.
// By default a list item refers to a value held elsewhere. When you set
// this, each time you append or push a list item, zlist will take a copy
// of the string value. Then, when you destroy the list, it will free all
// item values automatically. If you use any other technique to allocate
// list values, you must free them explicitly before destroying the list.
// The usual technique is to pop list items and destroy them, until the
// list is empty.
void
zlist_autofree (zlist_t *self);
// Sets a compare function for this list. The function compares two items.
// It returns an integer less than, equal to, or greater than zero if the
// first item is found, respectively, to be less than, to match, or be
// greater than the second item.
// This function is used for sorting, removal and exists checking.
void
zlist_comparefn (zlist_t *self, zlist_compare_fn fn);
// Set a free function for the specified list item. When the item is
// destroyed, the free function, if any, is called on that item.
// Use this when list items are dynamically allocated, to ensure that
// you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
// Returns the item, or NULL if there is no such item.
void *
zlist_freefn (zlist_t *self, void *item, zlist_free_fn fn, bool at_tail);
// Self test of this class.
void
zlist_test (bool verbose);
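// ---------------------------------------------------------------------------
// Editorial usage sketch (not generated): a string list that frees its own
// items:
//
//     zlist_t *list = zlist_new ();
//     zlist_autofree (list);
//     zlist_append (list, "apple");
//     zlist_push (list, "banana");
//     char *first = (char *) zlist_first (list);   // "banana"
//     zlist_destroy (&list);
// ---------------------------------------------------------------------------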
// CLASS: zlistx
// Create a new, empty list.
zlistx_t *
zlistx_new (void);
// Destroy a list. If an item destructor was specified, all items in the
// list are automatically destroyed as well.
void
zlistx_destroy (zlistx_t **self_p);
// Unpack binary frame into a new list. Packed data must follow format
// defined by zlistx_pack. List is set to autofree. An empty frame
// unpacks to an empty list.
zlistx_t *
zlistx_unpack (zframe_t *frame);
// Add an item to the head of the list. Calls the item duplicator, if any,
// on the item. Resets cursor to list head. Returns an item handle on
// success, NULL if memory was exhausted.
void *
zlistx_add_start (zlistx_t *self, void *item);
// Add an item to the tail of the list. Calls the item duplicator, if any,
// on the item. Resets cursor to list head. Returns an item handle on
// success, NULL if memory was exhausted.
void *
zlistx_add_end (zlistx_t *self, void *item);
// Return the number of items in the list
size_t
zlistx_size (zlistx_t *self);
// Return first item in the list, or null, leaves the cursor
void *
zlistx_head (zlistx_t *self);
// Return last item in the list, or null, leaves the cursor
void *
zlistx_tail (zlistx_t *self);
// Return the item at the head of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the head item, or NULL if the list is empty.
void *
zlistx_first (zlistx_t *self);
// Return the next item. At the end of the list (or in an empty list),
// returns NULL. Use repeated zlistx_next () calls to work through the list
// from zlistx_first (). First time, acts as zlistx_first().
void *
zlistx_next (zlistx_t *self);
// Return the previous item. At the start of the list (or in an empty list),
// returns NULL. Use repeated zlistx_prev () calls to work through the list
// backwards from zlistx_last (). First time, acts as zlistx_last().
void *
zlistx_prev (zlistx_t *self);
// Return the item at the tail of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the tail item, or NULL if the list is empty.
void *
zlistx_last (zlistx_t *self);
// Returns the value of the item at the cursor, or NULL if the cursor is
// not pointing to an item.
void *
zlistx_item (zlistx_t *self);
// Returns the handle of the item at the cursor, or NULL if the cursor is
// not pointing to an item.
void *
zlistx_cursor (zlistx_t *self);
// Returns the item associated with the given list handle, or NULL if the
// passed handle is NULL. Asserts that the passed handle points to a list element.
void *
zlistx_handle_item (void *handle);
// Find an item in the list, searching from the start. Uses the item
// comparator, if any, else compares item values directly. Returns the
// item handle found, or NULL. Sets the cursor to the found item, if any.
void *
zlistx_find (zlistx_t *self, void *item);
// Detach an item from the list, using its handle. The item is not modified,
// and the caller is responsible for destroying it if necessary. If handle is
// null, detaches the first item on the list. Returns item that was detached,
// or null if none was. If cursor was at item, moves cursor to previous item,
// so you can detach items while iterating forwards through a list.
void *
zlistx_detach (zlistx_t *self, void *handle);
// Detach item at the cursor, if any, from the list. The item is not modified,
// and the caller is responsible for destroying it as necessary. Returns item
// that was detached, or null if none was. Moves cursor to previous item, so
// you can detach items while iterating forwards through a list.
void *
zlistx_detach_cur (zlistx_t *self);
// Delete an item, using its handle. Calls the item destructor if any is
// set. If handle is null, deletes the first item on the list. Returns 0
// if an item was deleted, -1 if not. If cursor was at item, moves cursor
// to previous item, so you can delete items while iterating forwards
// through a list.
int
zlistx_delete (zlistx_t *self, void *handle);
// Move an item to the start of the list, via its handle.
void
zlistx_move_start (zlistx_t *self, void *handle);
// Move an item to the end of the list, via its handle.
void
zlistx_move_end (zlistx_t *self, void *handle);
// Remove all items from the list, and destroy them if the item destructor
// is set.
void
zlistx_purge (zlistx_t *self);
// Sort the list. If an item comparator was set, calls that to compare
// items, otherwise compares on item value. The sort is not stable, so may
// reorder equal items.
void
zlistx_sort (zlistx_t *self);
// Create a new node and insert it into a sorted list. Calls the item
// duplicator, if any, on the item. If low_value is true, starts searching
// from the start of the list, otherwise searches from the end. Use the item
// comparator, if any, to find where to place the new node. Returns a handle
// to the new node, or NULL if memory was exhausted. Resets the cursor to the
// list head.
void *
zlistx_insert (zlistx_t *self, void *item, bool low_value);
// Move an item, specified by handle, into position in a sorted list. Uses
// the item comparator, if any, to determine the new location. If low_value
// is true, starts searching from the start of the list, otherwise searches
// from the end.
void
zlistx_reorder (zlistx_t *self, void *handle, bool low_value);
// Make a copy of the list; items are duplicated if you set a duplicator
// for the list, otherwise not. Copying a null reference returns a null
// reference.
zlistx_t *
zlistx_dup (zlistx_t *self);
// Set a user-defined deallocator for list items; by default items are not
// freed when the list is destroyed.
void
zlistx_set_destructor (zlistx_t *self, zlistx_destructor_fn destructor);
// Set a user-defined duplicator for list items; by default items are not
// copied when the list is duplicated.
void
zlistx_set_duplicator (zlistx_t *self, zlistx_duplicator_fn duplicator);
// Set a user-defined comparator for zlistx_find and zlistx_sort; the method
// must return -1, 0, or 1 depending on whether item1 is less than, equal to,
// or greater than, item2.
void
zlistx_set_comparator (zlistx_t *self, zlistx_comparator_fn comparator);
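// Usage sketch (illustrative; the handler casts mirror the pattern used in
// the czmq self tests for lists of heap strings):
//
//     zlistx_t *list = zlistx_new ();
//     zlistx_set_destructor (list, (zlistx_destructor_fn *) zstr_free);
//     zlistx_set_duplicator (list, (zlistx_duplicator_fn *) strdup);
//     zlistx_set_comparator (list, (zlistx_comparator_fn *) strcmp);
//     zlistx_insert (list, "banana", true);   //  Items are duplicated ...
//     zlistx_insert (list, "apple", true);    //  ... and kept in order
//     assert (streq ((char *) zlistx_first (list), "apple"));
//     zlistx_destroy (&list);                 //  Destructor frees the copies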
// Serialize list to a binary frame that can be sent in a message.
// The packed format is compatible with the 'strings' type implemented by zproto:
//
// ; A list of strings
// list = list-count *longstr
// list-count = number-4
//
// ; Strings are always length + text contents
// longstr = number-4 *VCHAR
//
// ; Numbers are unsigned integers in network byte order
// number-4 = 4OCTET
zframe_t *
zlistx_pack (zlistx_t *self);
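// Usage sketch (illustrative; per the grammar above the packed format holds
// length-prefixed strings, so this assumes a list of string items):
//
//     zframe_t *frame = zlistx_pack (list);   //  Serialize the string list
//     zlistx_t *copy = zlistx_unpack (frame); //  Copy is set to autofree
//     zframe_destroy (&frame);
//     zlistx_destroy (&copy);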
// Self test of this class.
void
zlistx_test (bool verbose);
// CLASS: zloop
// Create a new zloop reactor
zloop_t *
zloop_new (void);
// Destroy a reactor
void
zloop_destroy (zloop_t **self_p);
// Register socket reader with the reactor. When the reader has messages,
// the reactor will call the handler, passing the arg. Returns 0 if OK, -1
// if there was an error. If you register the same socket more than once,
// each instance will invoke its corresponding handler.
int
zloop_reader (zloop_t *self, zsock_t *sock, zloop_reader_fn handler, void *arg);
// Cancel a socket reader from the reactor. If multiple readers exist for
// same socket, cancels ALL of them.
void
zloop_reader_end (zloop_t *self, zsock_t *sock);
// Configure a registered reader to ignore errors. If you do not set this,
// then readers that have errors are removed from the reactor silently.
void
zloop_reader_set_tolerant (zloop_t *self, zsock_t *sock);
// Register low-level libzmq pollitem with the reactor. When the pollitem
// is ready, will call the handler, passing the arg. Returns 0 if OK, -1
// if there was an error. If you register the pollitem more than once, each
// instance will invoke its corresponding handler. A pollitem with
// socket=NULL and fd=0 means 'poll on FD zero'.
int
zloop_poller (zloop_t *self, zmq_pollitem_t *item, zloop_fn handler, void *arg);
// Cancel a pollitem from the reactor, specified by socket or FD. If both
// are specified, uses only socket. If multiple poll items exist for same
// socket/FD, cancels ALL of them.
void
zloop_poller_end (zloop_t *self, zmq_pollitem_t *item);
// Configure a registered poller to ignore errors. If you do not set this,
// then pollers that have errors are removed from the reactor silently.
void
zloop_poller_set_tolerant (zloop_t *self, zmq_pollitem_t *item);
// Register a timer that expires after some delay and repeats some number of
// times. At each expiry, will call the handler, passing the arg. To run a
// timer forever, use 0 times. Returns a timer_id that is used to cancel the
// timer in the future. Returns -1 if there was an error.
int
zloop_timer (zloop_t *self, size_t delay, size_t times, zloop_timer_fn handler, void *arg);
// Cancel a specific timer identified by a specific timer_id (as returned by
// zloop_timer).
int
zloop_timer_end (zloop_t *self, int timer_id);
// Register a ticket timer. Ticket timers are very fast in the case where
// you use a lot of timers (thousands), and frequently remove and add them.
// The main use case is expiry timers for servers that handle many clients,
// and which reset the expiry timer for each message received from a client.
// Whereas normal timers perform poorly as the number of clients grows, the
// cost of ticket timers is constant, no matter the number of clients. You
// must set the ticket delay using zloop_set_ticket_delay before creating a
// ticket. Returns a handle to the timer that you should use in
// zloop_ticket_reset and zloop_ticket_delete.
void *
zloop_ticket (zloop_t *self, zloop_timer_fn handler, void *arg);
// Reset a ticket timer, which moves it to the end of the ticket list and
// resets its execution time. This is a very fast operation.
void
zloop_ticket_reset (zloop_t *self, void *handle);
// Delete a ticket timer. We do not actually delete the ticket here, as
// other code may still refer to the ticket. We mark as deleted, and remove
// later and safely.
void
zloop_ticket_delete (zloop_t *self, void *handle);
// Set the ticket delay, which applies to all tickets. If you lower the
// delay and there are already tickets created, the results are undefined.
void
zloop_set_ticket_delay (zloop_t *self, size_t ticket_delay);
// Set hard limit on number of timers allowed. Setting more than a small
// number of timers (10-100) can have a dramatic impact on the performance
// of the reactor. For high-volume cases, use ticket timers. If the hard
// limit is reached, the reactor stops creating new timers and logs an
// error.
void
zloop_set_max_timers (zloop_t *self, size_t max_timers);
// Set verbose tracing of reactor on/off. The default verbose setting is
// off (false).
void
zloop_set_verbose (zloop_t *self, bool verbose);
// By default the reactor stops if the process receives a SIGINT or SIGTERM
// signal. This makes it impossible to shut-down message based architectures
// like zactors. This method lets you switch off break handling. The default
// nonstop setting is off (false).
void
zloop_set_nonstop (zloop_t *self, bool nonstop);
// Start the reactor. Takes control of the thread and returns when the 0MQ
// context is terminated or the process is interrupted, or any event handler
// returns -1. Event handlers may register new sockets and timers, and
// cancel sockets. Returns 0 if interrupted, -1 if canceled by a handler.
int
zloop_start (zloop_t *self);
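// Usage sketch (illustrative): a reactor with one repeating timer whose
// handler ends the loop by returning -1 after three expiries:
//
//     static int
//     s_timer_event (zloop_t *loop, int timer_id, void *arg)
//     {
//         int *count = (int *) arg;
//         return ++(*count) < 3? 0: -1;       //  -1 stops the reactor
//     }
//
//     int count = 0;
//     zloop_t *loop = zloop_new ();
//     zloop_timer (loop, 100, 0, s_timer_event, &count);  //  Every 100 msec
//     zloop_start (loop);                     //  Returns when handler says -1
//     zloop_destroy (&loop);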
// Self test of this class.
void
zloop_test (bool verbose);
// CLASS: zmsg
// Create a new empty message object
zmsg_t *
zmsg_new (void);
// Destroy a message object and all frames it contains
void
zmsg_destroy (zmsg_t **self_p);
// Receive message from socket, returns zmsg_t object or NULL if the recv
// was interrupted. Does a blocking recv. If you want to not block then use
// the zloop class or zmsg_recv_nowait or zmq_poll to check for socket input
// before receiving.
zmsg_t *
zmsg_recv (void *source);
// Load/append an open file into a new message and return the message.
// Returns NULL if the message could not be loaded.
zmsg_t *
zmsg_load (FILE *file);
// Decodes a serialized message frame created by zmsg_encode () and returns
// a new zmsg_t object. Returns NULL if the frame was badly formatted or
// there was insufficient memory to work.
zmsg_t *
zmsg_decode (zframe_t *frame);
// Generate a signal message encoding the given status. A signal is a short
// message carrying a 1-byte success/failure code (by convention, 0 means
// OK). Signals are encoded to be distinguishable from "normal" messages.
zmsg_t *
zmsg_new_signal (byte status);
// Send message to destination socket, and destroy the message after sending
// it successfully. If the message has no frames, sends nothing but destroys
// the message anyhow. Nullifies the caller's reference to the message (as
// it is a destructor).
int
zmsg_send (zmsg_t **self_p, void *dest);
// Send message to destination socket as part of a multipart sequence, and
// destroy the message after sending it successfully. Note that after a
// zmsg_sendm, you must call zmsg_send or another method that sends a final
// message part. If the message has no frames, sends nothing but destroys
// the message anyhow. Nullifies the caller's reference to the message (as
// it is a destructor).
int
zmsg_sendm (zmsg_t **self_p, void *dest);
// Return size of message, i.e. number of frames (0 or more).
size_t
zmsg_size (zmsg_t *self);
// Return total size of all frames in message.
size_t
zmsg_content_size (zmsg_t *self);
// Return message routing ID, if the message came from a ZMQ_SERVER socket.
// Else returns zero.
uint32_t
zmsg_routing_id (zmsg_t *self);
// Set routing ID on message. This is used if/when the message is sent to a
// ZMQ_SERVER socket.
void
zmsg_set_routing_id (zmsg_t *self, uint32_t routing_id);
// Push frame to the front of the message, i.e. before all other frames.
// Message takes ownership of frame, will destroy it when message is sent.
// Returns 0 on success, -1 on error. Deprecates zmsg_push, which did not
// nullify the caller's frame reference.
int
zmsg_prepend (zmsg_t *self, zframe_t **frame_p);
// Add frame to the end of the message, i.e. after all other frames.
// Message takes ownership of frame, will destroy it when message is sent.
// Returns 0 on success. Deprecates zmsg_add, which did not nullify the
// caller's frame reference.
int
zmsg_append (zmsg_t *self, zframe_t **frame_p);
// Remove first frame from message, if any. Returns frame, or NULL.
zframe_t *
zmsg_pop (zmsg_t *self);
// Push block of memory to front of message, as a new frame.
// Returns 0 on success, -1 on error.
int
zmsg_pushmem (zmsg_t *self, const void *data, size_t size);
// Add block of memory to the end of the message, as a new frame.
// Returns 0 on success, -1 on error.
int
zmsg_addmem (zmsg_t *self, const void *data, size_t size);
// Push string as new frame to front of message.
// Returns 0 on success, -1 on error.
int
zmsg_pushstr (zmsg_t *self, const char *string);
// Push string as new frame to end of message.
// Returns 0 on success, -1 on error.
int
zmsg_addstr (zmsg_t *self, const char *string);
// Push formatted string as new frame to front of message.
// Returns 0 on success, -1 on error.
int
zmsg_pushstrf (zmsg_t *self, const char *format, ...);
// Push formatted string as new frame to end of message.
// Returns 0 on success, -1 on error.
int
zmsg_addstrf (zmsg_t *self, const char *format, ...);
// Pop frame off front of message, return as fresh string. If there were
// no more frames in the message, returns NULL.
char *
zmsg_popstr (zmsg_t *self);
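// Usage sketch (illustrative): build a two-frame message and read it back:
//
//     zmsg_t *msg = zmsg_new ();
//     zmsg_addstr (msg, "HELLO");
//     zmsg_addstrf (msg, "seq=%d", 1);
//     assert (zmsg_size (msg) == 2);
//     char *part = zmsg_popstr (msg);         //  "HELLO"
//     zstr_free (&part);
//     zmsg_destroy (&msg);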
// Push encoded message as a new frame. Message takes ownership of
// submessage, so the original is destroyed in this call. Returns 0 on
// success, -1 on error.
int
zmsg_addmsg (zmsg_t *self, zmsg_t **msg_p);
// Remove first submessage from message, if any. Returns zmsg_t, or NULL if
// decoding was not successful.
zmsg_t *
zmsg_popmsg (zmsg_t *self);
// Remove specified frame from list, if present. Does not destroy frame.
void
zmsg_remove (zmsg_t *self, zframe_t *frame);
// Set cursor to first frame in message. Returns frame, or NULL, if the
// message is empty. Use this to navigate the frames as a list.
zframe_t *
zmsg_first (zmsg_t *self);
// Return the next frame. If there are no more frames, returns NULL. To move
// to the first frame call zmsg_first(). Advances the cursor.
zframe_t *
zmsg_next (zmsg_t *self);
// Return the last frame. If there are no frames, returns NULL.
zframe_t *
zmsg_last (zmsg_t *self);
// Save message to an open file, return 0 if OK, else -1. The message is
// saved as a series of frames, each with length and data. Note that the
// file is NOT guaranteed to be portable between operating systems, nor
// between versions of CZMQ. The file format is at present undocumented and
// liable to arbitrary change.
int
zmsg_save (zmsg_t *self, FILE *file);
// Serialize multipart message to a single message frame. Use this method
// to send structured messages across transports that do not support
// multipart data. Allocates and returns a new frame containing the
// serialized message. To decode a serialized message frame, use
// zmsg_decode ().
zframe_t *
zmsg_encode (zmsg_t *self);
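// Usage sketch (illustrative): round-trip a multipart message through a
// single serialized frame:
//
//     zframe_t *frame = zmsg_encode (msg);
//     zmsg_t *copy = zmsg_decode (frame);
//     assert (copy && zmsg_eq (msg, copy));
//     zframe_destroy (&frame);
//     zmsg_destroy (&copy);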
// Create copy of message, as new message object. Returns a fresh zmsg_t
// object. If message is null, or memory was exhausted, returns null.
zmsg_t *
zmsg_dup (zmsg_t *self);
// Send message to zsys log sink (may be stdout, or system facility as
// configured by zsys_set_logstream).
// Long messages are truncated.
void
zmsg_print (zmsg_t *self);
// Send message to zsys log sink (may be stdout, or system facility as
// configured by zsys_set_logstream).
// Message length is specified; no truncation unless length is zero.
// Backwards compatible with zframe_print when length is zero.
void
zmsg_print_n (zmsg_t *self, size_t size);
// Return true if the two messages have the same number of frames and each
// frame in the first message is identical to the corresponding frame in the
// other message. As with zframe_eq, return false if either message is NULL.
bool
zmsg_eq (zmsg_t *self, zmsg_t *other);
// Return signal value, 0 or greater, if message is a signal, -1 if not.
int
zmsg_signal (zmsg_t *self);
// Probe the supplied object, and report if it looks like a zmsg_t.
bool
zmsg_is (void *self);
// Self test of this class.
void
zmsg_test (bool verbose);
// CLASS: zpoller
// Create new poller, specifying zero or more readers. The list of
// readers ends in a NULL. Each reader can be a zsock_t instance, a
// zactor_t instance, a libzmq socket (void *), or a file handle.
zpoller_t *
zpoller_new (void *reader, ...);
// Destroy a poller
void
zpoller_destroy (zpoller_t **self_p);
// Add a reader to be polled. Returns 0 if OK, -1 on failure. The reader may
// be a libzmq void * socket, a zsock_t instance, a zactor_t instance or a
// file handle.
int
zpoller_add (zpoller_t *self, void *reader);
// Remove a reader from the poller; returns 0 if OK, -1 on failure. The reader
// must have been passed during construction, or in a zpoller_add () call.
int
zpoller_remove (zpoller_t *self, void *reader);
// By default the poller stops if the process receives a SIGINT or SIGTERM
// signal. This makes it impossible to shut-down message based architectures
// like zactors. This method lets you switch off break handling. The default
// nonstop setting is off (false).
void
zpoller_set_nonstop (zpoller_t *self, bool nonstop);
// Poll the registered readers for I/O, return first reader that has input.
// The reader will be a libzmq void * socket, a zsock_t, a zactor_t
// instance or a file handle as specified in zpoller_new/zpoller_add. The
// timeout should be zero or greater, or -1 to wait indefinitely. Socket
// priority is defined by their order in the poll list. If you need a
// balanced poll, use the low level zmq_poll method directly. If the poll
// call was interrupted (SIGINT), or the ZMQ context was destroyed, or the
// timeout expired, returns NULL. You can test the actual exit condition by
// calling zpoller_expired () and zpoller_terminated (). The timeout is in
// msec.
void *
zpoller_wait (zpoller_t *self, int timeout);
// Return true if the last zpoller_wait () call ended because the timeout
// expired, without any error.
bool
zpoller_expired (zpoller_t *self);
// Return true if the last zpoller_wait () call ended because the process
// was interrupted, or the parent context was destroyed.
bool
zpoller_terminated (zpoller_t *self);
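// Usage sketch (illustrative; 'sock1' and 'sock2' are assumed zsock_t
// readers): poll with a 1-second timeout and classify the outcome:
//
//     zpoller_t *poller = zpoller_new (sock1, sock2, NULL);
//     zsock_t *which = (zsock_t *) zpoller_wait (poller, 1000);
//     if (which)
//         { /* input pending on 'which', e.g. zmsg_recv (which) */ }
//     else
//     if (zpoller_expired (poller))
//         { /* timed out without input */ }
//     else
//     if (zpoller_terminated (poller))
//         { /* interrupted; shut down */ }
//     zpoller_destroy (&poller);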
// Self test of this class.
void
zpoller_test (bool verbose);
// CLASS: zproc
// Create a new zproc.
// NOTE: On Windows, and with libzmq3 and libzmq2, this function returns
// NULL; the code has not yet been ported to those targets.
zproc_t *
zproc_new (void);
// Destroy zproc, wait until process ends.
void
zproc_destroy (zproc_t **self_p);
// Return command line arguments (the first item is the executable) or
// NULL if not set.
zlist_t *
zproc_args (zproc_t *self);
// Set up the command line arguments; the first item must be an (absolute)
// filename to run.
void
    zproc_set_args (zproc_t *self, zlist_t **arguments);
// Set up the command line arguments; the first item must be an (absolute)
// filename to run. Variadic function, must be NULL terminated.
void
    zproc_set_argsx (zproc_t *self, const char *arguments, ...);
// Set up the environment variables for the process.
void
    zproc_set_env (zproc_t *self, zhash_t **arguments);
zproc_set_env (zproc_t *self, zhash_t **arguments);
// Connects process stdin with a readable ('>', connect) zeromq socket. If
// the socket argument is NULL, zproc creates its own managed pair of inproc
// sockets. The writable one is then accessible via the zproc_stdin method.
void
    zproc_set_stdin (zproc_t *self, void *socket);
// Connects process stdout with a writable ('@', bind) zeromq socket. If
// the socket argument is NULL, zproc creates its own managed pair of inproc
// sockets. The readable one is then accessible via the zproc_stdout method.
void
    zproc_set_stdout (zproc_t *self, void *socket);
// Connects process stderr with a writable ('@', bind) zeromq socket. If
// the socket argument is NULL, zproc creates its own managed pair of inproc
// sockets. The readable one is then accessible via the zproc_stderr method.
void
    zproc_set_stderr (zproc_t *self, void *socket);
// Return the subprocess stdin writable socket. Returns NULL if the socket
// is not initialized or is external.
void *
    zproc_stdin (zproc_t *self);
// Return the subprocess stdout readable socket. Returns NULL if the socket
// is not initialized or is external.
void *
    zproc_stdout (zproc_t *self);
// Return the subprocess stderr readable socket. Returns NULL if the socket
// is not initialized or is external.
void *
    zproc_stderr (zproc_t *self);
// Start the process; returns just before execve/CreateProcess is called.
int
    zproc_run (zproc_t *self);
// Return the process exit code.
int
    zproc_returncode (zproc_t *self);
// Return the PID of the process.
int
    zproc_pid (zproc_t *self);
// Return true if the process is running, false if not yet started or finished.
bool
    zproc_running (zproc_t *self);
// Wait for or poll the process status and return its return code.
// The timeout should be zero or greater, or -1 to wait indefinitely.
int
    zproc_wait (zproc_t *self, int timeout);
// Send the SIGTERM signal to the subprocess, wait for the grace period, and
// eventually send SIGKILL.
void
    zproc_shutdown (zproc_t *self, int timeout);
// Return the internal actor; useful for polling whether the process died.
void *
    zproc_actor (zproc_t *self);
// Send a signal to the subprocess.
void
    zproc_kill (zproc_t *self, int signal);
// Set verbose mode.
void
    zproc_set_verbose (zproc_t *self, bool verbose);
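// Usage sketch (illustrative; note the absolute-filename requirement of
// zproc_set_args above):
//
//     zproc_t *proc = zproc_new ();
//     zproc_set_argsx (proc, "/bin/echo", "hello", NULL);
//     if (zproc_run (proc) == 0) {
//         zproc_wait (proc, -1);              //  Block until process ends
//         assert (zproc_returncode (proc) == 0);
//     }
//     zproc_destroy (&proc);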
// Self test of this class.
void
zproc_test (bool verbose);
// CLASS: zsock
// Create a new socket. Returns the new socket, or NULL if the new socket
// could not be created. Note that the symbol zsock_new (and other
// constructors/destructors for zsock) are redirected to the *_checked
// variant, enabling intelligent socket leak detection. This can have
// performance implications if you use a LOT of sockets. To turn off this
// redirection behaviour, define ZSOCK_NOCHECK.
zsock_t *
zsock_new (int type);
// Destroy the socket. You must use this for any socket created via the
// zsock_new method.
void
zsock_destroy (zsock_t **self_p);
// Create a PUB socket. Default action is bind.
zsock_t *
zsock_new_pub (const char *endpoint);
// Create a SUB socket, and optionally subscribe to some prefix string. Default
// action is connect.
zsock_t *
zsock_new_sub (const char *endpoint, const char *subscribe);
// Create a REQ socket. Default action is connect.
zsock_t *
zsock_new_req (const char *endpoint);
// Create a REP socket. Default action is bind.
zsock_t *
zsock_new_rep (const char *endpoint);
// Create a DEALER socket. Default action is connect.
zsock_t *
zsock_new_dealer (const char *endpoint);
// Create a ROUTER socket. Default action is bind.
zsock_t *
zsock_new_router (const char *endpoint);
// Create a PUSH socket. Default action is connect.
zsock_t *
zsock_new_push (const char *endpoint);
// Create a PULL socket. Default action is bind.
zsock_t *
zsock_new_pull (const char *endpoint);
// Create an XPUB socket. Default action is bind.
zsock_t *
zsock_new_xpub (const char *endpoint);
// Create an XSUB socket. Default action is connect.
zsock_t *
zsock_new_xsub (const char *endpoint);
// Create a PAIR socket. Default action is connect.
zsock_t *
zsock_new_pair (const char *endpoint);
// Create a STREAM socket. Default action is connect.
zsock_t *
zsock_new_stream (const char *endpoint);
// Create a SERVER socket. Default action is bind.
zsock_t *
zsock_new_server (const char *endpoint);
// Create a CLIENT socket. Default action is connect.
zsock_t *
zsock_new_client (const char *endpoint);
// Create a RADIO socket. Default action is bind.
zsock_t *
zsock_new_radio (const char *endpoint);
// Create a DISH socket. Default action is connect.
zsock_t *
zsock_new_dish (const char *endpoint);
// Create a GATHER socket. Default action is bind.
zsock_t *
zsock_new_gather (const char *endpoint);
// Create a SCATTER socket. Default action is connect.
zsock_t *
zsock_new_scatter (const char *endpoint);
// Create a DGRAM (UDP) socket. Default action is bind.
// The endpoint is a string consisting of a
// 'transport'`://` followed by an 'address'. As this is
// a UDP socket the 'transport' has to be 'udp'. The
// 'address' specifies the ip address and port to
// bind to. For example: udp://127.0.0.1:1234
// Note: to send to an endpoint over UDP you must send a
// message with the destination endpoint address as the
// first frame.
zsock_t *
zsock_new_dgram (const char *endpoint);
// Bind a socket to a formatted endpoint. For tcp:// endpoints, supports
// ephemeral ports, if you specify the port number as "*". By default
// zsock uses the IANA designated range from C000 (49152) to FFFF (65535).
// To override this range, follow the "*" with "[first-last]". Either or
// both first and last may be empty. To bind to a random port within the
// range, use "!" in place of "*".
//
// Examples:
// tcp://127.0.0.1:* bind to first free port from C000 up
// tcp://127.0.0.1:! bind to random port from C000 to FFFF
// tcp://127.0.0.1:*[60000-] bind to first free port from 60000 up
// tcp://127.0.0.1:![-60000] bind to random port from C000 to 60000
// tcp://127.0.0.1:![55000-55999]
// bind to random port from 55000 to 55999
//
// On success, returns the actual port number used, for tcp:// endpoints,
// and 0 for other transports. On failure, returns -1. Note that when using
// ephemeral ports, a port may be reused by different services without
// clients being aware. Protocols that run on ephemeral ports should take
// this into account.
int
zsock_bind (zsock_t *self, const char *format, ...);
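// Usage sketch (illustrative): bind to an ephemeral port and connect a
// peer to the port that was actually chosen:
//
//     zsock_t *server = zsock_new (ZMQ_PULL);
//     int port = zsock_bind (server, "tcp://127.0.0.1:*");
//     assert (port != -1);                    //  Port from the C000+ range
//     zsock_t *client = zsock_new (ZMQ_PUSH);
//     zsock_connect (client, "tcp://127.0.0.1:%d", port);
//     zsock_destroy (&client);
//     zsock_destroy (&server);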
// Returns last bound endpoint, if any.
const char *
zsock_endpoint (zsock_t *self);
// Unbind a socket from a formatted endpoint.
// Returns 0 if OK, -1 if the endpoint was invalid or the function
// isn't supported.
int
zsock_unbind (zsock_t *self, const char *format, ...);
// Connect a socket to a formatted endpoint
// Returns 0 if OK, -1 if the endpoint was invalid.
int
zsock_connect (zsock_t *self, const char *format, ...);
// Disconnect a socket from a formatted endpoint
// Returns 0 if OK, -1 if the endpoint was invalid or the function
// isn't supported.
int
zsock_disconnect (zsock_t *self, const char *format, ...);
// Attach a socket to zero or more endpoints. If endpoints is not null,
// parses as list of ZeroMQ endpoints, separated by commas, and prefixed by
// '@' (to bind the socket) or '>' (to connect the socket). Returns 0 if all
// endpoints were valid, or -1 if there was a syntax error. If the endpoint
// does not start with '@' or '>', the serverish argument defines whether
// it is used to bind (serverish = true) or connect (serverish = false).
int
zsock_attach (zsock_t *self, const char *endpoints, bool serverish);
// Returns socket type as printable constant string.
const char *
zsock_type_str (zsock_t *self);
// Send a 'picture' message to the socket (or actor). The picture is a
// string that defines the type of each frame. This makes it easy to send
// a complex multiframe message in one call. The picture can contain any
// of these characters, each corresponding to one or two arguments:
//
// i = int (signed)
// 1 = uint8_t
// 2 = uint16_t
// 4 = uint32_t
// 8 = uint64_t
// s = char *
// b = byte *, size_t (2 arguments)
// c = zchunk_t *
// f = zframe_t *
// h = zhashx_t *
// l = zlistx_t * (DRAFT)
// U = zuuid_t *
// p = void * (sends the pointer value, only meaningful over inproc)
// m = zmsg_t * (sends all frames in the zmsg)
// z = sends zero-sized frame (0 arguments)
// u = uint (deprecated)
//
// Note that s, b, c, and f are encoded the same way and the choice is
// offered as a convenience to the sender, which may or may not already
// have data in a zchunk or zframe. Does not change or take ownership of
// any arguments. Returns 0 if successful, -1 if sending failed for any
// reason.
int
zsock_send (void *self, const char *picture, ...);
// Send a 'picture' message to the socket (or actor). This is a va_list
// version of zsock_send (), so please consult its documentation for the
// details.
int
zsock_vsend (void *self, const char *picture, va_list argptr);
// Receive a 'picture' message to the socket (or actor). See zsock_send for
// the format and meaning of the picture. Returns the picture elements into
// a series of pointers as provided by the caller:
//
// i = int * (stores signed integer)
// 4 = uint32_t * (stores 32-bit unsigned integer)
// 8 = uint64_t * (stores 64-bit unsigned integer)
// s = char ** (allocates new string)
// b = byte **, size_t * (2 arguments) (allocates memory)
// c = zchunk_t ** (creates zchunk)
// f = zframe_t ** (creates zframe)
// U = zuuid_t * (creates a zuuid with the data)
// h = zhashx_t ** (creates zhashx)
// l = zlistx_t ** (creates zlistx) (DRAFT)
// p = void ** (stores pointer)
// m = zmsg_t ** (creates a zmsg with the remaining frames)
// z = null, asserts empty frame (0 arguments)
// u = uint * (stores unsigned integer, deprecated)
//
// Note that zsock_recv creates the returned objects, and the caller must
// destroy them when finished with them. The supplied pointers do not need
// to be initialized. Returns 0 if successful, or -1 if it failed to recv
// a message, in which case the pointers are not modified. When message
// frames are truncated (a short message), sets return values to zero/null.
// If an argument pointer is NULL, does not store any value (skips it).
// An 'n' picture matches an empty frame; if the message does not match,
// the method will return -1.
int
zsock_recv (void *self, const char *picture, ...);
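// Usage sketch (illustrative; 'writer' and 'reader' are assumed to be two
// connected sockets): send and receive a three-element picture:
//
//     zsock_send (writer, "si8", "status", 200, (uint64_t) 42);
//     char *text; int code; uint64_t seq;
//     if (zsock_recv (reader, "si8", &text, &code, &seq) == 0) {
//         assert (streq (text, "status") && code == 200 && seq == 42);
//         zstr_free (&text);                  //  's' allocates a new string
//     }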
// Receive a 'picture' message from the socket (or actor). This is a
// va_list version of zsock_recv (), so please consult its documentation
// for the details.
int
zsock_vrecv (void *self, const char *picture, va_list argptr);
// Send a binary encoded 'picture' message to the socket (or actor). This
// method is similar to zsock_send, except the arguments are encoded in a
// binary format that is compatible with zproto, and is designed to reduce
// memory allocations. The pattern argument is a string that defines the
// type of each argument. Supports these argument types:
//
// pattern C type zproto type:
// 1 uint8_t type = "number" size = "1"
// 2 uint16_t type = "number" size = "2"
// 4 uint32_t type = "number" size = "3"
// 8 uint64_t type = "number" size = "4"
// s char *, 0-255 chars type = "string"
// S char *, 0-2^32-1 chars type = "longstr"
// c zchunk_t * type = "chunk"
// f zframe_t * type = "frame"
// u zuuid_t * type = "uuid"
// m zmsg_t * type = "msg"
// p void *, sends pointer value, only over inproc
//
// Does not change or take ownership of any arguments. Returns 0 if
// successful, -1 if sending failed for any reason.
int
zsock_bsend (void *self, const char *picture, ...);
// Receive a binary encoded 'picture' message from the socket (or actor).
// This method is similar to zsock_recv, except the arguments are encoded
// in a binary format that is compatible with zproto, and is designed to
// reduce memory allocations. The pattern argument is a string that defines
// the type of each argument. See zsock_bsend for the supported argument
// types. All arguments must be pointers; this call sets them to point to
// values held on a per-socket basis.
// For types 1, 2, 4 and 8 the caller must allocate the memory itself before
// calling zsock_brecv.
// For type S, the caller must free the value once finished with it, as
// zsock_brecv will allocate the buffer.
// For type s, the caller must not free the value as it is stored in a
// local cache for performance purposes.
// For types c, f, u and m the caller must call the appropriate destructor
// depending on the object as zsock_brecv will create new objects.
// For type p the caller must coordinate with the sender, as it is just a
// pointer value being passed.
int
zsock_brecv (void *self, const char *picture, ...);
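// Usage sketch (illustrative, same assumed 'writer'/'reader' pair; note the
// ownership rules above, especially that 's' values must not be freed):
//
//     uint8_t id_out = 7;
//     zsock_bsend (writer, "1s", id_out, "event");
//     uint8_t id; char *name;                 //  'name' points into a cache
//     if (zsock_brecv (reader, "1s", &id, &name) == 0)
//         assert (id == 7 && streq (name, "event"));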
// Return socket routing ID if any. This returns 0 if the socket is not
// of type ZMQ_SERVER or if no request has yet been received on it.
uint32_t
zsock_routing_id (zsock_t *self);
// Set routing ID on socket. The socket MUST be of type ZMQ_SERVER.
// This will be used when sending messages on the socket via the zsock API.
void
zsock_set_routing_id (zsock_t *self, uint32_t routing_id);
// Set socket to use unbounded pipes (HWM=0); use this in cases when you are
// totally certain the message volume can fit in memory. This method works
// across all versions of ZeroMQ. Takes a polymorphic socket reference.
void
zsock_set_unbounded (void *self);
// Send a signal over a socket. A signal is a short message carrying a
// success/failure code (by convention, 0 means OK). Signals are encoded
// to be distinguishable from "normal" messages. Accepts a zsock_t or a
// zactor_t argument, and returns 0 if successful, -1 if the signal could
// not be sent. Takes a polymorphic socket reference.
int
zsock_signal (void *self, byte status);
// Wait on a signal. Use this to coordinate between threads, over pipe
// pairs. Blocks until the signal is received. Returns -1 on error, 0 or
// greater on success. Accepts a zsock_t or a zactor_t as argument.
// Takes a polymorphic socket reference.
int
zsock_wait (void *self);
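// Usage sketch (illustrative): coordinate the two ends of an inproc pipe
// (zsys_create_pipe is declared in the zsys class below):
//
//     zsock_t *backend;
//     zsock_t *frontend = zsys_create_pipe (&backend);
//     zsock_signal (backend, 0);              //  0 means OK by convention
//     assert (zsock_wait (frontend) == 0);    //  Blocks until the signal
//     zsock_destroy (&frontend);
//     zsock_destroy (&backend);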
// If there is a partial message still waiting on the socket, remove and
// discard it. This is useful when reading partial messages, to get specific
// message types.
void
zsock_flush (void *self);
// Join a group for the RADIO-DISH pattern. Call only on ZMQ_DISH.
// Returns 0 if OK, -1 if failed.
int
zsock_join (void *self, const char *group);
// Leave a group for the RADIO-DISH pattern. Call only on ZMQ_DISH.
// Returns 0 if OK, -1 if failed.
int
zsock_leave (void *self, const char *group);
// Probe the supplied object, and report if it looks like a zsock_t.
// Takes a polymorphic socket reference.
bool
zsock_is (void *self);
// Probe the supplied reference. If it looks like a zsock_t instance, return
// the underlying libzmq socket handle; else if it looks like a file
// descriptor, return NULL; else if it looks like a libzmq socket handle,
// return the supplied value. Takes a polymorphic socket reference.
void *
zsock_resolve (void *self);
// Check whether the socket has available message to read.
bool
zsock_has_in (void *self);
// Set socket option `only_first_subscribe`.
// Available from libzmq 4.3.0.
void
zsock_set_only_first_subscribe (void *self, int only_first_subscribe);
// Set socket option `hello_msg`.
// Available from libzmq 4.3.0.
void
zsock_set_hello_msg (void *self, zframe_t *hello_msg);
// Set socket option `disconnect_msg`.
// Available from libzmq 4.3.0.
void
zsock_set_disconnect_msg (void *self, zframe_t *disconnect_msg);
// Set socket option `wss_trust_system`.
// Available from libzmq 4.3.0.
void
zsock_set_wss_trust_system (void *self, int wss_trust_system);
// Set socket option `wss_hostname`.
// Available from libzmq 4.3.0.
void
zsock_set_wss_hostname (void *self, const char *wss_hostname);
// Set socket option `wss_trust_pem`.
// Available from libzmq 4.3.0.
void
zsock_set_wss_trust_pem (void *self, const char *wss_trust_pem);
// Set socket option `wss_cert_pem`.
// Available from libzmq 4.3.0.
void
zsock_set_wss_cert_pem (void *self, const char *wss_cert_pem);
// Set socket option `wss_key_pem`.
// Available from libzmq 4.3.0.
void
zsock_set_wss_key_pem (void *self, const char *wss_key_pem);
// Get socket option `out_batch_size`.
// Available from libzmq 4.3.0.
int
zsock_out_batch_size (void *self);
// Set socket option `out_batch_size`.
// Available from libzmq 4.3.0.
void
zsock_set_out_batch_size (void *self, int out_batch_size);
// Get socket option `in_batch_size`.
// Available from libzmq 4.3.0.
int
zsock_in_batch_size (void *self);
// Set socket option `in_batch_size`.
// Available from libzmq 4.3.0.
void
zsock_set_in_batch_size (void *self, int in_batch_size);
// Get socket option `socks_password`.
// Available from libzmq 4.3.0.
char *
zsock_socks_password (void *self);
// Set socket option `socks_password`.
// Available from libzmq 4.3.0.
void
zsock_set_socks_password (void *self, const char *socks_password);
// Get socket option `socks_username`.
// Available from libzmq 4.3.0.
char *
zsock_socks_username (void *self);
// Set socket option `socks_username`.
// Available from libzmq 4.3.0.
void
zsock_set_socks_username (void *self, const char *socks_username);
// Set socket option `xpub_manual_last_value`.
// Available from libzmq 4.3.0.
void
zsock_set_xpub_manual_last_value (void *self, int xpub_manual_last_value);
// Get socket option `router_notify`.
// Available from libzmq 4.3.0.
int
zsock_router_notify (void *self);
// Set socket option `router_notify`.
// Available from libzmq 4.3.0.
void
zsock_set_router_notify (void *self, int router_notify);
// Get socket option `multicast_loop`.
// Available from libzmq 4.3.0.
int
zsock_multicast_loop (void *self);
// Set socket option `multicast_loop`.
// Available from libzmq 4.3.0.
void
zsock_set_multicast_loop (void *self, int multicast_loop);
// Get socket option `metadata`.
// Available from libzmq 4.3.0.
char *
zsock_metadata (void *self);
// Set socket option `metadata`.
// Available from libzmq 4.3.0.
void
zsock_set_metadata (void *self, const char *metadata);
// Get socket option `loopback_fastpath`.
// Available from libzmq 4.3.0.
int
zsock_loopback_fastpath (void *self);
// Set socket option `loopback_fastpath`.
// Available from libzmq 4.3.0.
void
zsock_set_loopback_fastpath (void *self, int loopback_fastpath);
// Get socket option `zap_enforce_domain`.
// Available from libzmq 4.3.0.
int
zsock_zap_enforce_domain (void *self);
// Set socket option `zap_enforce_domain`.
// Available from libzmq 4.3.0.
void
zsock_set_zap_enforce_domain (void *self, int zap_enforce_domain);
// Get socket option `gssapi_principal_nametype`.
// Available from libzmq 4.3.0.
int
zsock_gssapi_principal_nametype (void *self);
// Set socket option `gssapi_principal_nametype`.
// Available from libzmq 4.3.0.
void
zsock_set_gssapi_principal_nametype (void *self, int gssapi_principal_nametype);
// Get socket option `gssapi_service_principal_nametype`.
// Available from libzmq 4.3.0.
int
zsock_gssapi_service_principal_nametype (void *self);
// Set socket option `gssapi_service_principal_nametype`.
// Available from libzmq 4.3.0.
void
zsock_set_gssapi_service_principal_nametype (void *self, int gssapi_service_principal_nametype);
// Get socket option `bindtodevice`.
// Available from libzmq 4.3.0.
char *
zsock_bindtodevice (void *self);
// Set socket option `bindtodevice`.
// Available from libzmq 4.3.0.
void
zsock_set_bindtodevice (void *self, const char *bindtodevice);
// Get socket option `heartbeat_ivl`.
// Available from libzmq 4.2.0.
int
zsock_heartbeat_ivl (void *self);
// Set socket option `heartbeat_ivl`.
// Available from libzmq 4.2.0.
void
zsock_set_heartbeat_ivl (void *self, int heartbeat_ivl);
// Get socket option `heartbeat_ttl`.
// Available from libzmq 4.2.0.
int
zsock_heartbeat_ttl (void *self);
// Set socket option `heartbeat_ttl`.
// Available from libzmq 4.2.0.
void
zsock_set_heartbeat_ttl (void *self, int heartbeat_ttl);
// Get socket option `heartbeat_timeout`.
// Available from libzmq 4.2.0.
int
zsock_heartbeat_timeout (void *self);
// Set socket option `heartbeat_timeout`.
// Available from libzmq 4.2.0.
void
zsock_set_heartbeat_timeout (void *self, int heartbeat_timeout);
// Get socket option `use_fd`.
// Available from libzmq 4.2.0.
int
zsock_use_fd (void *self);
// Set socket option `use_fd`.
// Available from libzmq 4.2.0.
void
zsock_set_use_fd (void *self, int use_fd);
// Set socket option `xpub_manual`.
// Available from libzmq 4.2.0.
void
zsock_set_xpub_manual (void *self, int xpub_manual);
// Set socket option `xpub_welcome_msg`.
// Available from libzmq 4.2.0.
void
zsock_set_xpub_welcome_msg (void *self, const char *xpub_welcome_msg);
// Set socket option `stream_notify`.
// Available from libzmq 4.2.0.
void
zsock_set_stream_notify (void *self, int stream_notify);
// Get socket option `invert_matching`.
// Available from libzmq 4.2.0.
int
zsock_invert_matching (void *self);
// Set socket option `invert_matching`.
// Available from libzmq 4.2.0.
void
zsock_set_invert_matching (void *self, int invert_matching);
// Set socket option `xpub_verboser`.
// Available from libzmq 4.2.0.
void
zsock_set_xpub_verboser (void *self, int xpub_verboser);
// Get socket option `connect_timeout`.
// Available from libzmq 4.2.0.
int
zsock_connect_timeout (void *self);
// Set socket option `connect_timeout`.
// Available from libzmq 4.2.0.
void
zsock_set_connect_timeout (void *self, int connect_timeout);
// Get socket option `tcp_maxrt`.
// Available from libzmq 4.2.0.
int
zsock_tcp_maxrt (void *self);
// Set socket option `tcp_maxrt`.
// Available from libzmq 4.2.0.
void
zsock_set_tcp_maxrt (void *self, int tcp_maxrt);
// Get socket option `thread_safe`.
// Available from libzmq 4.2.0.
int
zsock_thread_safe (void *self);
// Get socket option `multicast_maxtpdu`.
// Available from libzmq 4.2.0.
int
zsock_multicast_maxtpdu (void *self);
// Set socket option `multicast_maxtpdu`.
// Available from libzmq 4.2.0.
void
zsock_set_multicast_maxtpdu (void *self, int multicast_maxtpdu);
// Get socket option `vmci_buffer_size`.
// Available from libzmq 4.2.0.
int
zsock_vmci_buffer_size (void *self);
// Set socket option `vmci_buffer_size`.
// Available from libzmq 4.2.0.
void
zsock_set_vmci_buffer_size (void *self, int vmci_buffer_size);
// Get socket option `vmci_buffer_min_size`.
// Available from libzmq 4.2.0.
int
zsock_vmci_buffer_min_size (void *self);
// Set socket option `vmci_buffer_min_size`.
// Available from libzmq 4.2.0.
void
zsock_set_vmci_buffer_min_size (void *self, int vmci_buffer_min_size);
// Get socket option `vmci_buffer_max_size`.
// Available from libzmq 4.2.0.
int
zsock_vmci_buffer_max_size (void *self);
// Set socket option `vmci_buffer_max_size`.
// Available from libzmq 4.2.0.
void
zsock_set_vmci_buffer_max_size (void *self, int vmci_buffer_max_size);
// Get socket option `vmci_connect_timeout`.
// Available from libzmq 4.2.0.
int
zsock_vmci_connect_timeout (void *self);
// Set socket option `vmci_connect_timeout`.
// Available from libzmq 4.2.0.
void
zsock_set_vmci_connect_timeout (void *self, int vmci_connect_timeout);
// Get socket option `tos`.
// Available from libzmq 4.1.0.
int
zsock_tos (void *self);
// Set socket option `tos`.
// Available from libzmq 4.1.0.
void
zsock_set_tos (void *self, int tos);
// Set socket option `router_handover`.
// Available from libzmq 4.1.0.
void
zsock_set_router_handover (void *self, int router_handover);
// Set socket option `connect_rid`.
// Available from libzmq 4.1.0.
void
zsock_set_connect_rid (void *self, const char *connect_rid);
// Set socket option `connect_rid` from 32-octet binary
// Available from libzmq 4.1.0.
void
zsock_set_connect_rid_bin (void *self, const byte *connect_rid);
// Get socket option `handshake_ivl`.
// Available from libzmq 4.1.0.
int
zsock_handshake_ivl (void *self);
// Set socket option `handshake_ivl`.
// Available from libzmq 4.1.0.
void
zsock_set_handshake_ivl (void *self, int handshake_ivl);
// Get socket option `socks_proxy`.
// Available from libzmq 4.1.0.
char *
zsock_socks_proxy (void *self);
// Set socket option `socks_proxy`.
// Available from libzmq 4.1.0.
void
zsock_set_socks_proxy (void *self, const char *socks_proxy);
// Set socket option `xpub_nodrop`.
// Available from libzmq 4.1.0.
void
zsock_set_xpub_nodrop (void *self, int xpub_nodrop);
// Set socket option `router_mandatory`.
// Available from libzmq 4.0.0.
void
zsock_set_router_mandatory (void *self, int router_mandatory);
// Set socket option `probe_router`.
// Available from libzmq 4.0.0.
void
zsock_set_probe_router (void *self, int probe_router);
// Set socket option `req_relaxed`.
// Available from libzmq 4.0.0.
void
zsock_set_req_relaxed (void *self, int req_relaxed);
// Set socket option `req_correlate`.
// Available from libzmq 4.0.0.
void
zsock_set_req_correlate (void *self, int req_correlate);
// Set socket option `conflate`.
// Available from libzmq 4.0.0.
void
zsock_set_conflate (void *self, int conflate);
// Get socket option `zap_domain`.
// Available from libzmq 4.0.0.
char *
zsock_zap_domain (void *self);
// Set socket option `zap_domain`.
// Available from libzmq 4.0.0.
void
zsock_set_zap_domain (void *self, const char *zap_domain);
// Get socket option `mechanism`.
// Available from libzmq 4.0.0.
int
zsock_mechanism (void *self);
// Get socket option `plain_server`.
// Available from libzmq 4.0.0.
int
zsock_plain_server (void *self);
// Set socket option `plain_server`.
// Available from libzmq 4.0.0.
void
zsock_set_plain_server (void *self, int plain_server);
// Get socket option `plain_username`.
// Available from libzmq 4.0.0.
char *
zsock_plain_username (void *self);
// Set socket option `plain_username`.
// Available from libzmq 4.0.0.
void
zsock_set_plain_username (void *self, const char *plain_username);
// Get socket option `plain_password`.
// Available from libzmq 4.0.0.
char *
zsock_plain_password (void *self);
// Set socket option `plain_password`.
// Available from libzmq 4.0.0.
void
zsock_set_plain_password (void *self, const char *plain_password);
// Get socket option `curve_server`.
// Available from libzmq 4.0.0.
int
zsock_curve_server (void *self);
// Set socket option `curve_server`.
// Available from libzmq 4.0.0.
void
zsock_set_curve_server (void *self, int curve_server);
// Get socket option `curve_publickey`.
// Available from libzmq 4.0.0.
char *
zsock_curve_publickey (void *self);
// Set socket option `curve_publickey`.
// Available from libzmq 4.0.0.
void
zsock_set_curve_publickey (void *self, const char *curve_publickey);
// Set socket option `curve_publickey` from 32-octet binary
// Available from libzmq 4.0.0.
void
zsock_set_curve_publickey_bin (void *self, const byte *curve_publickey);
// Get socket option `curve_secretkey`.
// Available from libzmq 4.0.0.
char *
zsock_curve_secretkey (void *self);
// Set socket option `curve_secretkey`.
// Available from libzmq 4.0.0.
void
zsock_set_curve_secretkey (void *self, const char *curve_secretkey);
// Set socket option `curve_secretkey` from 32-octet binary
// Available from libzmq 4.0.0.
void
zsock_set_curve_secretkey_bin (void *self, const byte *curve_secretkey);
// Get socket option `curve_serverkey`.
// Available from libzmq 4.0.0.
char *
zsock_curve_serverkey (void *self);
// Set socket option `curve_serverkey`.
// Available from libzmq 4.0.0.
void
zsock_set_curve_serverkey (void *self, const char *curve_serverkey);
// Set socket option `curve_serverkey` from 32-octet binary
// Available from libzmq 4.0.0.
void
zsock_set_curve_serverkey_bin (void *self, const byte *curve_serverkey);
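// Usage sketch (illustrative; zcert_t comes from the czmq zcert class, not
// shown in this listing): wire up a CURVE server and client:
//
//     zcert_t *server_cert = zcert_new ();
//     zcert_t *client_cert = zcert_new ();
//     zsock_t *server = zsock_new (ZMQ_PULL);
//     zsock_set_curve_server (server, 1);
//     zcert_apply (server_cert, server);      //  Sets public + secret keys
//     zsock_t *client = zsock_new (ZMQ_PUSH);
//     zcert_apply (client_cert, client);
//     zsock_set_curve_serverkey (client, zcert_public_txt (server_cert));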
// Get socket option `gssapi_server`.
// Available from libzmq 4.0.0.
int
zsock_gssapi_server (void *self);
// Set socket option `gssapi_server`.
// Available from libzmq 4.0.0.
void
zsock_set_gssapi_server (void *self, int gssapi_server);
// Get socket option `gssapi_plaintext`.
// Available from libzmq 4.0.0.
int
zsock_gssapi_plaintext (void *self);
// Set socket option `gssapi_plaintext`.
// Available from libzmq 4.0.0.
void
zsock_set_gssapi_plaintext (void *self, int gssapi_plaintext);
// Get socket option `gssapi_principal`.
// Available from libzmq 4.0.0.
char *
zsock_gssapi_principal (void *self);
// Set socket option `gssapi_principal`.
// Available from libzmq 4.0.0.
void
zsock_set_gssapi_principal (void *self, const char *gssapi_principal);
// Get socket option `gssapi_service_principal`.
// Available from libzmq 4.0.0.
char *
zsock_gssapi_service_principal (void *self);
// Set socket option `gssapi_service_principal`.
// Available from libzmq 4.0.0.
void
zsock_set_gssapi_service_principal (void *self, const char *gssapi_service_principal);
// Get socket option `ipv6`.
// Available from libzmq 4.0.0.
int
zsock_ipv6 (void *self);
// Set socket option `ipv6`.
// Available from libzmq 4.0.0.
void
zsock_set_ipv6 (void *self, int ipv6);
// Get socket option `immediate`.
// Available from libzmq 4.0.0.
int
zsock_immediate (void *self);
// Set socket option `immediate`.
// Available from libzmq 4.0.0.
void
zsock_set_immediate (void *self, int immediate);
// Get socket option `sndhwm`.
// Available from libzmq 3.0.0.
int
zsock_sndhwm (void *self);
// Set socket option `sndhwm`.
// Available from libzmq 3.0.0.
void
zsock_set_sndhwm (void *self, int sndhwm);
// Get socket option `rcvhwm`.
// Available from libzmq 3.0.0.
int
zsock_rcvhwm (void *self);
// Set socket option `rcvhwm`.
// Available from libzmq 3.0.0.
void
zsock_set_rcvhwm (void *self, int rcvhwm);
// Get socket option `maxmsgsize`.
// Available from libzmq 3.0.0.
int
zsock_maxmsgsize (void *self);
// Set socket option `maxmsgsize`.
// Available from libzmq 3.0.0.
void
zsock_set_maxmsgsize (void *self, int maxmsgsize);
// Get socket option `multicast_hops`.
// Available from libzmq 3.0.0.
int
zsock_multicast_hops (void *self);
// Set socket option `multicast_hops`.
// Available from libzmq 3.0.0.
void
zsock_set_multicast_hops (void *self, int multicast_hops);
// Set socket option `xpub_verbose`.
// Available from libzmq 3.0.0.
void
zsock_set_xpub_verbose (void *self, int xpub_verbose);
// Get socket option `tcp_keepalive`.
// Available from libzmq 3.0.0.
int
zsock_tcp_keepalive (void *self);
// Set socket option `tcp_keepalive`.
// Available from libzmq 3.0.0.
void
zsock_set_tcp_keepalive (void *self, int tcp_keepalive);
// Get socket option `tcp_keepalive_idle`.
// Available from libzmq 3.0.0.
int
zsock_tcp_keepalive_idle (void *self);
// Set socket option `tcp_keepalive_idle`.
// Available from libzmq 3.0.0.
void
zsock_set_tcp_keepalive_idle (void *self, int tcp_keepalive_idle);
// Get socket option `tcp_keepalive_cnt`.
// Available from libzmq 3.0.0.
int
zsock_tcp_keepalive_cnt (void *self);
// Set socket option `tcp_keepalive_cnt`.
// Available from libzmq 3.0.0.
void
zsock_set_tcp_keepalive_cnt (void *self, int tcp_keepalive_cnt);
// Get socket option `tcp_keepalive_intvl`.
// Available from libzmq 3.0.0.
int
zsock_tcp_keepalive_intvl (void *self);
// Set socket option `tcp_keepalive_intvl`.
// Available from libzmq 3.0.0.
void
zsock_set_tcp_keepalive_intvl (void *self, int tcp_keepalive_intvl);
// Get socket option `tcp_accept_filter`.
// Available from libzmq 3.0.0.
char *
zsock_tcp_accept_filter (void *self);
// Set socket option `tcp_accept_filter`.
// Available from libzmq 3.0.0.
void
zsock_set_tcp_accept_filter (void *self, const char *tcp_accept_filter);
// Get socket option `last_endpoint`.
// Available from libzmq 3.0.0.
char *
zsock_last_endpoint (void *self);
// Set socket option `router_raw`.
// Available from libzmq 3.0.0.
void
zsock_set_router_raw (void *self, int router_raw);
// Get socket option `ipv4only`.
// Available from libzmq 3.0.0.
int
zsock_ipv4only (void *self);
// Set socket option `ipv4only`.
// Available from libzmq 3.0.0.
void
zsock_set_ipv4only (void *self, int ipv4only);
// Set socket option `delay_attach_on_connect`.
// Available from libzmq 3.0.0.
void
zsock_set_delay_attach_on_connect (void *self, int delay_attach_on_connect);
// Get socket option `hwm`.
// Available from libzmq 2.0.0 to 3.0.0.
int
zsock_hwm (void *self);
// Set socket option `hwm`.
// Available from libzmq 2.0.0 to 3.0.0.
void
zsock_set_hwm (void *self, int hwm);
// Get socket option `swap`.
// Available from libzmq 2.0.0 to 3.0.0.
int
zsock_swap (void *self);
// Set socket option `swap`.
// Available from libzmq 2.0.0 to 3.0.0.
void
zsock_set_swap (void *self, int swap);
// Get socket option `affinity`.
// Available from libzmq 2.0.0.
int
zsock_affinity (void *self);
// Set socket option `affinity`.
// Available from libzmq 2.0.0.
void
zsock_set_affinity (void *self, int affinity);
// Get socket option `identity`.
// Available from libzmq 2.0.0.
char *
zsock_identity (void *self);
// Set socket option `identity`.
// Available from libzmq 2.0.0.
void
zsock_set_identity (void *self, const char *identity);
// Get socket option `rate`.
// Available from libzmq 2.0.0.
int
zsock_rate (void *self);
// Set socket option `rate`.
// Available from libzmq 2.0.0.
void
zsock_set_rate (void *self, int rate);
// Get socket option `recovery_ivl`.
// Available from libzmq 2.0.0.
int
zsock_recovery_ivl (void *self);
// Set socket option `recovery_ivl`.
// Available from libzmq 2.0.0.
void
zsock_set_recovery_ivl (void *self, int recovery_ivl);
// Get socket option `recovery_ivl_msec`.
// Available from libzmq 2.0.0 to 3.0.0.
int
zsock_recovery_ivl_msec (void *self);
// Set socket option `recovery_ivl_msec`.
// Available from libzmq 2.0.0 to 3.0.0.
void
zsock_set_recovery_ivl_msec (void *self, int recovery_ivl_msec);
// Get socket option `mcast_loop`.
// Available from libzmq 2.0.0 to 3.0.0.
int
zsock_mcast_loop (void *self);
// Set socket option `mcast_loop`.
// Available from libzmq 2.0.0 to 3.0.0.
void
zsock_set_mcast_loop (void *self, int mcast_loop);
// Get socket option `rcvtimeo`.
// Available from libzmq 2.2.0.
int
zsock_rcvtimeo (void *self);
// Set socket option `rcvtimeo`.
// Available from libzmq 2.2.0.
void
zsock_set_rcvtimeo (void *self, int rcvtimeo);
// Get socket option `sndtimeo`.
// Available from libzmq 2.2.0.
int
zsock_sndtimeo (void *self);
// Set socket option `sndtimeo`.
// Available from libzmq 2.2.0.
void
zsock_set_sndtimeo (void *self, int sndtimeo);
// Get socket option `sndbuf`.
// Available from libzmq 2.0.0.
int
zsock_sndbuf (void *self);
// Set socket option `sndbuf`.
// Available from libzmq 2.0.0.
void
zsock_set_sndbuf (void *self, int sndbuf);
// Get socket option `rcvbuf`.
// Available from libzmq 2.0.0.
int
zsock_rcvbuf (void *self);
// Set socket option `rcvbuf`.
// Available from libzmq 2.0.0.
void
zsock_set_rcvbuf (void *self, int rcvbuf);
// Get socket option `linger`.
// Available from libzmq 2.0.0.
int
zsock_linger (void *self);
// Set socket option `linger`.
// Available from libzmq 2.0.0.
void
zsock_set_linger (void *self, int linger);
// Get socket option `reconnect_ivl`.
// Available from libzmq 2.0.0.
int
zsock_reconnect_ivl (void *self);
// Set socket option `reconnect_ivl`.
// Available from libzmq 2.0.0.
void
zsock_set_reconnect_ivl (void *self, int reconnect_ivl);
// Get socket option `reconnect_ivl_max`.
// Available from libzmq 2.0.0.
int
zsock_reconnect_ivl_max (void *self);
// Set socket option `reconnect_ivl_max`.
// Available from libzmq 2.0.0.
void
zsock_set_reconnect_ivl_max (void *self, int reconnect_ivl_max);
// Get socket option `backlog`.
// Available from libzmq 2.0.0.
int
zsock_backlog (void *self);
// Set socket option `backlog`.
// Available from libzmq 2.0.0.
void
zsock_set_backlog (void *self, int backlog);
// Set socket option `subscribe`.
// Available from libzmq 2.0.0.
void
zsock_set_subscribe (void *self, const char *subscribe);
// Set socket option `unsubscribe`.
// Available from libzmq 2.0.0.
void
zsock_set_unsubscribe (void *self, const char *unsubscribe);
// Get socket option `type`.
// Available from libzmq 2.0.0.
int
zsock_type (void *self);
// Get socket option `rcvmore`.
// Available from libzmq 2.0.0.
int
zsock_rcvmore (void *self);
// Get socket option `fd`.
// Available from libzmq 2.0.0.
SOCKET
zsock_fd (void *self);
// Get socket option `events`.
// Available from libzmq 2.0.0.
int
zsock_events (void *self);
// Self test of this class.
void
zsock_test (bool verbose);
// CLASS: zstr
// Receive C string from socket. Caller must free returned string using
// zstr_free(). Returns NULL if the context is being terminated or the
// process was interrupted.
char *
zstr_recv (void *source);
// Receive a series of strings (until NULL) from multipart data.
// Each string is allocated and filled with string data; if there
// are not enough frames, unallocated strings are set to NULL.
// Returns -1 if the message could not be read, else returns the
// number of strings filled, zero or more. Free each returned string
// using zstr_free(). If not enough strings are provided, remaining
// multipart frames in the message are dropped.
int
zstr_recvx (void *source, char **string_p, ...);
// De-compress and receive C string from socket, received as a message
// with two frames: size of the uncompressed string, and the string itself.
// Caller must free returned string using zstr_free(). Returns NULL if the
// context is being terminated or the process was interrupted.
char *
zstr_recv_compress (void *source);
// Send a C string to a socket, as a frame. The string is sent without
// trailing null byte; to read this you can use zstr_recv, or a similar
// method that adds a null terminator on the received string. String
// may be NULL, which is sent as "".
int
zstr_send (void *dest, const char *string);
// Send a C string to a socket, as zstr_send(), with a MORE flag, so that
// you can send further strings in the same multi-part message.
int
zstr_sendm (void *dest, const char *string);
// Send a formatted string to a socket. Note that you should NOT use
// user-supplied strings in the format (they may contain '%' which
// will create security holes).
int
zstr_sendf (void *dest, const char *format, ...);
// Send a formatted string to a socket, as for zstr_sendf(), with a
// MORE flag, so that you can send further strings in the same multi-part
// message.
int
zstr_sendfm (void *dest, const char *format, ...);
// Send a series of strings (until NULL) as multipart data
// Returns 0 if the strings could be sent OK, or -1 on error.
int
zstr_sendx (void *dest, const char *string, ...);
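// Usage sketch (illustrative, over an assumed connected 'writer'/'reader'
// pair): send and receive a two-string multipart message:
//
//     zstr_sendx (writer, "HELLO", "WORLD", NULL);
//     char *first, *second;
//     if (zstr_recvx (reader, &first, &second, NULL) == 2) {
//         assert (streq (first, "HELLO") && streq (second, "WORLD"));
//         zstr_free (&first);
//         zstr_free (&second);
//     }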
// Compress and send a C string to a socket, as a message with two frames:
// size of the uncompressed string, and the string itself. The string is
// sent without trailing null byte; to read this you can use
// zstr_recv_compress, or a similar method that de-compresses and adds a
// null terminator on the received string.
int
zstr_send_compress (void *dest, const char *string);
// Compress and send a C string to a socket, as zstr_send_compress(),
// with a MORE flag, so that you can send further strings in the same
// multi-part message.
int
zstr_sendm_compress (void *dest, const char *string);
// Accepts a void pointer and returns a fresh character string. If source
// is null, returns an empty string.
char *
zstr_str (void *source);
// Free a provided string, and nullify the parent pointer. Safe to call on
// a null pointer.
void
zstr_free (char **string_p);
// Self test of this class.
void
zstr_test (bool verbose);
// CLASS: zsys
// Initialize CZMQ zsys layer; this happens automatically when you create
// a socket or an actor; however this call lets you force initialization
// earlier, so e.g. logging is properly set-up before you start working.
// Not threadsafe, so call only from main thread. Safe to call multiple
// times. Returns global CZMQ context.
void *
zsys_init (void);
// Optionally shut down the CZMQ zsys layer; this normally happens automatically
// when the process exits; however this call lets you force a shutdown
// earlier, avoiding any potential problems with atexit() ordering, especially
// with Windows dlls.
void
zsys_shutdown (void);
// Get a new ZMQ socket, automagically creating a ZMQ context if this is
// the first time. Caller is responsible for destroying the ZMQ socket
// before process exits, to avoid a ZMQ deadlock. Note: you should not use
// this method in CZMQ apps, use zsock_new() instead.
// *** This is for CZMQ internal use only and may change arbitrarily ***
void *
zsys_socket (int type, const char *filename, size_t line_nbr);
// Destroy/close a ZMQ socket. You should call this for every socket you
// create using zsys_socket().
// *** This is for CZMQ internal use only and may change arbitrarily ***
int
zsys_close (void *handle, const char *filename, size_t line_nbr);
// Return ZMQ socket name for socket type
// *** This is for CZMQ internal use only and may change arbitrarily ***
char *
zsys_sockname (int socktype);
// Create a pipe, which consists of two PAIR sockets connected over inproc.
// The pipe is configured to use the zsys_pipehwm setting. Returns the
// frontend socket if successful, NULL if failed.
zsock_t *
zsys_create_pipe (zsock_t **backend_p);
// Set interrupt handler; this saves the default handlers so that a
// zsys_handler_reset () can restore them. If you call this multiple times
// then the last handler will take effect. If handler_fn is NULL, disables
// default SIGINT/SIGTERM handling in CZMQ.
void
zsys_handler_set (zsys_handler_fn *handler_fn);
// Reset interrupt handler, call this at exit if needed
void
zsys_handler_reset (void);
// Set default interrupt handler, so Ctrl-C or SIGTERM will set
// zsys_interrupted. Idempotent; safe to call multiple times.
// Can be suppressed by ZSYS_SIGHANDLER=false
// *** This is for CZMQ internal use only and may change arbitrarily ***
void
zsys_catch_interrupts (void);
// Check if default interrupt handler of Ctrl-C or SIGTERM was called.
// Does not work if ZSYS_SIGHANDLER is false and the code does not call
// zsys_set_interrupted () on signal.
bool
zsys_is_interrupted (void);
// Set interrupted flag. This is done by default signal handler, however
// this can be handy for language bindings or cases without default
// signal handler.
void
zsys_set_interrupted (void);
// Return true if file exists, else false.
bool
zsys_file_exists (const char *filename);
// Return file modification time. Returns 0 if the file does not exist.
time_t
zsys_file_modified (const char *filename);
// Return file mode; provides at least support for the POSIX S_ISREG(m)
// and S_ISDIR(m) macros and the S_IRUSR and S_IWUSR bits, on all boxes.
// Returns a mode_t cast to int, or -1 in case of error.
int
zsys_file_mode (const char *filename);
// Delete file. Does not complain if the file is absent
int
zsys_file_delete (const char *filename);
// Check if file is 'stable'
bool
zsys_file_stable (const char *filename);
// Create a file path if it doesn't exist. The file path is treated as
// printf format.
int
zsys_dir_create (const char *pathname, ...);
// Remove a file path if empty; the pathname is treated as printf format.
int
zsys_dir_delete (const char *pathname, ...);
// Move to a specified working directory. Returns 0 if OK, -1 if this failed.
int
zsys_dir_change (const char *pathname);
// Set private file creation mode; all files created from here will be
// readable/writable by the owner only.
void
zsys_file_mode_private (void);
// Reset default file creation mode; all files created from here will use
// process file mode defaults.
void
zsys_file_mode_default (void);
// Return the CZMQ version for run-time API detection; returns version
// numbers into the provided fields, provided each reference isn't null.
void
zsys_version (int *major, int *minor, int *patch);
// Format a string using printf formatting, returning a freshly allocated
// buffer. If there was insufficient memory, returns NULL. Free the returned
// string using zstr_free(). The hinted version lets you optimize by using
// a larger starting buffer size (known to or assumed by the developer) and so
// avoid reallocations.
char *
zsys_sprintf_hint (int hint, const char *format, ...);
// Format a string using printf formatting, returning a freshly allocated
// buffer. If there was insufficient memory, returns NULL. Free the returned
// string using zstr_free().
char *
zsys_sprintf (const char *format, ...);
// Format a string with a va_list argument, returning a freshly allocated
// buffer. If there was insufficient memory, returns NULL. Free the returned
// string using zstr_free().
char *
zsys_vprintf (const char *format, va_list argptr);
// Create UDP beacon socket; if the routable option is true, uses
// multicast (not yet implemented), else uses broadcast. This method
// and related ones might _eventually_ be moved to a zudp class.
// *** This is for CZMQ internal use only and may change arbitrarily ***
SOCKET
zsys_udp_new (bool routable);
// Close a UDP socket
// *** This is for CZMQ internal use only and may change arbitrarily ***
int
zsys_udp_close (SOCKET handle);
// Send zframe to UDP socket, return -1 if sending failed due to
// interface having disappeared (happens easily with WiFi)
// *** This is for CZMQ internal use only and may change arbitrarily ***
int
zsys_udp_send (SOCKET udpsock, zframe_t *frame, inaddr_t *address, int addrlen);
// Receive zframe from UDP socket, and set address of peer that sent it
// The peername must be a char [INET_ADDRSTRLEN] array if IPv6 is disabled or
// NI_MAXHOST if it's enabled. Returns NULL when failing to get peer address.
// *** This is for CZMQ internal use only and may change arbitrarily ***
zframe_t *
zsys_udp_recv (SOCKET udpsock, char *peername, int peerlen);
// Handle an I/O error on some socket operation; will report and die on
// fatal errors, and continue silently on "try again" errors.
// *** This is for CZMQ internal use only and may change arbitrarily ***
void
zsys_socket_error (const char *reason);
// Return current host name, for use in public tcp:// endpoints. Caller gets
// a freshly allocated string, should free it using zstr_free(). If the host
// name is not resolvable, returns NULL.
char *
zsys_hostname (void);
// Move the current process into the background. The precise effect depends
// on the operating system. On POSIX boxes, moves to a specified working
// directory (if specified), closes all file handles, reopens stdin, stdout,
// and stderr to the null device, and sets the process to ignore SIGHUP. On
// Windows, does nothing. Returns 0 if OK, -1 if there was an error.
int
zsys_daemonize (const char *workdir);
// Drop the process ID into the lockfile, with exclusive lock, and switch
// the process to the specified group and/or user. Any of the arguments
// may be null, indicating a no-op. Returns 0 on success, -1 on failure.
// Note if you combine this with zsys_daemonize, run after, not before
// that method, or the lockfile will hold the wrong process ID.
int
zsys_run_as (const char *lockfile, const char *group, const char *user);
// Returns true if the underlying libzmq supports CURVE security.
// Uses a heuristic probe according to the version of libzmq being used.
bool
zsys_has_curve (void);
// Configure the number of I/O threads that ZeroMQ will use. A good
// rule of thumb is one thread per gigabit of traffic in or out. The
// default is 1, sufficient for most applications. If the environment
// variable ZSYS_IO_THREADS is defined, that provides the default.
// Note that this method is valid only before any socket is created.
void
zsys_set_io_threads (size_t io_threads);
// Configure the scheduling policy of the ZMQ context thread pool.
// Not available on Windows. See the sched_setscheduler man page or sched.h
// for more information. If the environment variable ZSYS_THREAD_SCHED_POLICY
// is defined, that provides the default.
// Note that this method is valid only before any socket is created.
void
zsys_set_thread_sched_policy (int policy);
// Configure the scheduling priority of the ZMQ context thread pool.
// Not available on Windows. See the sched_setscheduler man page or sched.h
// for more information. If the environment variable ZSYS_THREAD_PRIORITY is
// defined, that provides the default.
// Note that this method is valid only before any socket is created.
void
zsys_set_thread_priority (int priority);
// Configure the numeric prefix to each thread created for the internal
// context's thread pool. This option is only supported on Linux.
// If the environment variable ZSYS_THREAD_NAME_PREFIX is defined, that
// provides the default.
// Note that this method is valid only before any socket is created.
void
zsys_set_thread_name_prefix (int prefix);
// Return thread name prefix.
int
zsys_thread_name_prefix (void);
// Adds a specific CPU to the affinity list of the ZMQ context thread pool.
// This option is only supported on Linux.
// Note that this method is valid only before any socket is created.
void
zsys_thread_affinity_cpu_add (int cpu);
// Removes a specific CPU from the affinity list of the ZMQ context thread pool.
// This option is only supported on Linux.
// Note that this method is valid only before any socket is created.
void
zsys_thread_affinity_cpu_remove (int cpu);
// Configure the number of sockets that ZeroMQ will allow. The default
// is 1024. The actual limit depends on the system, and you can query it
// by using zsys_socket_limit (). A value of zero means "maximum".
// Note that this method is valid only before any socket is created.
void
zsys_set_max_sockets (size_t max_sockets);
// Return maximum number of ZeroMQ sockets that the system will support.
size_t
zsys_socket_limit (void);
// Configure the maximum allowed size of a message sent.
// The default is INT_MAX.
void
zsys_set_max_msgsz (int max_msgsz);
// Return maximum message size.
int
zsys_max_msgsz (void);
// Configure whether to use zero copy strategy in libzmq. If the environment
// variable ZSYS_ZERO_COPY_RECV is defined, that provides the default.
// Otherwise the default is 1.
void
zsys_set_zero_copy_recv (int zero_copy);
// Return ZMQ_ZERO_COPY_RECV option.
int
zsys_zero_copy_recv (void);
// Configure the threshold value of filesystem object age per st_mtime
// that should elapse until we consider that object "stable" at the
// current zclock_time() moment.
// The default is S_DEFAULT_ZSYS_FILE_STABLE_AGE_MSEC defined in zsys.c
// which generally depends on host OS, with fallback value of 5000.
void
zsys_set_file_stable_age_msec (int64_t file_stable_age_msec);
// Return current threshold value of file stable age in msec.
// This can be used in code that chooses to wait for this timeout
// before testing if a filesystem object is "stable" or not.
int64_t
zsys_file_stable_age_msec (void);
// Configure the default linger timeout in msecs for new zsock instances.
// You can also set this separately on each zsock_t instance. The default
// linger time is zero, i.e. any pending messages will be dropped. If the
// environment variable ZSYS_LINGER is defined, that provides the default.
// Note that process exit will typically be delayed by the linger time.
void
zsys_set_linger (size_t linger);
// Configure the default outgoing pipe limit (HWM) for new zsock instances.
// You can also set this separately on each zsock_t instance. The default
// HWM is 1,000, on all versions of ZeroMQ. If the environment variable
// ZSYS_SNDHWM is defined, that provides the default. Note that a value of
// zero means no limit, i.e. infinite memory consumption.
void
zsys_set_sndhwm (size_t sndhwm);
// Configure the default incoming pipe limit (HWM) for new zsock instances.
// You can also set this separately on each zsock_t instance. The default
// HWM is 1,000, on all versions of ZeroMQ. If the environment variable
// ZSYS_RCVHWM is defined, that provides the default. Note that a value of
// zero means no limit, i.e. infinite memory consumption.
void
zsys_set_rcvhwm (size_t rcvhwm);
// Configure the default HWM for zactor internal pipes; this is set on both
// ends of the pipe, for outgoing messages only (sndhwm). The default HWM is
// 1,000, on all versions of ZeroMQ. If the environment var ZSYS_ACTORHWM is
// defined, that provides the default. Note that a value of zero means no
// limit, i.e. infinite memory consumption.
void
zsys_set_pipehwm (size_t pipehwm);
// Return the HWM for zactor internal pipes.
size_t
zsys_pipehwm (void);
// Configure use of IPv6 for new zsock instances. By default sockets accept
// and make only IPv4 connections. When you enable IPv6, sockets will accept
// and connect to both IPv4 and IPv6 peers. You can override the setting on
// each zsock_t instance. The default is IPv4 only (ipv6 set to 0). If the
// environment variable ZSYS_IPV6 is defined (as 1 or 0), this provides the
// default. Note: has no effect on ZMQ v2.
void
zsys_set_ipv6 (int ipv6);
// Return use of IPv6 for zsock instances.
int
zsys_ipv6 (void);
// Test if ipv6 is available on the system. Return true if available.
// The only way to reliably check is to actually open a socket and
// try to bind it. (ported from libzmq)
bool
zsys_ipv6_available (void);
// Set network interface name to use for broadcasts, particularly zbeacon.
// This lets the interface be configured for test environments where required.
// For example, on Mac OS X, zbeacon cannot bind to 255.255.255.255 which is
// the default when there is no specified interface. If the environment
// variable ZSYS_INTERFACE is set, use that as the default interface name.
// Setting the interface to "*" means "use all available interfaces".
void
zsys_set_interface (const char *value);
// Return network interface to use for broadcasts, or "" if none was set.
const char *
zsys_interface (void);
// Set IPv6 address to use zbeacon socket, particularly for receiving zbeacon.
// This needs to be set if IPv6 is enabled, as IPv6 can have multiple addresses
// on a given interface. If the environment variable ZSYS_IPV6_ADDRESS is set,
// use that as the default IPv6 address.
void
zsys_set_ipv6_address (const char *value);
// Return IPv6 address to use for zbeacon reception, or "" if none was set.
const char *
zsys_ipv6_address (void);
// Set IPv6 multicast address to use for sending zbeacon messages. This needs
// to be set if IPv6 is enabled. If the environment variable
// ZSYS_IPV6_MCAST_ADDRESS is set, use that as the default IPv6 multicast
// address.
void
zsys_set_ipv6_mcast_address (const char *value);
// Return IPv6 multicast address to use for sending zbeacon, or "" if none was
// set.
const char *
zsys_ipv6_mcast_address (void);
// Set IPv4 multicast address to use for sending zbeacon messages. By default
// IPv4 multicast is NOT used. If the environment variable
// ZSYS_IPV4_MCAST_ADDRESS is set, use that as the default IPv4 multicast
// address. Calling this function or setting ZSYS_IPV4_MCAST_ADDRESS
// will enable IPv4 zbeacon messages.
void
zsys_set_ipv4_mcast_address (const char *value);
// Return IPv4 multicast address to use for sending zbeacon, or NULL if none was
// set.
const char *
zsys_ipv4_mcast_address (void);
// Set multicast TTL; default is 1.
void
zsys_set_mcast_ttl (byte value);
// Get multicast TTL
byte
zsys_mcast_ttl (void);
// Configure the automatic use of pre-allocated FDs when creating new sockets.
// If 0 (default), nothing will happen. Else, when a new socket is bound, the
// system API will be used to check if an existing pre-allocated FD with a
// matching port (if TCP) or path (if IPC) exists, and if it does it will be
// set via the ZMQ_USE_FD socket option so that the library will use it
// instead of creating a new socket.
void
zsys_set_auto_use_fd (int auto_use_fd);
// Return use of automatic pre-allocated FDs for zsock instances.
int
zsys_auto_use_fd (void);
// Print formatted string. Format is specified by variable names
// in Python-like format style
//
// "%(KEY)s=%(VALUE)s", KEY=key, VALUE=value
// become
// "key=value"
//
// Returns a freshly allocated string, or NULL in case of error (not enough
// memory, invalid format specifier, or a name not in args).
char *
zsys_zprintf (const char *format, zhash_t *args);
// Return error string for given format/args combination.
char *
zsys_zprintf_error (const char *format, zhash_t *args);
// Print formatted string. Format is specified by variable names
// in Python-like format style
//
// "%(KEY)s=%(VALUE)s", KEY=key, VALUE=value
// become
// "key=value"
//
// Returns a freshly allocated string, or NULL in case of error (not enough
// memory, invalid format specifier, or a name not in args).
char *
zsys_zplprintf (const char *format, zconfig_t *args);
// Return error string for given format/args combination.
char *
zsys_zplprintf_error (const char *format, zconfig_t *args);
// Set log identity, which is a string that prefixes all log messages sent
// by this process. The log identity defaults to the environment variable
// ZSYS_LOGIDENT, if that is set.
void
zsys_set_logident (const char *value);
// Set stream to receive log traffic. By default, log traffic is sent to
// stdout. If you set the stream to NULL, no stream will receive the log
// traffic (it may still be sent to the system facility).
void
zsys_set_logstream (FILE *stream);
// Sends log output to a PUB socket bound to the specified endpoint. To
// collect such log output, create a SUB socket, subscribe to the traffic
// you care about, and connect to the endpoint. Log traffic is sent as a
// single string frame, in the same format as when sent to stdout. The
// log system supports a single sender; multiple calls to this method will
// bind the same sender to multiple endpoints. To disable the sender, call
// this method with a null argument.
void
zsys_set_logsender (const char *endpoint);
// Enable or disable logging to the system facility (syslog on POSIX boxes,
// event log on Windows). By default this is disabled.
void
zsys_set_logsystem (bool logsystem);
// Log error condition - highest priority
void
zsys_error (const char *format, ...);
// Log warning condition - high priority
void
zsys_warning (const char *format, ...);
// Log normal, but significant, condition - normal priority
void
zsys_notice (const char *format, ...);
// Log informational message - low priority
void
zsys_info (const char *format, ...);
// Log debug-level message - lowest priority
void
zsys_debug (const char *format, ...);
// Self test of this class.
void
zsys_test (bool verbose);
// CLASS: ztimerset
// Create new timer set.
ztimerset_t *
ztimerset_new (void);
// Destroy a timer set
void
ztimerset_destroy (ztimerset_t **self_p);
// Add a timer to the set. Returns timer id if OK, -1 on failure.
int
ztimerset_add (ztimerset_t *self, size_t interval, ztimerset_fn handler, void *arg);
// Cancel a timer. Returns 0 if OK, -1 on failure.
int
ztimerset_cancel (ztimerset_t *self, int timer_id);
// Set timer interval. Returns 0 if OK, -1 on failure.
// This method is slow; canceling the timer and adding a new one yields better performance.
int
ztimerset_set_interval (ztimerset_t *self, int timer_id, size_t interval);
// Reset timer to start interval counting from current time. Returns 0 if OK, -1 on failure.
// This method is slow; canceling the timer and adding a new one yields better performance.
int
ztimerset_reset (ztimerset_t *self, int timer_id);
// Return the time until the next interval.
// Should be used as timeout parameter for the zpoller wait method.
// The timeout is in msec.
int
ztimerset_timeout (ztimerset_t *self);
// Invoke the callback function of all timers whose interval has elapsed.
// Should be called after the zpoller wait method.
// Returns 0 if OK, -1 on failure.
int
ztimerset_execute (ztimerset_t *self);
// Self test of this class.
void
ztimerset_test (bool verbose);
// CLASS: ztrie
// Creates a new ztrie.
ztrie_t *
ztrie_new (char delimiter);
// Destroy the ztrie.
void
ztrie_destroy (ztrie_t **self_p);
// Inserts a new route into the tree and attaches the data. Returns -1
// if the route already exists, otherwise 0. This method takes ownership of
// the provided data if a destroy_data_fn is provided.
int
ztrie_insert_route (ztrie_t *self, const char *path, void *data, ztrie_destroy_data_fn destroy_data_fn);
// Removes a route from the trie and destroys its data. Returns -1 if the
// route does not exist, otherwise 0.
int
ztrie_remove_route (ztrie_t *self, const char *path);
// Returns true if the path matches a route in the tree, otherwise false.
bool
ztrie_matches (ztrie_t *self, const char *path);
// Returns the data of a matched route from last ztrie_matches. If the path
// did not match, returns NULL. Do not delete the data as it's owned by
// ztrie.
void *
ztrie_hit_data (ztrie_t *self);
// Returns the count of parameters that a matched route has.
size_t
ztrie_hit_parameter_count (ztrie_t *self);
// Returns the parameters of a matched route with named regexes from last
// ztrie_matches. If the path did not match or the route did not contain any
// named regexes, returns NULL.
zhashx_t *
ztrie_hit_parameters (ztrie_t *self);
// Returns the asterisk-matched part of a route; if there has been no match
// or no asterisk match, returns NULL.
const char *
ztrie_hit_asterisk_match (ztrie_t *self);
// Print the trie
void
ztrie_print (ztrie_t *self);
// Self test of this class.
void
ztrie_test (bool verbose);
// CLASS: zuuid
// Create a new UUID object.
zuuid_t *
zuuid_new (void);
// Destroy a specified UUID object.
void
zuuid_destroy (zuuid_t **self_p);
// Create UUID object from supplied ZUUID_LEN-octet value.
zuuid_t *
zuuid_new_from (const byte *source);
// Set UUID to new supplied ZUUID_LEN-octet value.
void
zuuid_set (zuuid_t *self, const byte *source);
// Set UUID to new supplied string value skipping '-' and '{' '}'
// optional delimiters. Return 0 if OK, else returns -1.
int
zuuid_set_str (zuuid_t *self, const char *source);
// Return UUID binary data.
const byte *
zuuid_data (zuuid_t *self);
// Return UUID binary size
size_t
zuuid_size (zuuid_t *self);
// Returns UUID as string
const char *
zuuid_str (zuuid_t *self);
// Return UUID in the canonical string format: 8-4-4-4-12, in lower
// case. Caller does not modify or free returned value. See
// http://en.wikipedia.org/wiki/Universally_unique_identifier
const char *
zuuid_str_canonical (zuuid_t *self);
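// Illustrative lifecycle (a sketch; `uuid` is a local example variable):
//
//     zuuid_t *uuid = zuuid_new ();
//     printf ("%s\n", zuuid_str_canonical (uuid));
//     zuuid_destroy (&uuid);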
// Store UUID blob in target array
void
zuuid_export (zuuid_t *self, byte *target);
// Check if UUID is same as supplied value
bool
zuuid_eq (zuuid_t *self, const byte *compare);
// Check if UUID is different from supplied value
bool
zuuid_neq (zuuid_t *self, const byte *compare);
// Make copy of UUID object; if uuid is null, or memory was exhausted,
// returns null.
zuuid_t *
zuuid_dup (zuuid_t *self);
// Self test of this class.
void
zuuid_test (bool verbose);
// CLASS: zhttp_client
// Create a new http client
zhttp_client_t *
zhttp_client_new (bool verbose);
// Destroy an http client
void
zhttp_client_destroy (zhttp_client_t **self_p);
// Self test of this class.
void
zhttp_client_test (bool verbose);
// CLASS: zhttp_server
// Create a new http server
zhttp_server_t *
zhttp_server_new (zhttp_server_options_t *options);
// Destroy an http server
void
zhttp_server_destroy (zhttp_server_t **self_p);
// Return the port the server is listening on.
int
zhttp_server_port (zhttp_server_t *self);
// Self test of this class.
void
zhttp_server_test (bool verbose);
// CLASS: zhttp_server_options
// Create a new zhttp_server_options.
zhttp_server_options_t *
zhttp_server_options_new (void);
// Create options from config tree.
zhttp_server_options_t *
zhttp_server_options_from_config (zconfig_t *config);
// Destroy the zhttp_server_options.
void
zhttp_server_options_destroy (zhttp_server_options_t **self_p);
// Get the server listening port.
int
zhttp_server_options_port (zhttp_server_options_t *self);
// Set the server listening port
void
zhttp_server_options_set_port (zhttp_server_options_t *self, int port);
// Get the address sockets should connect to in order to receive requests.
const char *
zhttp_server_options_backend_address (zhttp_server_options_t *self);
// Set the address sockets should connect to in order to receive requests.
void
zhttp_server_options_set_backend_address (zhttp_server_options_t *self, const char *address);
// Self test of this class.
void
zhttp_server_options_test (bool verbose);
// CLASS: zhttp_request
// Create a new http request.
zhttp_request_t *
zhttp_request_new (void);
// Destroy an http request.
void
zhttp_request_destroy (zhttp_request_t **self_p);
// Receive a new request from zhttp_server.
// Return the underlying connection if successful, to be used when calling zhttp_response_send.
void *
zhttp_request_recv (zhttp_request_t *self, zsock_t *sock);
// Send a request to zhttp_client.
// Url and the request path will be concatenated.
// This behavior is useful for url rewrite and reverse proxy.
//
// Send also allows two user-provided arguments which will be returned with the response.
// The reason for two is to be able to pass around the server connection when forwarding requests, or both a callback function and an arg.
int
zhttp_request_send (zhttp_request_t *self, zhttp_client_t *client, int timeout, void *arg, void *arg2);
// Get the request method
const char *
zhttp_request_method (zhttp_request_t *self);
// Set the request method
void
zhttp_request_set_method (zhttp_request_t *self, const char *method);
// Get the request url.
// When receiving a request from http server this is only the path part of the url.
const char *
zhttp_request_url (zhttp_request_t *self);
// Set the request url
// When sending a request via the http client this should be the full url.
void
zhttp_request_set_url (zhttp_request_t *self, const char *url);
// Get the request content type
const char *
zhttp_request_content_type (zhttp_request_t *self);
// Set the request content type
void
zhttp_request_set_content_type (zhttp_request_t *self, const char *content_type);
// Get the content length of the request
size_t
zhttp_request_content_length (zhttp_request_t *self);
// Get the headers of the request
zhash_t *
zhttp_request_headers (zhttp_request_t *self);
// Get the content of the request.
const char *
zhttp_request_content (zhttp_request_t *self);
// Get the content of the request.
char *
zhttp_request_get_content (zhttp_request_t *self);
// Set the content of the request.
// Content must be a dynamically allocated string.
// Takes ownership of the content.
void
zhttp_request_set_content (zhttp_request_t *self, char **content);
// Set the content of the request.
// The content is assumed to be constant-memory and will therefore not be copied or deallocated in any way.
void
zhttp_request_set_content_const (zhttp_request_t *self, const char *content);
// Set the content to NULL
void
zhttp_request_reset_content (zhttp_request_t *self);
// Match the path of the request.
// Supports wildcards with the '%s' symbol inside the match string.
// Wildcards match until the next '/', '?' or '\0'.
// On a successful match the variadic arguments will be filled with the matching strings.
// On a successful match the method modifies the url field, breaking it into substrings.
// If you need to use the url, do it before matching or take a copy.
//
// User must not free the variadic arguments as they are part of the url.
//
// To use the percent symbol, just double it, e.g "%%something".
//
// Example:
// if (zhttp_request_match (request, "POST", "/send/%s/%s", &name, &id))
bool
zhttp_request_match (zhttp_request_t *self, const char *method, const char *path, ...);
// Self test of this class.
void
zhttp_request_test (bool verbose);
// CLASS: zhttp_response
// Create a new zhttp_response.
zhttp_response_t *
zhttp_response_new (void);
// Destroy the zhttp_response.
void
zhttp_response_destroy (zhttp_response_t **self_p);
// Send a response to a request.
// Returns 0 if successful and -1 otherwise.
int
zhttp_response_send (zhttp_response_t *self, zsock_t *sock, void **connection);
// Receive a response from zhttp_client.
// On success return 0, -1 otherwise.
//
// Recv returns the two user arguments which were provided with the request.
// The reason for two is to be able to pass around the server connection when forwarding requests, or both a callback function and an argument.
int
zhttp_response_recv (zhttp_response_t *self, zhttp_client_t *client, void **arg, void **arg2);
// Get the response content type
const char *
zhttp_response_content_type (zhttp_response_t *self);
// Set the content type of the response.
void
zhttp_response_set_content_type (zhttp_response_t *self, const char *value);
// Get the status code of the response.
uint32_t
zhttp_response_status_code (zhttp_response_t *self);
// Set the status code of the response.
void
zhttp_response_set_status_code (zhttp_response_t *self, uint32_t status_code);
// Get the headers of the response.
zhash_t *
zhttp_response_headers (zhttp_response_t *self);
// Get the content length of the response
size_t
zhttp_response_content_length (zhttp_response_t *self);
// Get the content of the response.
const char *
zhttp_response_content (zhttp_response_t *self);
// Get the content of the response.
char *
zhttp_response_get_content (zhttp_response_t *self);
// Set the content of the response.
// Content must be a dynamically allocated string.
// Takes ownership of the content.
void
zhttp_response_set_content (zhttp_response_t *self, char **content);
// Set the content of the response.
// The content is assumed to be constant-memory and will therefore not be copied or deallocated in any way.
void
zhttp_response_set_content_const (zhttp_response_t *self, const char *content);
// Set the content to NULL
void
zhttp_response_reset_content (zhttp_response_t *self);
// Self test of this class.
void
zhttp_response_test (bool verbose);
// CLASS: zosc
// Create a new empty OSC message with the specified address string.
zosc_t *
zosc_new (const char *address);
// Create a new OSC message from the specified zframe. Takes ownership of
// the zframe.
zosc_t *
zosc_fromframe (zframe_t *frame);
// Create a new zosc message from memory. Takes ownership of the memory
// and calls free on the data after construction.
zosc_t *
zosc_frommem (char *data, size_t size);
// Create a new zosc message from the given format and arguments.
// The format type tags are as follows:
// i - 32bit integer
// h - 64bit integer
// f - 32bit floating point number (IEEE)
// d - 64bit (double) floating point number
// s - string (NULL terminated)
// t - timetag: an OSC timetag in NTP format (uint64_t)
// S - symbol
// c - char
// m - 4 byte midi packet (8 digits hexadecimal)
// T - TRUE (no value required)
// F - FALSE (no value required)
// N - NIL (no value required)
// I - Impulse (for triggers) or INFINITUM (no value required)
// b - binary blob
zosc_t *
zosc_create (const char *address, const char *format, ...);
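// Illustrative construction (a sketch; the address and values are made up):
//
//     zosc_t *msg = zosc_create ("/ping", "si", "hello", 42);
//     zosc_destroy (&msg);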
// Destroy an OSC message
void
zosc_destroy (zosc_t **self_p);
// Return chunk data size
size_t
zosc_size (zosc_t *self);
// Return OSC chunk data. Caller does not own the data!
byte *
zosc_data (zosc_t *self);
// Return the OSC address string
const char *
zosc_address (zosc_t *self);
// Return the OSC format of the message.
// i - 32bit integer
// h - 64bit integer
// f - 32bit floating point number (IEEE)
// d - 64bit (double) floating point number
// s - string (NULL terminated)
// t - timetag: an OSC timetag in NTP format (uint64_t)
// S - symbol
// c - char
// m - 4 byte midi packet (8 digits hexadecimal)
// T - TRUE (no value required)
// F - FALSE (no value required)
// N - NIL (no value required)
// I - Impulse (for triggers) or INFINITUM (no value required)
// b - binary blob
const char *
zosc_format (zosc_t *self);
// Retrieve the values provided by the given format. Note that zosc_retr
// creates the objects and the caller must destroy them when finished.
// The supplied pointers do not need to be initialized. Returns 0 if
// successful, or -1 if it failed to retrieve a value in which case the
// pointers are not modified. If an argument pointer is NULL it skips the
// value. See the format method for a detailed list of type tags for the
// format string.
int
zosc_retr (zosc_t *self, const char *format, ...);
// Create copy of the message, as new chunk object. Returns a fresh zosc_t
// object, or null if there was not enough heap memory. If chunk is null,
// returns null.
zosc_t *
zosc_dup (zosc_t *self);
// Transform zosc into a zframe that can be sent in a message.
zframe_t *
zosc_pack (zosc_t *self);
// Transform zosc into a zframe that can be sent in a message.
// Take ownership of the chunk.
zframe_t *
zosc_packx (zosc_t **self_p);
// Transform a zframe into a zosc.
zosc_t *
zosc_unpack (zframe_t *frame);
// Dump OSC message to stderr, for debugging and tracing.
void
zosc_print (zosc_t *self);
// Probe the supplied object, and report if it looks like a zosc_t.
bool
zosc_is (void *self);
// Self test of this class.
void
zosc_test (bool verbose);
''')
malamute_cdefs.extend (czmq_cdefs)
malamute_cdefs.append ('''
typedef struct _mlm_proto_t mlm_proto_t;
typedef struct _zsock_t zsock_t;
typedef struct _zframe_t zframe_t;
typedef struct _zmsg_t zmsg_t;
typedef struct _mlm_client_t mlm_client_t;
typedef struct _zactor_t zactor_t;
// CLASS: mlm_proto
// Create a new empty mlm_proto
mlm_proto_t *
mlm_proto_new (void);
// Destroy a mlm_proto instance
void
mlm_proto_destroy (mlm_proto_t **self_p);
// Receive a mlm_proto from the socket. Returns 0 if OK, -1 if
// there was an error. Blocks if there is no message waiting.
int
mlm_proto_recv (mlm_proto_t *self, zsock_t *input);
// Send the mlm_proto to the output socket, does not destroy it
int
mlm_proto_send (mlm_proto_t *self, zsock_t *output);
// Print contents of message to stdout
void
mlm_proto_print (mlm_proto_t *self);
// Get the message routing id, as a frame
zframe_t *
mlm_proto_routing_id (mlm_proto_t *self);
// Set the message routing id from a frame
void
mlm_proto_set_routing_id (mlm_proto_t *self, zframe_t *routing_id);
// Get the mlm_proto message id
int
mlm_proto_id (mlm_proto_t *self);
// Set the mlm_proto message id
void
mlm_proto_set_id (mlm_proto_t *self, int id);
// Get the mlm_proto message id as printable text
const char *
mlm_proto_command (mlm_proto_t *self);
// Get the address field
const char *
mlm_proto_address (mlm_proto_t *self);
// Set the address field
void
mlm_proto_set_address (mlm_proto_t *self, const char *address);
// Get the stream field
const char *
mlm_proto_stream (mlm_proto_t *self);
// Set the stream field
void
mlm_proto_set_stream (mlm_proto_t *self, const char *stream);
// Get the pattern field
const char *
mlm_proto_pattern (mlm_proto_t *self);
// Set the pattern field
void
mlm_proto_set_pattern (mlm_proto_t *self, const char *pattern);
// Get the subject field
const char *
mlm_proto_subject (mlm_proto_t *self);
// Set the subject field
void
mlm_proto_set_subject (mlm_proto_t *self, const char *subject);
// Get a copy of the content field
zmsg_t *
mlm_proto_content (mlm_proto_t *self);
// Get the content field and transfer ownership to caller
zmsg_t *
mlm_proto_get_content (mlm_proto_t *self);
// Set the content field, transferring ownership from the caller.
void
mlm_proto_set_content (mlm_proto_t *self, zmsg_t **content_p);
// Get the sender field
const char *
mlm_proto_sender (mlm_proto_t *self);
// Set the sender field
void
mlm_proto_set_sender (mlm_proto_t *self, const char *sender);
// Get the tracker field
const char *
mlm_proto_tracker (mlm_proto_t *self);
// Set the tracker field
void
mlm_proto_set_tracker (mlm_proto_t *self, const char *tracker);
// Get the timeout field
uint32_t
mlm_proto_timeout (mlm_proto_t *self);
// Set the timeout field
void
mlm_proto_set_timeout (mlm_proto_t *self, uint32_t timeout);
// Get the status_code field
uint16_t
mlm_proto_status_code (mlm_proto_t *self);
// Set the status_code field
void
mlm_proto_set_status_code (mlm_proto_t *self, uint16_t status_code);
// Get the status_reason field
const char *
mlm_proto_status_reason (mlm_proto_t *self);
// Set the status_reason field
void
mlm_proto_set_status_reason (mlm_proto_t *self, const char *status_reason);
// Get the amount field
uint16_t
mlm_proto_amount (mlm_proto_t *self);
// Set the amount field
void
mlm_proto_set_amount (mlm_proto_t *self, uint16_t amount);
// Self test of this class.
void
mlm_proto_test (bool verbose);
// CLASS: mlm_client
// Create a new mlm_client, return the reference if successful,
// or NULL if construction failed due to lack of available memory.
mlm_client_t *
mlm_client_new (void);
// Destroy the mlm_client and free all memory used by the object.
void
mlm_client_destroy (mlm_client_t **self_p);
// Return actor, when caller wants to work with multiple actors and/or
// input sockets asynchronously.
zactor_t *
mlm_client_actor (mlm_client_t *self);
// Return message pipe for asynchronous message I/O. In the high-volume case,
// we send methods and get replies to the actor, in a synchronous manner, and
// we send/recv high volume message data to a second pipe, the msgpipe. In
// the low-volume case we can do everything over the actor pipe, if traffic
// is never ambiguous.
zsock_t *
mlm_client_msgpipe (mlm_client_t *self);
// Return true if client is currently connected, else false. Note that the
// client will automatically re-connect if the server dies and restarts after
// a successful first connection.
bool
mlm_client_connected (mlm_client_t *self);
// Set PLAIN authentication username and password. If you do not call this, the
// client will use NULL authentication. TODO: add "set curve auth".
// Returns >= 0 if successful, -1 if interrupted.
int
mlm_client_set_plain_auth (mlm_client_t *self, const char *username, const char *password);
// Connect to server endpoint, with specified timeout in msecs (zero means wait
// forever). Constructor succeeds if connection is successful. The caller may
// specify its address.
// Returns >= 0 if successful, -1 if interrupted.
int
mlm_client_connect (mlm_client_t *self, const char *endpoint, uint32_t timeout, const char *address);
// Prepare to publish to a specified stream. After this, all messages are sent to
// this stream exclusively.
// Returns >= 0 if successful, -1 if interrupted.
int
mlm_client_set_producer (mlm_client_t *self, const char *stream);
// Consume messages with matching subjects. The pattern is a regular expression
// using the CZMQ zrex syntax. The most useful elements are: ^ and $ to match the
// start and end, . to match any character, \s and \S to match whitespace and
// non-whitespace, \d and \D to match a digit and non-digit, \a and \A to match
// alphabetic and non-alphabetic, \w and \W to match alphanumeric and
// non-alphanumeric, + for one or more repetitions, * for zero or more repetitions,
// and ( ) to create groups. Returns 0 if subscription was successful, else -1.
// Returns >= 0 if successful, -1 if interrupted.
int
mlm_client_set_consumer (mlm_client_t *self, const char *stream, const char *pattern);
// Remove all subscriptions to a stream
// Returns >= 0 if successful, -1 if interrupted.
int
mlm_client_remove_consumer (mlm_client_t *self, const char *stream);
// Offer a particular named service, where the pattern matches request subjects
// using the CZMQ zrex syntax.
// Returns >= 0 if successful, -1 if interrupted.
int
mlm_client_set_worker (mlm_client_t *self, const char *address, const char *pattern);
// Send STREAM SEND message to server, takes ownership of message
// and destroys message when done sending it.
int
mlm_client_send (mlm_client_t *self, const char *subject, zmsg_t **content);
// Send MAILBOX SEND message to server, takes ownership of message
// and destroys message when done sending it.
int
mlm_client_sendto (mlm_client_t *self, const char *address, const char *subject, const char *tracker, uint32_t timeout, zmsg_t **content);
// Send SERVICE SEND message to server, takes ownership of message
// and destroys message when done sending it.
int
mlm_client_sendfor (mlm_client_t *self, const char *address, const char *subject, const char *tracker, uint32_t timeout, zmsg_t **content);
// Receive message from server; caller destroys message when done
zmsg_t *
mlm_client_recv (mlm_client_t *self);
// Return last received command. Can be one of these values:
// "STREAM DELIVER"
// "MAILBOX DELIVER"
// "SERVICE DELIVER"
const char *
mlm_client_command (mlm_client_t *self);
// Return last received status
int
mlm_client_status (mlm_client_t *self);
// Return last received reason
const char *
mlm_client_reason (mlm_client_t *self);
// Return last received address
const char *
mlm_client_address (mlm_client_t *self);
// Return last received sender
const char *
mlm_client_sender (mlm_client_t *self);
// Return last received subject
const char *
mlm_client_subject (mlm_client_t *self);
// Return last received content
zmsg_t *
mlm_client_content (mlm_client_t *self);
// Return last received tracker
const char *
mlm_client_tracker (mlm_client_t *self);
// Send multipart string message to stream, end list with NULL
// Returns 0 if OK, -1 if failed due to lack of memory or other error.
int
mlm_client_sendx (mlm_client_t *self, const char *subject, const char *content, ...);
// Send multipart string to mailbox, end list with NULL
// Returns 0 if OK, -1 if failed due to lack of memory or other error.
int
mlm_client_sendtox (mlm_client_t *self, const char *address, const char *subject, const char *content, ...);
// Send multipart string to service, end list with NULL
// Returns 0 if OK, -1 if failed due to lack of memory or other error.
int
mlm_client_sendforx (mlm_client_t *self, const char *address, const char *subject, const char *content, ...);
// Receive a subject and string content from the server. The content may be
// 1 or more string frames. This method is orthogonal to the sendx methods.
// End the string arguments with NULL. If there are not enough frames in
// the received message, remaining strings are set to NULL. Returns number
// of string contents received, or -1 in case of error. Free the returned
// subject and content strings when finished with them. To get the type of
// the command, use mlm_client_command ().
int
mlm_client_recvx (mlm_client_t *self, char **subject_p, char **string_p, ...);
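// Illustrative stream round-trip (a sketch; the endpoint and stream names
// are made up and assume a running Malamute broker):
//
//     mlm_client_t *writer = mlm_client_new ();
//     mlm_client_connect (writer, "tcp://127.0.0.1:9999", 1000, "writer");
//     mlm_client_set_producer (writer, "weather");
//     mlm_client_sendx (writer, "temp.moscow", "10", NULL);
//     mlm_client_destroy (&writer);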
// Enable verbose tracing (animation) of state machine activity.
void
mlm_client_set_verbose (mlm_client_t *self, bool verbose);
// Self test of this class.
void
mlm_client_test (bool verbose);
''')
for i, item in enumerate (malamute_cdefs):
malamute_cdefs [i] = re.sub(r';[^;]*\bva_list\b[^;]*;', ';', item, flags=re.S) # we don't support anything with a va_list arg
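# Illustrative consumption of these cdefs (a sketch; the library name passed
# to dlopen is an assumption and depends on the local install; override=True
# tolerates the typedefs re-declared across chunks):
#
#     from cffi import FFI
#     ffi = FFI()
#     for chunk in malamute_cdefs:
#         ffi.cdef(chunk, override=True)
#     lib = ffi.dlopen("mlm")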
| zeromq/malamute | bindings/python_cffi/malamute_cffi/cdefs.py | Python | mpl-2.0 | 164,451 |
import pylab as pyl
import cPickle as pickle
from astLib import astStats
galaxies = pickle.load(open('galaxies.pickle', 'rb'))
galaxies = filter(lambda galaxy: galaxy.ston_I > 10. and galaxy.clumps is not None,
galaxies)
f = pyl.figure(1, figsize=(6,4))
f1s1 = f.add_subplot(111)
d = [[galaxy.clumps, galaxy.ICD_IH*100] for galaxy in galaxies]
d = pyl.asarray(d)
f1s1.scatter(d[:,0], d[:,1], s=50, c='0.8', edgecolor='0.8')
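# Overlay a running mean: bin the ICD values (y axis) into 5-wide bins and
# plot the mean clump number within each bin against the bin centers.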
bins = pyl.arange(0, 50, 5)
index = pyl.digitize(d[:,1], bins) - 1
delta = bins[1] - bins[0]
avgs = [pyl.mean(d[:,0][index==k]) for k in range(len(bins))]
#avgs = [astStats.biweightLocation(d[:,0][index==k], 6.0) for k in range(len(bins))]
#avgs = astStats.runningStatistic(d[:,1], d[:,0])
#bins = pyl.linspace(d[:,1].min(), d[:,1].max(), 10)
#delta = bins[1] - bins[0]
#f1s1.hlines(bins - delta/2., [0], avgs, lw=2, color='#A60628')
f1s1.plot(avgs, bins + delta/2., lw=2, color='#A60628')
avg=[]
for i in range(9):
d = [galaxy.ICD_IH*100 for galaxy in galaxies if galaxy.clumps ==i]
avg.append(astStats.biweightLocation(d, 6.0))
#f1s1.vlines(i, [0],pyl.mean(d), linestyle='dashed', lw=2, color='blue')
#f1s1.vlines(i, [0],astStats.biweightLocation(d,6.0), linestyle='dashed',
# lw=2, color='#348ABD')
f1s1.plot(range(9), avg, linestyle='dashed',
lw=2, color='#348ABD')
f1s1.set_xlim(-0.5, 10)
f1s1.set_ylim(0, 50)
f1s1.set_xlabel('Clump Number')
f1s1.set_ylabel(r'$\xi(i_{775}, H_{160})$ (%)')
line1 = pyl.Line2D([], [], marker='o', mfc='0.8', mec='0.8', lw=0)
line2 = pyl.Line2D([], [], color='#A60628', lw=2)
line3 = pyl.Line2D([], [], color='#348ABD',linestyle='dashed', lw=2)
pyl.legend((line1, line2, line3), ('Data', 'Mean Clump', 'Mean ICD'),
loc='upper right', ncol=1)
pyl.show()
| boada/ICD | sandbox/lowerSN/plot_icd_clumps.py | Python | mit | 1,772 |
class Error(Exception):
pass
class DownloadError(Error):
def __init__(self, url: str, reason: str) -> None:
self.url = url
self.reason = reason
def __str__(self) -> str:
return f"{self.reason}: {self.url}"
class NoFileExistsError(Error):
def __init__(self, path: str) -> None:
self.path = path
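# Illustrative usage (a sketch; the URL is made up):
#
# try:
#     raise DownloadError("https://example.org/paper.pdf", "connection timed out")
# except Error as err:
#     print(err)  # -> connection timed out: https://example.org/paper.pdf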
| yu-i9/dblpy | dblpy/exceptions.py | Python | mit | 347 |
from flask_restful import Api
import logging
from . import app, model, api
app.logger.setLevel(logging.DEBUG)
db_session = model.db.session
@app.route('/')
def hello_world():
app.logger.debug("hello")
return 'Hello World!'
rest_api = Api(app)
rest_api.add_resource(api.User, "/user/", "/user/<name>/")
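# Dev-server entry point (an assumption -- the original app may be served by
# an external WSGI runner instead):
if __name__ == "__main__":
    app.run(debug=True)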
| lucidfrontier45/flasktest | flask_app/application.py | Python | mit | 317 |