code (string, lengths 3-1.05M) | repo_name (string, lengths 5-104) | path (string, lengths 4-251) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M)
---|---|---|---|---|---
#!/usr/bin/python2
import requests
import time
import json
import sys
if len(sys.argv) < 2:
raise Exception('no host')
host = sys.argv[1]
if len(sys.argv) > 2:
port = sys.argv[2]
else:
port = 8080
names = [line.strip().rstrip(' \xc2\xa0') for line in open('names.txt')]
coords = []
for p in [line.strip() for line in open('points.txt')]:
coord = p.split(',')
coord = [float(coord[0]), float(coord[1])]
coords.append(coord)
size = len(coords)
if (len(names) < len(coords)):
size = len(names)
print "sending %s records" % size
for i in range(size):
presence = {}
presence['uid'] = '1234567890_TEST_' + str(i)
presence['label'] = names[i]
presence['snippit'] = 'having fun in CA'
presence['ttl'] = 2
now = int(round(time.time() * 1000))
presence['time'] = now
presence['space'] = 'test photowalk'
presence['location'] = {}
presence['location']['type'] = "Point"
presence['location']['coordinates'] = [coords[i][1], coords[i][0]]
print presence
headers = {'Content-Type': 'application/json'}
r = requests.put('http://' + host + ':' + str(port) + '/presence', data=json.dumps(presence), headers=headers)
print r.status_code
print r.text
| navicore/oemap | www/test/simulateMap/runme.py | Python | apache-2.0 | 1,205 |
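The script above reads `names.txt` and `points.txt`, whose formats are only implied by the parsing code. Below is a minimal sketch of compatible sample inputs, assuming one name per line and `lat,lon` pairs per line (the script swaps them into GeoJSON `[lon, lat]` order when sending); the values are illustrative only.

```python
# Hedged sketch: generate sample input files for the runme.py script above.
# File names come from the script; the names and coordinates are made up.
names = ["Alice", "Bob", "Carol"]            # one display name per line
points = ["37.7749,-122.4194",               # assumed "lat,lon" per line
          "37.8044,-122.2712",
          "37.3382,-121.8863"]

with open("names.txt", "w") as f:
    f.write("\n".join(names) + "\n")

with open("points.txt", "w") as f:
    f.write("\n".join(points) + "\n")
```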
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf.urls import patterns, url
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register('notes', views.NoteViewSet)
router.register('releases', views.ReleaseViewSet)
urlpatterns = router.urls + patterns(
'',
url(r'^releases/(?P<pk>\d+)/notes/$', views.NestedNoteView.as_view()),
url(r'^auth_token/$', views.auth_token))
| jgmize/rna | rna/urls.py | Python | mpl-2.0 | 594 |
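`patterns()` was deprecated in Django 1.8 and removed in 1.10. A hedged sketch (not from the repo) of the same routes expressed as a plain list, which is the form later Django versions expect:

```python
# Sketch only: equivalent URL configuration without the deprecated patterns().
# On Django 4.0+ `url` itself is removed and `re_path` would be used instead.
from django.conf.urls import url
from rest_framework import routers

from . import views

router = routers.DefaultRouter()
router.register('notes', views.NoteViewSet)
router.register('releases', views.ReleaseViewSet)

urlpatterns = router.urls + [
    url(r'^releases/(?P<pk>\d+)/notes/$', views.NestedNoteView.as_view()),
    url(r'^auth_token/$', views.auth_token),
]
```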
from unittest.mock import Mock
from aquarius.Interactor import Interactor
from aquarius.Persistence import Persistence
from aquarius.interactors.AddBookInteractor import AddBookInteractor
from aquarius.objects.Book import Book
from aquarius.objects.BookFormat import BookFormat
from tests.interactors.InteractorTestBase import InteractorTestBase
class TestAddBookInteractor(InteractorTestBase):
def setUp(self):
self.__persistence = Mock(Persistence)
self.__target = AddBookInteractor(self.__persistence)
def test_is_instance_of_interactor(self):
self.assertIsInstance(self.__target, Interactor)
def test_execute_gets_book_from_persistence(self):
self.__persistence.get_book_by_title_and_author = Mock(return_value=(self.__get_book()))
self.__target.execute(Book())
self.assertTrue(self.__persistence.get_book_by_title_and_author.called)
def test_execute_with_new_book_adds_it(self):
self.__persistence.get_book_by_title_and_author = Mock(return_value=Book())
self.__target.execute(Book())
self.assert_called(self.__persistence.add_book)
def test_execute_with_existing_book_does_not_add_it(self):
self.__persistence.get_book_by_title_and_author = Mock(return_value=(self.__get_book()))
self.__target.execute(self.__get_book())
self.assert_not_called(self.__persistence.add_book)
def test_execute_with_format_adds_format(self):
self.__persistence.get_book_by_title_and_author = Mock(return_value=(self.__get_book_with_format()))
self.__persistence.format_exists = Mock(return_value=False)
self.__target.execute(self.__get_book_with_format())
self.assert_called(self.__persistence.add_book_format)
def test_execute_with_format_does_not_add_it_when_the_book_has_it(self):
self.__persistence.get_book_by_title_and_author = Mock(return_value=(self.__get_book_with_format()))
self.__persistence.format_exists = Mock(return_value=True)
self.__target.execute(self.__get_book_with_format())
self.assert_not_called(self.__persistence.add_book_format)
def __get_book_with_format(self):
b = self.__get_book()
bf = BookFormat()
bf.Format = "EPUB"
bf.Location = "/dev/null"
b.add_format(bf)
return b
def __get_book(self):
my_book = Book()
my_book.id = 1337
return my_book
| jeroanan/Aquarius | tests/interactors/TestAddBookInteractor.py | Python | gpl-3.0 | 2,437 |
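The tests above rely on `assert_called` and `assert_not_called` from `InteractorTestBase`, which is not included in this entry. A hedged sketch of what those helpers might look like, inferred purely from how the tests use them:

```python
# Assumption: InteractorTestBase is not shown above; this is one plausible shape.
import unittest


class InteractorTestBase(unittest.TestCase):
    def assert_called(self, mock_method):
        self.assertTrue(mock_method.called)

    def assert_not_called(self, mock_method):
        self.assertFalse(mock_method.called)
```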
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2016 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import sqlalchemy as sa
from sqlalchemy import orm
_engine = None
_session_maker = None
DB_CONNECTION = "sqlite:////tmp/restalchemy-%s.db" % uuid.uuid4()
def get_engine():
global _engine
if _engine is None:
_engine = sa.create_engine(DB_CONNECTION, echo=True)
return _engine
def get_session():
return orm.sessionmaker(bind=get_engine())
| phantomii/restalchemy | restalchemy/tests/functional/restapi/sa_based/microservice/db.py | Python | apache-2.0 | 1,082 |
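Note that `get_session()` above returns the result of `orm.sessionmaker(...)`, i.e. a session factory rather than a live session, so a caller is expected to instantiate it explicitly. A minimal usage sketch:

```python
# Hedged usage sketch for the module above; no queries are shown because the
# models are not part of this entry.
Session = get_session()   # a sessionmaker bound to the SQLite engine
session = Session()       # an actual ORM session
try:
    # ... ORM work would go here ...
    session.commit()
finally:
    session.close()
```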
#!/usr/bin/env python3
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
This is a Unit Test for Rule ShowBluetoothIcon
@author: Brandon R. Gonzales
@change: 2018/12/12 - Original implementation
'''
import sys
import unittest
import os
import pwd
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.stonix_resources.CommandHelper import CommandHelper
from src.stonix_resources.rules.ShowBluetoothIcon import ShowBluetoothIcon
class zzzTestRuleShowBluetoothIcon(RuleTest):
def setUp(self):
RuleTest.setUp(self)
self.rule = ShowBluetoothIcon(self.config,
self.environ,
self.logdispatch,
self.statechglogger)
self.rulename = self.rule.rulename
self.rulenumber = self.rule.rulenumber
self.setCheckUndo(True)
self.ch = CommandHelper(self.logdispatch)
self.dc = "/usr/bin/defaults"
def runTest(self):
# This rule is only intended to be ran in user mode
if self.environ.geteuid() != 0:
self.simpleRuleTest()
def setConditionsForRule(self):
'''This makes sure the initial report fails by executing the following
command:
defaults -currentHost delete /Users/(username)/Library/Preferences/com.apple.systemuiserver menuExtras
:param self: essential if you override this definition
:returns: boolean - If successful True; If failure False
@author: Brandon R. Gonzales
'''
success = True
if success:
user = pwd.getpwuid(os.getuid())[0]
self.systemuiserver = "/Users/" + user + "/Library/Preferences/com.apple.systemuiserver"
if os.path.exists(self.systemuiserver):
command = [self.dc, "-currentHost", "delete", self.systemuiserver, "menuExtras"]
success = self.ch.executeCommand(command)
if success:
success = self.checkReportForRule(False, True)
return success
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| CSD-Public/stonix | src/tests/rules/unit_tests/zzzTestRuleShowBluetoothIcon.py | Python | gpl-2.0 | 3,421 |
# Copyright 2017 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import json
import re
import six
import time
import types
import uuid
import eventlet
import requests
from oslo_log import log as logging
from six.moves import http_client
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
OS_PREFIX = "OS-"
UNMANAGE_PREFIX = "UNMANAGED-"
# Taken from this SO post :
# http://stackoverflow.com/a/18516125
# Using old-style string formatting because of the nature of the regex
# conflicting with new-style curly braces
UUID4_STR_RE = ("%s[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]"
"[a-f0-9]{3}-?[a-f0-9]{12}")
UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX)
# Recursive dict to assemble basic url structure for the most common
# API URL endpoints. Most others are constructed from these
URL_TEMPLATES = {
'ai': lambda: 'app_instances',
'ai_inst': lambda: (URL_TEMPLATES['ai']() + '/{}'),
'si': lambda: (URL_TEMPLATES['ai_inst']() + '/storage_instances'),
'si_inst': lambda storage_name: (
(URL_TEMPLATES['si']() + '/{}').format(
'{}', storage_name)),
'vol': lambda storage_name: (
(URL_TEMPLATES['si_inst'](storage_name) + '/volumes')),
'vol_inst': lambda storage_name, volume_name: (
(URL_TEMPLATES['vol'](storage_name) + '/{}').format(
'{}', volume_name)),
'at': lambda: 'app_templates/{}'}
DEFAULT_SI_SLEEP = 1
DEFAULT_SI_SLEEP_API_2 = 5
DEFAULT_SNAP_SLEEP = 1
INITIATOR_GROUP_PREFIX = "IG-"
API_VERSIONS = ["2", "2.1"]
API_TIMEOUT = 20
###############
# METADATA KEYS
###############
M_TYPE = 'cinder_volume_type'
M_CALL = 'cinder_calls'
M_CLONE = 'cinder_clone_from'
M_MANAGED = 'cinder_managed'
M_KEYS = [M_TYPE, M_CALL, M_CLONE, M_MANAGED]
def _get_name(name):
return "".join((OS_PREFIX, name))
def _get_unmanaged(name):
return "".join((UNMANAGE_PREFIX, name))
def _authenticated(func):
"""Ensure the driver is authenticated to make a request.
In do_setup() we fetch an auth token and store it. If that expires when
we do API request, we'll fetch a new one.
"""
@functools.wraps(func)
def func_wrapper(driver, *args, **kwargs):
try:
return func(driver, *args, **kwargs)
except exception.NotAuthorized:
# Prevent recursion loop. After the driver arg is the
# resource_type arg from _issue_api_request(). If attempt to
# login failed, we should just give up.
if args[0] == 'login':
raise
# Token might've expired, get a new one, try again.
driver.login()
return func(driver, *args, **kwargs)
return func_wrapper
def _api_lookup(func):
"""Perform a dynamic API implementation lookup for a call
Naming convention follows this pattern:
# original_func(args) --> _original_func_X_?Y?(args)
# where X and Y are the major and minor versions of the latest
# supported API version
# From the Datera box we've determined that it supports API
# versions ['2', '2.1']
# This is the original function call
@_api_lookup
def original_func(arg1, arg2):
print("I'm a shim, this won't get executed!")
pass
# This is the function that is actually called after determining
# the correct API version to use
def _original_func_2_1(arg1, arg2):
some_version_2_1_implementation_here()
# This is the function that would be called if the previous function
# did not exist:
def _original_func_2(arg1, arg2):
some_version_2_implementation_here()
# This function would NOT be called, because the connected Datera box
# does not support the 1.5 version of the API
def _original_func_1_5(arg1, arg2):
some_version_1_5_implementation_here()
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
obj = args[0]
api_versions = _get_supported_api_versions(obj)
api_version = None
index = -1
while True:
try:
api_version = api_versions[index]
except (IndexError, KeyError):
msg = _("No compatible API version found for this product: "
"api_versions -> %(api_version)s, %(func)s")
LOG.error(msg, api_version=api_version, func=func)
raise exception.DateraAPIException(msg % {
'api_version': api_version, 'func': func})
# Py27
try:
name = "_" + "_".join(
(func.func_name, api_version.replace(".", "_")))
# Py3+
except AttributeError:
name = "_" + "_".join(
(func.__name__, api_version.replace(".", "_")))
try:
if obj.do_profile:
LOG.info("Trying method: %s", name)
call_id = uuid.uuid4()
LOG.debug("Profiling method: %s, id %s", name, call_id)
t1 = time.time()
obj.thread_local.trace_id = call_id
result = getattr(obj, name)(*args[1:], **kwargs)
if obj.do_profile:
t2 = time.time()
timedelta = round(t2 - t1, 3)
LOG.debug("Profile for method %s, id %s: %ss",
name, call_id, timedelta)
return result
except AttributeError as e:
# If we find the attribute name in the error message
# then we continue otherwise, raise to prevent masking
# errors
if name not in six.text_type(e):
raise
else:
LOG.info(e)
index -= 1
except exception.DateraAPIException as e:
if "UnsupportedVersionError" in six.text_type(e):
index -= 1
else:
raise
return wrapper
def _get_supported_api_versions(driver):
t = time.time()
if driver.api_cache and driver.api_timeout - t < API_TIMEOUT:
return driver.api_cache
driver.api_timeout = t + API_TIMEOUT
results = []
host = driver.configuration.san_ip
port = driver.configuration.datera_api_port
client_cert = driver.configuration.driver_client_cert
client_cert_key = driver.configuration.driver_client_cert_key
cert_data = None
header = {'Content-Type': 'application/json; charset=utf-8',
'Datera-Driver': 'OpenStack-Cinder-{}'.format(driver.VERSION)}
protocol = 'http'
if client_cert:
protocol = 'https'
cert_data = (client_cert, client_cert_key)
try:
url = '%s://%s:%s/api_versions' % (protocol, host, port)
resp = driver._request(url, "get", None, header, cert_data)
data = resp.json()
results = [elem.strip("v") for elem in data['api_versions']]
except (exception.DateraAPIException, KeyError):
# Fallback to pre-endpoint logic
for version in API_VERSIONS[0:-1]:
url = '%s://%s:%s/v%s' % (protocol, host, port, version)
resp = driver._request(url, "get", None, header, cert_data)
if ("api_req" in resp.json() or
str(resp.json().get("code")) == "99"):
results.append(version)
else:
LOG.error("No supported API versions available, "
"Please upgrade your Datera EDF software")
return results
def _get_volume_type_obj(driver, resource):
type_id = resource.get('volume_type_id', None)
# Handle case of volume with no type. We still want the
# specified defaults from above
if type_id:
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
else:
volume_type = None
return volume_type
def _get_policies_for_resource(driver, resource):
"""Get extra_specs and qos_specs of a volume_type.
This fetches the scoped keys from the volume type. Anything set from
qos_specs will override key/values set from extra_specs.
"""
volume_type = driver._get_volume_type_obj(resource)
# Handle case of volume with no type. We still want the
# specified defaults from above
if volume_type:
specs = volume_type.get('extra_specs')
else:
specs = {}
# Set defaults:
policies = {k.lstrip('DF:'): str(v['default']) for (k, v)
in driver._init_vendor_properties()[0].items()}
if volume_type:
# Populate updated value
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
policies[key] = value
qos_specs_id = volume_type.get('qos_specs_id')
if qos_specs_id is not None:
ctxt = context.get_admin_context()
qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
if qos_kvs:
policies.update(qos_kvs)
# Cast everything except booleans int that can be cast
for k, v in policies.items():
# Handle String Boolean case
if v == 'True' or v == 'False':
policies[k] = policies[k] == 'True'
continue
# Int cast
try:
policies[k] = int(v)
except ValueError:
pass
return policies
# ================
# = API Requests =
# ================
def _request(driver, connection_string, method, payload, header, cert_data):
LOG.debug("Endpoint for Datera API call: %s", connection_string)
LOG.debug("Payload for Datera API call: %s", payload)
try:
response = getattr(requests, method)(connection_string,
data=payload, headers=header,
verify=False, cert=cert_data)
return response
except requests.exceptions.RequestException as ex:
msg = _(
'Failed to make a request to Datera cluster endpoint due '
'to the following reason: %s') % six.text_type(
ex.message)
LOG.error(msg)
raise exception.DateraAPIException(msg)
def _raise_response(driver, response):
msg = _('Request to Datera cluster returned bad status:'
' %(status)s | %(reason)s') % {
'status': response.status_code,
'reason': response.reason}
LOG.error(msg)
raise exception.DateraAPIException(msg)
def _handle_bad_status(driver,
response,
connection_string,
method,
payload,
header,
cert_data,
sensitive=False,
conflict_ok=False):
if (response.status_code == http_client.BAD_REQUEST and
connection_string.endswith("api_versions")):
# Raise the exception, but don't log any error. We'll just fall
# back to the old style of determining API version. We make this
# request a lot, so logging it is just noise
raise exception.DateraAPIException
if response.status_code == http_client.NOT_FOUND:
raise exception.NotFound(response.json()['message'])
elif response.status_code in [http_client.FORBIDDEN,
http_client.UNAUTHORIZED]:
raise exception.NotAuthorized()
elif response.status_code == http_client.CONFLICT and conflict_ok:
# Don't raise, because we're expecting a conflict
pass
elif response.status_code == http_client.SERVICE_UNAVAILABLE:
current_retry = 0
while current_retry <= driver.retry_attempts:
LOG.debug("Datera 503 response, trying request again")
eventlet.sleep(driver.interval)
resp = driver._request(connection_string,
method,
payload,
header,
cert_data)
if resp.ok:
return response.json()
elif resp.status_code != http_client.SERVICE_UNAVAILABLE:
driver._raise_response(resp)
else:
driver._raise_response(response)
@_authenticated
def _issue_api_request(driver, resource_url, method='get', body=None,
sensitive=False, conflict_ok=False,
api_version='2', tenant=None):
"""All API requests to Datera cluster go through this method.
:param resource_url: the url of the resource
:param method: the request verb
:param body: a dict with options for the action_type
:param sensitive: Bool, whether request should be obscured from logs
:param conflict_ok: Bool, True to suppress ConflictError exceptions
during this request
:param api_version: The Datera api version for the request
:param tenant: The tenant header value for the request (only applicable
to 2.1 product versions and later)
:returns: a dict of the response from the Datera cluster
"""
host = driver.configuration.san_ip
port = driver.configuration.datera_api_port
api_token = driver.datera_api_token
payload = json.dumps(body, ensure_ascii=False)
payload.encode('utf-8')
header = {'Content-Type': 'application/json; charset=utf-8'}
header.update(driver.HEADER_DATA)
protocol = 'http'
if driver.configuration.driver_use_ssl:
protocol = 'https'
if api_token:
header['Auth-Token'] = api_token
if tenant == "all":
header['tenant'] = tenant
elif tenant and '/root' not in tenant:
header['tenant'] = "".join(("/root/", tenant))
elif tenant and '/root' in tenant:
header['tenant'] = tenant
elif driver.tenant_id and driver.tenant_id.lower() != "map":
header['tenant'] = driver.tenant_id
client_cert = driver.configuration.driver_client_cert
client_cert_key = driver.configuration.driver_client_cert_key
cert_data = None
if client_cert:
protocol = 'https'
cert_data = (client_cert, client_cert_key)
connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port,
api_version, resource_url)
request_id = uuid.uuid4()
if driver.do_profile:
t1 = time.time()
if not sensitive:
LOG.debug("\nDatera Trace ID: %(tid)s\n"
"Datera Request ID: %(rid)s\n"
"Datera Request URL: /v%(api)s/%(url)s\n"
"Datera Request Method: %(method)s\n"
"Datera Request Payload: %(payload)s\n"
"Datera Request Headers: %(header)s\n",
{'tid': driver.thread_local.trace_id,
'rid': request_id,
'api': api_version,
'url': resource_url,
'method': method,
'payload': payload,
'header': header})
response = driver._request(connection_string,
method,
payload,
header,
cert_data)
data = response.json()
timedelta = "Profiling disabled"
if driver.do_profile:
t2 = time.time()
timedelta = round(t2 - t1, 3)
if not sensitive:
LOG.debug("\nDatera Trace ID: %(tid)s\n"
"Datera Response ID: %(rid)s\n"
"Datera Response TimeDelta: %(delta)ss\n"
"Datera Response URL: %(url)s\n"
"Datera Response Payload: %(payload)s\n"
"Datera Response Object: %(obj)s\n",
{'tid': driver.thread_local.trace_id,
'rid': request_id,
'delta': timedelta,
'url': response.url,
'payload': payload,
'obj': vars(response)})
if not response.ok:
driver._handle_bad_status(response,
connection_string,
method,
payload,
header,
cert_data,
conflict_ok=conflict_ok)
return data
def register_driver(driver):
for func in [_get_supported_api_versions,
_get_volume_type_obj,
_get_policies_for_resource,
_request,
_raise_response,
_handle_bad_status,
_issue_api_request]:
# PY27
f = types.MethodType(func, driver)
try:
setattr(driver, func.func_name, f)
# PY3+
except AttributeError:
setattr(driver, func.__name__, f)
| j-griffith/cinder | cinder/volume/drivers/datera/datera_common.py | Python | apache-2.0 | 17,778 |
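The `_api_lookup` decorator above resolves each shim method to a version-suffixed implementation (`_name_2_1`, `_name_2`, ...) at call time. Below is a stripped-down, self-contained sketch of that dispatch idea; the class and method names are illustrative, not from the driver.

```python
# Minimal sketch of the version-dispatch pattern used by _api_lookup above.
import functools


def api_lookup(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # Try the newest supported version first, then fall back to older ones.
        for version in reversed(self.supported_versions):
            name = "_{}_{}".format(func.__name__, version.replace(".", "_"))
            impl = getattr(self, name, None)
            if impl is not None:
                return impl(*args, **kwargs)
        raise RuntimeError("no implementation found for %s" % func.__name__)
    return wrapper


class Driver(object):
    supported_versions = ["2", "2.1"]

    @api_lookup
    def create_volume(self, size):
        """Shim only; the decorator selects a versioned implementation."""

    def _create_volume_2_1(self, size):
        return "created %sGB volume via API 2.1" % size


print(Driver().create_volume(10))   # -> created 10GB volume via API 2.1
```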
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import logging
from six.moves import filter # @UnresolvedImport
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.db.models.signals import post_save, pre_delete
from django.utils.translation import ugettext_lazy as _, ugettext
# Django 1.6 transaction API, required for 1.8+
from django.utils.encoding import python_2_unicode_compatible
try:
notrans = transaction.non_atomic_requests
except:
notrans = transaction.commit_manually # @UndefinedVariable
# Django 1.9 deprecation of contenttypes.generic
try:
from django.contrib.contenttypes.fields import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
from mptt.fields import TreeForeignKey
from mptt.models import MPTTModel
from wiki import managers
from wiki.conf import settings
from wiki.core.compat import atomic, transaction_commit_on_success
from wiki.core.exceptions import NoRootURL, MultipleRootURLs
from wiki.models.article import ArticleRevision, ArticleForObject, Article
log = logging.getLogger(__name__)
# class Slug(models.Model):
# slug = models.SlugField(verbose_name=_('slug'), null=True, blank=True,
# max_length=SLUG_MAX_LENGTH)
@python_2_unicode_compatible
class URLPath(MPTTModel):
"""
Strategy: Very few fields go here, as most has to be managed through an
article's revision. As a side-effect, the URL resolution remains slim
and swift.
"""
# Tells django-wiki that permissions from a this object's article
# should be inherited to children's articles. In this case, it's a static
# property.. but you can also use a BooleanField.
INHERIT_PERMISSIONS = True
objects = managers.URLPathManager()
# Do not use this because of
# https://github.com/django-mptt/django-mptt/issues/369
# _default_manager = objects
articles = GenericRelation(
ArticleForObject,
content_type_field='content_type',
object_id_field='object_id',
)
# Do NOT modify this field - it is updated with signals whenever
# ArticleForObject is changed.
article = models.ForeignKey(
Article,
on_delete=models.CASCADE,
editable=False,
verbose_name=_('Cache lookup value for articles'),
)
SLUG_MAX_LENGTH = 50
slug = models.SlugField(verbose_name=_('slug'), null=True, blank=True,
max_length=SLUG_MAX_LENGTH)
site = models.ForeignKey(Site)
parent = TreeForeignKey(
'self',
null=True,
blank=True,
related_name='children')
# slug2 = models.OneToOneField(Slug)
def __init__(self, *args, **kwargs):
pass
# Fixed in django-mptt 0.5.3
# self._tree_manager = URLPath.objects
return super(URLPath, self).__init__(*args, **kwargs)
def __cached_ancestors(self):
"""
This returns the ancestors of this urlpath. These ancestors are
hopefully cached from the article path lookup. Accessing a foreign
key included in add_selecte_related on one of these ancestors will
not occur an additional sql query, as they were retrieved with a
select_related.
If the cached ancestors were not set explicitly, they will be retrieved
from the database.
"""
if not self.get_ancestors().exists():
self._cached_ancestors = []
if not hasattr(self, "_cached_ancestors"):
self._cached_ancestors = list(
self.get_ancestors().select_related_common())
return self._cached_ancestors
def __cached_ancestors_setter(self, ancestors):
self._cached_ancestors = ancestors
# Python 2.5 compatible property constructor
cached_ancestors = property(__cached_ancestors,
__cached_ancestors_setter)
def set_cached_ancestors_from_parent(self, parent):
self.cached_ancestors = parent.cached_ancestors + [parent]
@property
def path(self):
if not self.parent:
return ""
ancestors = list(
filter(
lambda ancestor: ancestor.parent is not None,
self.cached_ancestors))
slugs = [obj.slug if obj.slug else "" for obj in ancestors + [self]]
return "/".join(slugs) + "/"
def is_deleted(self):
"""
Returns True if this article or any of its ancestors have been deleted
"""
return self.first_deleted_ancestor() is not None
def first_deleted_ancestor(self):
for ancestor in self.cached_ancestors + [self]:
if ancestor.article.current_revision.deleted:
return ancestor
return None
@atomic
@transaction_commit_on_success
def _delete_subtree(self):
for descendant in self.get_descendants(
include_self=True).order_by("-level"):
descendant.article.delete()
def delete_subtree(self):
"""
NB! This deletes this urlpath, its children, and ALL of the related
articles. This is a purged delete and CANNOT be undone.
"""
try:
self._delete_subtree()
except:
# Not sure why any exception is getting caught here? Have we had
# unresolved database integrity errors?
log.exception("Exception deleting article subtree.")
@classmethod
def root(cls):
site = Site.objects.get_current()
root_nodes = list(
cls.objects.root_nodes().filter(site=site).select_related_common()
)
# We fetch the nodes as a list and use len(), not count() because we need
# to get the result out anyway. This only takes one sql query
no_paths = len(root_nodes)
if no_paths == 0:
raise NoRootURL(
"You need to create a root article on site '%s'" %
site)
if no_paths > 1:
raise MultipleRootURLs(
"Somehow you have multiple roots on %s" %
site)
return root_nodes[0]
class MPTTMeta:
pass
def __str__(self):
path = self.path
return path if path else ugettext("(root)")
def save(self, *args, **kwargs):
super(URLPath, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
assert not (self.parent and self.get_children()
), "You cannot delete a root article with children."
super(URLPath, self).delete(*args, **kwargs)
class Meta:
verbose_name = _('URL path')
verbose_name_plural = _('URL paths')
unique_together = ('site', 'parent', 'slug')
app_label = settings.APP_LABEL
def clean(self, *args, **kwargs):
if self.slug and not self.parent:
raise ValidationError(
_('Sorry but you cannot have a root article with a slug.'))
if not self.slug and self.parent:
raise ValidationError(
_('A non-root note must always have a slug.'))
if not self.parent:
if URLPath.objects.root_nodes().filter(
site=self.site).exclude(
id=self.id):
raise ValidationError(
_('There is already a root node on %s') %
self.site)
super(URLPath, self).clean(*args, **kwargs)
@classmethod
def get_by_path(cls, path, select_related=False):
"""
Strategy: Don't handle all kinds of weird cases. Be strict.
Accepts paths both starting with and without '/'
"""
# TODO: Save paths directly in the model for constant time lookups?
# Or: Save the parents in a lazy property because the parents are
# always fetched anyways so it's fine to fetch them here.
path = path.lstrip("/")
path = path.rstrip("/")
# Root page requested
if not path:
return cls.root()
slugs = path.split('/')
level = 1
parent = cls.root()
for slug in slugs:
if settings.URL_CASE_SENSITIVE:
child = parent.get_children().select_related_common().get(
slug=slug)
child.cached_ancestors = parent.cached_ancestors + [parent]
parent = child
else:
child = parent.get_children().select_related_common().get(
slug__iexact=slug)
child.cached_ancestors = parent.cached_ancestors + [parent]
parent = child
level += 1
return parent
def get_absolute_url(self):
return reverse('wiki:get', kwargs={'path': self.path})
@classmethod
def create_root(cls, site=None, title="Root", request=None, **kwargs):
if not site:
site = Site.objects.get_current()
root_nodes = cls.objects.root_nodes().filter(site=site)
if not root_nodes:
# (get_or_create does not work for MPTT models??)
article = Article()
revision = ArticleRevision(title=title, **kwargs)
if request:
revision.set_from_request(request)
article.add_revision(revision, save=True)
article.save()
root = cls.objects.create(site=site, article=article)
article.add_object_relation(root)
else:
root = root_nodes[0]
return root
@classmethod
@atomic
@transaction_commit_on_success
def create_article(
cls,
parent,
slug,
site=None,
title="Root",
article_kwargs={},
**kwargs):
"""Utility function:
Create a new urlpath with an article and a new revision for the article"""
if not site:
site = Site.objects.get_current()
article = Article(**article_kwargs)
article.add_revision(ArticleRevision(title=title, **kwargs),
save=True)
article.save()
newpath = cls.objects.create(
site=site,
parent=parent,
slug=slug,
article=article)
article.add_object_relation(newpath)
return newpath
######################################################
# SIGNAL HANDLERS
######################################################
# Just get this once
urlpath_content_type = None
def on_article_relation_save(**kwargs):
global urlpath_content_type
instance = kwargs['instance']
if not urlpath_content_type:
urlpath_content_type = ContentType.objects.get_for_model(URLPath)
if instance.content_type == urlpath_content_type:
URLPath.objects.filter(
id=instance.object_id).update(
article=instance.article)
post_save.connect(on_article_relation_save, ArticleForObject)
class Namespace:
# An instance of Namespace simulates "nonlocal variable_name" declaration
# in any nested function, that is possible in Python 3. It allows assigning
# to non local variable without rebinding it local. See PEP 3104.
pass
def on_article_delete(instance, *args, **kwargs):
# If an article is deleted, then throw out its URLPaths
# But move all descendants to a lost-and-found node.
site = Site.objects.get_current()
# Get the Lost-and-found path or create a new one
# Only create the lost-and-found article if it's necessary and such
# that the lost-and-found article can be deleted without being recreated!
ns = Namespace() # nonlocal namespace backported to Python 2.x
ns.lost_and_found = None
def get_lost_and_found():
if ns.lost_and_found:
return ns.lost_and_found
try:
ns.lost_and_found = URLPath.objects.get(
slug=settings.LOST_AND_FOUND_SLUG,
parent=URLPath.root(),
site=site)
except URLPath.DoesNotExist:
article = Article(group_read=True,
group_write=False,
other_read=False,
other_write=False)
article.add_revision(
ArticleRevision(
content=_(
'Articles who lost their parents\n'
'===============================\n\n'
'The children of this article have had their parents deleted. You should probably find a new home for them.'),
title=_("Lost and found")))
ns.lost_and_found = URLPath.objects.create(
slug=settings.LOST_AND_FOUND_SLUG,
parent=URLPath.root(),
site=site,
article=article)
article.add_object_relation(ns.lost_and_found)
return ns.lost_and_found
for urlpath in URLPath.objects.filter(
articles__article=instance,
site=site):
# Delete the children
for child in urlpath.get_children():
child.move_to(get_lost_and_found())
# ...and finally delete the path itself
pre_delete.connect(on_article_delete, Article)
| thoma5B/Django-Wiki | wiki/models/urlpath.py | Python | gpl-3.0 | 13,554 |
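A short usage sketch for the class methods defined above, assuming a configured Django project with django-wiki installed; the slug, title, and content values are illustrative:

```python
# Hedged usage sketch for URLPath.create_root / URLPath.create_article above.
root = URLPath.create_root(title="Wiki Root")
child = URLPath.create_article(
    parent=root,
    slug="getting-started",
    title="Getting Started",
    content="Welcome!",        # extra kwargs are forwarded to ArticleRevision
)
print(child.path)              # -> "getting-started/"
```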
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class customized_rfq(models.Model):
_inherit=["purchase.order"]
@api.model
def _default_rfq_template(self):
company_obj = self.env['res.company']
company = self.env['res.users'].browse([self.env.user.id]).company_id
if not company.template_rfq:
def_tpl = self.env['ir.ui.view'].search([('key', 'like', 'professional_templates.RFQ_%' ), ('type', '=', 'qweb')], order='id asc', limit=1)
company.write({'template_rfq': def_tpl.id})
return company.template_rfq or self.env.ref('purchase.report_purchasequotation_document')
rfq_logo = fields.Binary("Logo", attachment=True,
help="This field holds the image used as logo for the RFQ, if non is uploaded, the default logo define in the company settings will be used")
templ_rfq_id = fields.Many2one('ir.ui.view', 'RFQ Template', default=_default_rfq_template,required=True,
domain="[('type', '=', 'qweb'), ('key', 'like', 'professional_templates.RFQ\_%\_document' )]")
| optima-ict/odoo | addons/professional_templates/models/rfq.py | Python | agpl-3.0 | 1,040 |
def example_BackgroundCall():
import urllib,time
def work():
return urllib.urlopen('http://www.python.org/').read()
bkcall=BackgroundCall(work)
print 'work() executing in background ...'
while not bkcall.is_done():
print '.',
time.sleep(0.010)
print 'done.'
print bkcall.get_return()[:500]
import sys
from time import time as _time, sleep as _sleep
class Full(Exception):pass
class Empty(Exception):pass
class BackgroundCall:
"""BackgroundCall
Example:
bkcall=BackgroundCall( time_consuming_function )
...
if bkcall.is_done():
print "got", bkcall.get_return()
"""
id=None
done=0 #1=returned; 2=exception raised
def __init__(self, func, args=(), kwargs={}):
import thread
def thread_bkcall():
try:
self.ret=func(*args, **kwargs)
self.done=1
except:
self.exc=sys.exc_info()
self.done=2
self.id=thread.start_new(thread_bkcall, ())
def is_done(self):
return self.done
def get_return(self, wait=1, timeout=None, raise_exception=1, alt_return=None):
"""delivers the return value or (by default) echoes the exception of
the call job
wait: 0=no waiting; Attribute error raised if no
1=waits for return value or exception
callable -> waits and wait()-call's while waiting for return
"""
if not self.done and wait:
starttime=_time()
delay=0.0005
while not self.done:
if timeout:
remaining = starttime + timeout - _time()
if remaining <= 0: #time is over
if raise_exception:
raise Empty, "return timed out"
else:
return alt_return
delay = min(delay * 2, remaining, .05)
else:
delay = min(delay * 2, .05)
if callable(wait): wait()
_sleep(delay) #reduce CPU usage by using a sleep
if self.done==2: #we had an exception
exc=self.exc
del self.exc
if raise_exception & 1: #by default exception is raised
raise exc[0],exc[1],exc[2]
else:
return alt_return
return self.ret
def get_exception(self):
return self.exc
if __name__=='__main__':
example_BackgroundCall()
| ActiveState/code | recipes/Python/491280_BackgroundCall_Threading_like/recipe-491280.py | Python | mit | 2,587 |
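The recipe above targets Python 2. As an aside, here is a sketch of the same submit-and-poll pattern on Python 3 using the standard `concurrent.futures` module instead of a hand-rolled thread wrapper:

```python
# Python 3 sketch of the same idea as BackgroundCall above.
from concurrent.futures import ThreadPoolExecutor
import time
import urllib.request


def work():
    return urllib.request.urlopen("http://www.python.org/").read()


with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(work)
    while not future.done():
        print(".", end="", flush=True)
        time.sleep(0.01)
    print("done.")
    print(future.result()[:500])
```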
from django.conf import settings
from django.views.generic.detail import DetailView
from .models import MenuLink
class PageView(DetailView):
"""
Show a page
Template: ``page.html``
Specific context variable: ``menu_link``
"""
template_name = '{0}/page.html'.format(settings.CURRENT_SKIN)
queryset = MenuLink.objects.have_pages()
context_object_name = 'menu_link'
| romanvm/romans_blog | pages/views.py | Python | gpl-3.0 | 399 |
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
########
import os
import sys
import shlex
import tempfile
import subprocess
import pkg_resources
import jinja2
import distro
import requests
from cloudify import ctx
from cloudify import exceptions
from cloudify.decorators import operation
import telegraf_plugin
dist = distro.id()
TELEGRAF_CONFIG_FILE_DEFAULT = os.path.join(
'/', 'etc', 'telegraf', 'telegraf.conf')
TELEGRAF_PATH_DEFAULT = os.path.join('/', 'opt', 'telegraf')
@operation
def install(telegraf_config_inputs,
telegraf_config_file='',
telegraf_install_path='',
download_url='', **kwargs):
"""Installation operation.
Downloading and installing the telegraf package - default version is 0.12.0.
Default installation dir is set to /opt/telegraf.
Only linux distributions are supported.
"""
if 'linux' not in sys.platform:
raise exceptions.NonRecoverableError(
'Error! Telegraf-plugin is available on linux distribution only')
if not telegraf_install_path:
telegraf_install_path = TELEGRAF_PATH_DEFAULT
if os.path.isfile(telegraf_install_path):
raise ValueError(
format("Error! {0} file already exists, can't create dir.",
telegraf_install_path))
installation_file = download_telegraf(download_url, telegraf_install_path)
install_telegraf(installation_file, telegraf_install_path)
configure(telegraf_config_file, telegraf_config_inputs)
@operation
def start(**kwargs):
"""Start operation call for telegraf service,
with telegraf_plugin configuration file.
If telegraf service was already running -
it will restart it and will use updated configuration file.
"""
ctx.logger.info('Starting telegraf service...')
telegraf_config_file = TELEGRAF_CONFIG_FILE_DEFAULT
if not os.path.isfile(telegraf_config_file):
raise ValueError(
"Can't start the service. Wrong config file provided")
if os.path.exists('/usr/bin/systemctl'):
proc = _run('sudo systemctl restart telegraf')
else:
proc = _run('sudo service telegraf restart')
ctx.logger.info(
'GoodLuck! Telegraf service is up!'
'Have an awesome monitoring experience...')
return proc.aggr_stdout
def download_telegraf(download_url='', telegraf_install_path='', **kwargs):
"""Downloading telegraf package form your desire url.
Default url set to be version 0.12.0
anf downloaded from official influxdb site.
"""
if not os.path.exists(telegraf_install_path):
_run('sudo mkdir -p {0}'.format(telegraf_install_path))
ctx.logger.info('Downloading telegraf...')
if not download_url:
if dist in ('ubuntu', 'debian'):
download_url = 'http://get.influxdb.org/telegraf/' + \
'telegraf_0.12.0-1_amd64.deb'
elif dist in ('centos', 'redhat'):
download_url = 'http://get.influxdb.org/telegraf/' + \
'telegraf-0.12.0-1.x86_64.rpm'
else:
raise exceptions.NonRecoverableError(
'''Error! distribution is not supported.
Ubuntu, Debian, Centos and Redhat are supported currently''')
installation_file = _download_file(download_url, telegraf_install_path)
ctx.logger.info('Telegraf downloaded.')
return installation_file
def install_telegraf(installation_file, telegraf_install_path, **kwargs):
"""Depacking telegraf package."""
ctx.logger.info('Installing telegraf...')
if dist in ('ubuntu', 'debian'):
install_cmd = 'sudo dpkg -i {0}'.format(
os.path.join(telegraf_install_path, installation_file))
elif dist in ('centos', 'redhat'):
install_cmd = 'sudo yum localinstall -y {0}'.format(
os.path.join(telegraf_install_path, installation_file))
else:
raise exceptions.NonRecoverableError(
'''Error! distribution is not supported.
Ubuntu, Debian, Centos and Redhat are supported currently''')
_run(install_cmd)
ctx.logger.info('Telegraf service was installed...')
def configure(telegraf_config_file='', telgraf_config='', **kwargs):
"""Generating configuration file from your own desire destination
or from telegraf_plugin telegraf.conf file.
Rendering your inputs/outputs definitions.
"""
ctx.logger.info('Configuring Telegraf...')
dest_file = os.path.join(tempfile.gettempdir(), 'telegraf.conf')
if telegraf_config_file:
try:
ctx.download_resource_and_render(telegraf_config_file,
dest_file,
telgraf_config)
except:
raise ValueError(
"wrong inputs provided! can't redner configuration file")
else:
telegraf_config_file = pkg_resources.resource_string(
telegraf_plugin.__name__, 'resources/telegraf.conf')
configuration = jinja2.Template(telegraf_config_file)
try:
with open(dest_file, 'w') as f:
f.write(configuration.render(telgraf_config))
except:
raise ValueError(
"wrong inputs provided! can't redner configuration file")
_run('sudo mv {0} {1}'.format(dest_file, TELEGRAF_CONFIG_FILE_DEFAULT))
try:
_run('telegraf -config {0} -test'.format(
TELEGRAF_CONFIG_FILE_DEFAULT))
except:
raise ValueError(
"wrong inputs prodided! configuration file is unvalid")
ctx.logger.info('telegraf.conf was configured...')
def _download_file(url, destination):
try:
filename = url.split('/')[-1]
except:
raise ValueError("wrong url provided! can't _download_file")
temp_dir = tempfile.gettempdir()
local_filename = os.path.join(temp_dir, filename)
response = requests.get(url, stream=True)
with open(local_filename, 'wb') as temp_file:
for chunk in response.iter_content(chunk_size=512):
if chunk:
temp_file.write(chunk)
_run('sudo mv {0} {1}'.format(local_filename, os.path.join(destination,
filename)))
return filename
def _run(command):
if isinstance(command, str):
command = shlex.split(command)
stderr = subprocess.PIPE
stdout = subprocess.PIPE
ctx.logger.debug('Running: {0}'.format(command))
proc = subprocess.Popen(command, stdout=stdout, stderr=stderr)
proc.aggr_stdout, proc.aggr_stderr = proc.communicate()
if proc.returncode != 0:
command_str = ' '.join(command)
ctx.logger.error('Failed running command: {0} ({1}).'.format(
command_str, proc.aggr_stderr))
sys.exit(1)
return proc
| fogelomer/cloudify-telegraf-plugin | telegraf_plugin/tasks.py | Python | apache-2.0 | 7,442 |
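`configure()` above renders a Jinja2 template with the user-supplied inputs before moving it to `/etc/telegraf/telegraf.conf`. Below is a standalone sketch of just that rendering step; the template text and values are illustrative, not the plugin's bundled `resources/telegraf.conf`.

```python
# Standalone sketch of the template-rendering step performed in configure().
import jinja2

template_text = """
[agent]
  interval = "{{ interval | default('10s') }}"

[[outputs.influxdb]]
  urls = ["{{ influxdb_url }}"]
"""

config = {"interval": "5s", "influxdb_url": "http://localhost:8086"}
print(jinja2.Template(template_text).render(config))
```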
from sense_hat import SenseHat
###Egg Drop###
###Coded by dan_aldred###
###Based on micro:bit game###
import time
import random
sense = SenseHat()
sense.clear()
global game_over
global score
game_over = False
basket_x = 7
score = 0
'''main pitch measurement'''
def basket_move(pitch, basket_x):
sense.set_pixel(basket_x, 7, [0, 0, 0])
new_x = basket_x
if 1 < pitch < 179 and basket_x != 0:
new_x -= 1
elif 359 > pitch > 179 and basket_x != 7:
new_x += 1
return new_x,
'''Main game setup'''
def main():
global game_over
'''Introduction'''
sense.show_message("Egg Drop", text_colour = [255, 255, 0])
sense.set_rotation(90)
sense.load_image("chick.png")
time.sleep(2)
sense.set_rotation()
'''countdown'''
countdown = [3, 2, 1]
for i in countdown:
sense.show_message(str(i), text_colour = [255, 255, 255])
basket_x = 7
egg_x = random.randrange(0,7)
egg_y = 0
sense.set_pixel(egg_x, egg_y, [255, 255, 0])
sense.set_pixel(basket_x, 7, [139, 69, 19])
time.sleep(1)
while game_over == False:
global score
#print (score)
'''move basket first'''
'''Get basket position'''
pitch = sense.get_orientation()['pitch']
basket_x, = basket_move(pitch, basket_x)
#print (pitch, basket_x)
'''Set Basket Positon'''
sense.set_pixel(basket_x, 7, [139, 69, 19])
#print ("BASKET", basket_x)
time.sleep(0.2)
#print ("First Basket", basket_x)
'''Egg drop'''
sense.set_pixel(basket_x, 7, [0, 0, 0])
sense.set_pixel(egg_x, egg_y, [0, 0, 0])
egg_y = egg_y + 1
#print (egg_y)
sense.set_pixel(egg_x, egg_y, [255, 255, 0])
#print("FINAL", egg_y, basket_x, egg_x)
'''Check posiion of the egg and basket x , y'''
if (egg_y == 7) and (basket_x == egg_x or basket_x-1 == egg_x ):
#print ("YOU WIN")
sense.show_message("1up", text_colour = [0, 255, 0])
sense.set_pixel(egg_x, egg_y, [0, 0, 0])#hides old egg
#sense.set_pixel(basket_x, 7, [255, 0, 0])
#print ("last basket", basket_x)
egg_x = random.randrange(0,7)
score += 1
egg_y = 0
elif egg_y == 7:
#print("Game OVER")
sense.show_message("Game Over", text_colour = [255, 38, 0])
#sense.show_message("You Scored", score, text_colour = [255, 0, 0])
#print ("Final Score", score)
return score
game_over = True
break
main()
time.sleep(1)
sense.clear()
sense.show_message("You Scored " + str(score), text_colour = [128, 45, 255], scroll_speed = 0.08)
| TeCoEd/Egg-Drop | old_version.py | Python | mit | 2,807 |
from django.test import TestCase
from .models import Parent, Child, SignedParent
from django_rest_cryptingfields.serializer_fields import CryptingCharField, Crypter
from rest_framework import serializers
from keyczar import errors
from .serializers import getParentCharFieldSerializerClass, getParentCharFieldMaxSixLengthSerializerClass, getParentCharFieldMinSixLengthSerializerClass, getParentTextFieldSerializerClass
import json
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from django.utils.six import BytesIO
from .models import Parent
import datetime
TEXT = u'CHINESE: \u4E2D\u56FD; ENGLISH: Permission is hereby granted, free of charge, to any person obtaining a copy' \
'+of this software and associated documentation files (the "Software"), to deal' \
'+in the Software without restriction, including without limitation the rights' \
'+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell' \
'+copies of the Software, and to permit persons to whom the Software is' \
'+furnished to do so, subject to the following conditions:'
TEXT_BLANK = ''
EMAIL = 'blah@gmail.com'
class CrypterUnitTests(TestCase):
def setUp(self):
self.key = Crypter.generate_key_string()
self.crypter = Crypter(self.key)
def tearDown(self):
pass
def test_crypting(self):
encryptedText = self.crypter.encrypt(TEXT)
decryptedText = self.crypter.decrypt(encryptedText)
self.assertEqual(decryptedText, TEXT)
self.assertNotEqual(encryptedText, TEXT)
def test_key_not_found(self):
other_key = Crypter.generate_key_string()
other_crypter = Crypter(other_key)
encryptedText = self.crypter.encrypt(TEXT)
encryptedTextOther = other_crypter.encrypt(TEXT)
self.assertEqual(self.crypter.decrypt(encryptedText), TEXT)
self.assertEqual(other_crypter.decrypt(encryptedTextOther), TEXT)
self.assertRaises(errors.KeyNotFoundError, self.crypter.decrypt, encryptedTextOther)
class SerializerFieldUnitTests(TestCase):
def setUp(self):
self.key_string = Crypter.generate_key_string()
def tearDown(self):
pass
def test_cryptingcharfield(self):
json_string = json.dumps({'char_field': TEXT})
stream = BytesIO(json_string)
data = JSONParser().parse(stream)
deserializer = getParentCharFieldSerializerClass(self.key_string)(data=data)
deserializer.is_valid()
deserializer.save()
parent_model_from_db = Parent.objects.get(pk=1)
parent_model_char_field_from_db = parent_model_from_db.char_field
self.assertNotEqual(parent_model_char_field_from_db, TEXT)
serializer = getParentCharFieldSerializerClass(self.key_string)(parent_model_from_db)
data = serializer.data
#print(parent_model_char_field_from_db)
#print(data['char_field'])
del data['id']
serialized_model = JSONRenderer().render(data)
serialized_model = json.dumps(json.loads(serialized_model)) #get rid of space due to differences in parser output....
self.assertEqual(serialized_model, json_string)
def test_cryptingcharfield_empy_text(self):
json_string = json.dumps({'char_field': TEXT_BLANK})
stream = BytesIO(json_string)
data = JSONParser().parse(stream)
deserializer = getParentCharFieldSerializerClass(self.key_string)(data=data)
deserializer.is_valid()
deserializer.save()
parent_model_from_db = Parent.objects.get(pk=2)
parent_model_char_field_from_db = parent_model_from_db.char_field
self.assertEqual(parent_model_char_field_from_db, TEXT_BLANK)
# they should be equal because they should both be blank since we're not encrypting blank.
serializer = getParentCharFieldSerializerClass(self.key_string)(parent_model_from_db)
data = serializer.data
del data['id']
serialized_model = JSONRenderer().render(data)
serialized_model = json.dumps(json.loads(serialized_model)) #get rid of space due to differences in parser output....
self.assertEqual(serialized_model, json_string)
def test_cryptingcharfield_text_greater_than_max_length(self):
json_string = json.dumps({'char_field': '1234567'})
stream = BytesIO(json_string)
data = JSONParser().parse(stream)
deserializer = getParentCharFieldMaxSixLengthSerializerClass(self.key_string)(data=data)
self.assertFalse(deserializer.is_valid())
def test_cryptingcharfield_text_less_than_max_length(self):
json_string = json.dumps({'char_field': '12345'})
stream = BytesIO(json_string)
data = JSONParser().parse(stream)
deserializer = getParentCharFieldMaxSixLengthSerializerClass(self.key_string)(data=data)
self.assertTrue(deserializer.is_valid())
def test_cryptingcharfield_text_greater_than_min_length(self):
json_string = json.dumps({'char_field': '1234567'})
stream = BytesIO(json_string)
data = JSONParser().parse(stream)
deserializer = getParentCharFieldMinSixLengthSerializerClass(self.key_string)(data=data)
self.assertTrue(deserializer.is_valid())
def test_cryptingcharfield_text_less_than_min_length(self):
json_string = json.dumps({'char_field': '12345'})
stream = BytesIO(json_string)
data = JSONParser().parse(stream)
deserializer = getParentCharFieldMinSixLengthSerializerClass(self.key_string)(data=data)
self.assertFalse(deserializer.is_valid())
def test_cryptingtextfield(self):
print(self.key_string)
json_string = json.dumps({'text_field': TEXT})
stream = BytesIO(json_string)
data = JSONParser().parse(stream)
deserializer = getParentTextFieldSerializerClass(self.key_string)(data=data)
deserializer.is_valid()
deserializer.save()
parent_model_from_db = Parent.objects.get(pk=3)
parent_model_char_field_from_db = parent_model_from_db.text_field
self.assertNotEqual(parent_model_char_field_from_db, TEXT)
serializer = getParentTextFieldSerializerClass(self.key_string)(parent_model_from_db)
data = serializer.data
print(parent_model_char_field_from_db)
print(data['text_field'])
del data['id']
serialized_model = JSONRenderer().render(data)
serialized_model = json.dumps(json.loads(serialized_model)) #get rid of space due to differences in parser output....
self.assertEqual(serialized_model, json_string)
class SignedModelUnitTests(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_signed_model(self):
signed_parent = SignedParent(**{
'char_field': 'I am the char field',
'text_field': 'I am the text field',
'bool_field': True,
'datetime_field': datetime.datetime.now()
})
signed_parent.save()
signed_parents = SignedParent.objects.get(pk=1)
signed_parent.char_field = 'I am the new char field data'
signed_parent.save()
signed_parents = SignedParent.objects.get(pk=1)
#when saving or getting a signed parent doesn't throw an exception the signature validated correctly.
| russellmorley/django_rest_cryptingfields | tests/tests.py | Python | mit | 7,474 |
#
# Copyright (c) 2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
The main interface through which you should interact with the Strata API.
'''
from redhat_support_lib.infrastructure import contextmanager
from redhat_support_lib.infrastructure.connectionspool import ConnectionsPool
from redhat_support_lib.infrastructure.proxy import Proxy
from redhat_support_lib.infrastructure.contextmanager import Mode
from redhat_support_lib.infrastructure.brokers import solutions
from redhat_support_lib.infrastructure.brokers import articles
from redhat_support_lib.infrastructure.brokers import cases
from redhat_support_lib.infrastructure.brokers import groups
from redhat_support_lib.infrastructure.brokers import users
from redhat_support_lib.infrastructure.brokers import comments
from redhat_support_lib.infrastructure.brokers import attachments
from redhat_support_lib.infrastructure.brokers import problems
from redhat_support_lib.infrastructure.brokers import entitlements
from redhat_support_lib.infrastructure.brokers import products
from redhat_support_lib.infrastructure.brokers import values
from redhat_support_lib.infrastructure.brokers import InstanceMaker
from redhat_support_lib.infrastructure.brokers import symptoms
from redhat_support_lib.infrastructure.brokers import search
from redhat_support_lib.utils import reporthelper
import redhat_support_lib.utils.confighelper as confighelper
from redhat_support_lib.xml import report
import redhat_support_lib.version as version
import logging
__author__ = 'Keith Robertson <kroberts@redhat.com>'
STREAM_LOG_FORMAT = '%(levelname)s: %(message)s'
USER_AGENT = 'redhat-support-lib-%s' % (version.version)
logger = logging.getLogger("redhat_support_lib.infrastructure.proxy")
class API(object):
def __init__(self,
username,
password,
url='https://api.access.redhat.com',
key_file=None,
cert_file=None,
proxy_url=None,
proxy_user=None,
proxy_pass=None,
ftp_host='dropbox.redhat.com',
ftp_port=21,
ftp_user=None,
ftp_pass=None,
ftp_dir="/incoming",
timeout=None,
userAgent=None,
no_verify_ssl=False,
ssl_ca=None):
"""
Initialize an instance of the Red Hat Support Library
:param username: User name for Red Hat Customer Portal
:type username: string
:param password: Password for Red Hat Customer Portal
:type password: string
:param url:
Strata REST URL (by default this is https://api.access.redhat.com)
:type url: string
:param key_file:
SSL key location for SSL authentication (not implemented)
:type key_file: string
:param cert_file:
SSL certificate location for SSL authentication (not implemented)
:type cert_file: string
:param proxy_url: URL for HTTP/HTTPS proxy server (optional)
:type proxy_url: string
:param proxy_user: User name for HTTP/HTTPS proxy server (optional)
:type proxy_user: string
:param proxy_pass: Password for HTTP/HTTPS proxy server (optional)
:type proxy_pass: string
:param timeout: Request timeout (optional)
:type timeout: string
:param userAgent: User agent to set for API communications (optional)
:type userAgent: string
:param no_verify_ssl: If True, don't verify server identity (optional)
:type no_verify_ssl: boolean
:param ssl_ca: Path to an alternative certificate authority to trust
:type ssl_ca: string/filepath
:returns: Strata API object
"""
# Make sure logger is initialized
if len(logging.getLogger().handlers) == 0:
logging.basicConfig(level=logging.CRITICAL)
httpdebug = False
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
httpdebug = True
self._ua = None
if userAgent:
ua = {'User-Agent': userAgent}
else:
ua = {'User-Agent': USER_AGENT}
config = confighelper.get_config_helper()
config.username = username
config.password = password
config.url = url
config.key_file = key_file
config.cert_file = cert_file
config.proxy_url = proxy_url
config.proxy_user = proxy_user
config.proxy_pass = proxy_pass
config.ftp_host = ftp_host
config.ftp_port = ftp_port
config.ftp_user = ftp_user
config.ftp_pass = ftp_pass
config.ftp_dir = ftp_dir
config.timeout = timeout
config.userAgent = ua
config.http_debug = httpdebug
config.no_verify_ssl = no_verify_ssl
config.ssl_ca = ssl_ca
self.config = config
contextmanager.add('proxy',
Proxy(ConnectionsPool(url=config.url,
key_file=config.key_file,
cert_file=config.cert_file,
timeout=config.timeout,
username=config.username,
password=config.password,
proxy_url=config.proxy_url,
proxy_user=config.proxy_user,
proxy_pass=config.proxy_pass,
debug=config.http_debug,
noverify=config.no_verify_ssl,
ssl_ca=config.ssl_ca),
config.userAgent),
Mode.R)
# Initialize the container classes.
self.solutions = solutions()
self.articles = articles()
self.cases = cases()
self.groups = groups()
self.users = users()
self.comments = comments()
self.attachments = attachments()
self.problems = problems()
self.entitlements = entitlements()
self.products = products()
self.symptoms = symptoms()
self.values = values()
self.search = search()
self.im = InstanceMaker()
def disconnect(self):
        ''' Terminate the server connection(s). '''
contextmanager._remove('proxy', force=True)
@classmethod
def make_report(cls,
path,
custom=None,
max_file_size=reporthelper.MAX_FILE_SIZE_BYTES,
report_dir=None):
'''
        A report file is made from a path, which can be either a single file
        or a directory. The `custom` param allows custom name/value pairs to
        be entered into the XML. Typical use is to supply only the path.
:param path: the file or folder from which a report should be made
:type path: string
:param custom:
            A dictionary of bindings; each key is a binding's name and each
            value is the binding's value.
:type custom: dict
:param max_file_size:
The max size (in bytes) of a file which should be included in
content.xml.
:type max_file_size: int
:param report_dir:
Path to save the generated report to, a subdirectory will be
created by :func:`tempfile.mkdtemp`. This value will be /tmp
by default.
:type report_dir: string
Example:
.. code-block:: python
api.make_report("/var/spool/abrt/ccpp-2013-03-15-15:26:39-2202")
:returns:
The path to an XML file or a TGZ depending on the size of 'path'
:rtype: string'''
return reporthelper.make_report(path,
custom,
max_file_size,
report_dir)
@classmethod
def process_report_file(cls,
path):
'''
A utility function which returns a redhat_support_lib.xml.report object
given a report file's content.xml. The report object can then be
inspected to see what was in the content.xml.
:param path: A path to a report file's content.xml
:type path: string
:returns: A redhat_support_lib.xml.report object
'''
return report.parse(path, False)
| redhataccess/redhat-support-lib-python | src/redhat_support_lib/api.py | Python | apache-2.0 | 9,207 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for VerifyNotificationChannel
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-monitoring
# [START monitoring_v3_generated_NotificationChannelService_VerifyNotificationChannel_async]
from google.cloud import monitoring_v3
async def sample_verify_notification_channel():
# Create a client
client = monitoring_v3.NotificationChannelServiceAsyncClient()
# Initialize request argument(s)
request = monitoring_v3.VerifyNotificationChannelRequest(
name="name_value",
code="code_value",
)
# Make the request
response = await client.verify_notification_channel(request=request)
# Handle the response
print(response)
# [END monitoring_v3_generated_NotificationChannelService_VerifyNotificationChannel_async]
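# To drive this coroutine outside an existing event loop one could use, for
# example, ``asyncio.run(sample_verify_notification_channel())``
# (requires ``import asyncio``).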
| googleapis/python-monitoring | samples/generated_samples/monitoring_v3_generated_notification_channel_service_verify_notification_channel_async.py | Python | apache-2.0 | 1,617 |
""" Submissions Admin Views. """
from django.contrib import admin
from django.urls import reverse
from django.utils.html import format_html
from submissions.models import Score, ScoreSummary, StudentItem, Submission, TeamSubmission
class StudentItemAdminMixin:
"""Mix this class into anything that has a student_item fkey."""
search_fields = (
'student_item__course_id',
'student_item__student_id',
'student_item__item_id',
'student_item__id'
)
def course_id(self, obj):
return obj.student_item.course_id
course_id.admin_order_field = 'student_item__course_id'
def item_id(self, obj):
return obj.student_item.item_id
item_id.admin_order_field = 'student_item__item_id'
def student_id(self, obj):
return obj.student_item.student_id
student_id.admin_order_field = 'student_item__student_id'
def student_item_id(self, obj):
""" Formated student item id. """
url = reverse(
'admin:submissions_studentitem_change',
args=[obj.student_item.id]
)
return format_html(f'<a href="{url}">{obj.student_item.id}</a>')
student_item_id.admin_order_field = 'student_item__id'
student_item_id.short_description = 'S.I. ID'
class StudentItemAdmin(admin.ModelAdmin):
list_display = ('id', 'course_id', 'item_type', 'item_id', 'student_id')
list_filter = ('item_type',)
search_fields = ('id', 'course_id', 'item_type', 'item_id', 'student_id')
readonly_fields = ('course_id', 'item_type', 'item_id', 'student_id')
class SubmissionAdmin(admin.ModelAdmin, StudentItemAdminMixin):
""" Student Submission Admin View. """
list_display = (
'id', 'uuid',
'course_id', 'item_id', 'student_id', 'student_item_id',
'attempt_number', 'submitted_at',
)
list_display_links = ('id', 'uuid')
list_filter = ('student_item__item_type',)
readonly_fields = (
'student_item_id',
'course_id', 'item_id', 'student_id',
'attempt_number', 'submitted_at', 'created_at',
'answer', 'all_scores',
)
search_fields = ('id', 'uuid') + StudentItemAdminMixin.search_fields
# We're creating our own explicit link and displaying parts of the
# student_item in separate fields -- no need to display this as well.
exclude = ('student_item',)
def all_scores(self, submission):
return "\n".join(
"{}/{} - {}".format(
score.points_earned, score.points_possible, score.created_at
)
for score in Score.objects.filter(submission=submission)
)
class SubmissionInlineAdmin(admin.TabularInline, StudentItemAdminMixin):
""" Inline admin for TeamSubmissions to view individual Submissions """
model = Submission
readonly_fields = ('uuid', 'student_id', 'status')
exclude = ('student_item', 'attempt_number', 'submitted_at', 'answer')
extra = 0
class TeamSubmissionAdmin(admin.ModelAdmin):
""" Student Submission Admin View. """
list_display = ('id', 'uuid', 'course_id', 'item_id', 'team_id', 'status')
search_fields = ('uuid', 'course_id', 'item_id', 'team_id')
fields = ('uuid', 'attempt_number', 'submitted_at', 'course_id', 'item_id', 'team_id', 'submitted_by', 'status')
inlines = (SubmissionInlineAdmin,)
class ScoreAdmin(admin.ModelAdmin, StudentItemAdminMixin):
""" Student Score Admin View. """
list_display = (
'id',
'course_id', 'item_id', 'student_id', 'student_item_id',
'points', 'created_at'
)
list_filter = ('student_item__item_type',)
readonly_fields = (
'student_item_id',
'student_item',
'submission',
'points_earned',
'points_possible',
'reset',
)
search_fields = ('id', ) + StudentItemAdminMixin.search_fields
def points(self, score):
return f"{score.points_earned}/{score.points_possible}"
class ScoreSummaryAdmin(admin.ModelAdmin, StudentItemAdminMixin):
""" Student Score Summary Admin View. """
list_display = (
'id',
'course_id', 'item_id', 'student_id', 'student_item_id',
'latest', 'highest',
)
search_fields = ('id', ) + StudentItemAdminMixin.search_fields
readonly_fields = (
'student_item_id', 'student_item', 'highest_link', 'latest_link'
)
exclude = ('highest', 'latest')
def highest_link(self, score_summary):
url = reverse(
'admin:submissions_score_change', args=[score_summary.highest.id]
)
return format_html(f'<a href="{url}">{score_summary.highest}</a>')
highest_link.short_description = 'Highest'
def latest_link(self, score_summary):
url = reverse(
'admin:submissions_score_change', args=[score_summary.latest.id]
)
return format_html(f'<a href="{url}">{score_summary.latest}</a>')
latest_link.short_description = 'Latest'
admin.site.register(Score, ScoreAdmin)
admin.site.register(StudentItem, StudentItemAdmin)
admin.site.register(Submission, SubmissionAdmin)
admin.site.register(TeamSubmission, TeamSubmissionAdmin)
admin.site.register(ScoreSummary, ScoreSummaryAdmin)
| edx/edx-submissions | submissions/admin.py | Python | agpl-3.0 | 5,223 |
# -*- coding: UTF-8 -*-
# Copyright 2017 Luc Saffre
#
# License: BSD (see file COPYING for details)
"""
"""
from lino.api import rt
def objects():
from lino_xl.lib.phones.mixins import ContactDetailsOwner
for m in rt.models_by_base(ContactDetailsOwner):
for p in m.objects.all():
p.propagate_contact_details()
yield p
| khchine5/xl | lino_xl/lib/phones/fixtures/demo2.py | Python | bsd-2-clause | 363 |
from sympy import Symbol, S, oo, sqrt
from sympy.calculus.codomain import codomain, not_empty_in
from sympy.sets.sets import Interval, FiniteSet, Complement, Union
from sympy.utilities.pytest import XFAIL, raises
def test_codomain():
x = Symbol('x', real=True)
assert codomain(x, Interval(-1, 1), x) == Interval(-1, 1)
assert codomain(x, Interval(0, 1, True, True), x) == \
Interval(0, 1, True, True)
assert codomain(x, Interval(1, 2, True, False), x) == Interval(1, 2, True, False)
assert codomain(x, Interval(1, 2, False, True), x) == Interval(1, 2, False, True)
assert codomain(x**2, Interval(-1, 1), x) == Interval(0, 1)
assert codomain(x**3, Interval(0, 1), x) == Interval(0, 1)
assert codomain(x/(x**2 - 4), Interval(3, 4), x) == Interval(S(1)/3, S(3)/5)
assert codomain(1, Interval(-1, 4), x) == FiniteSet(1)
assert codomain(x, Interval(-oo, oo), x) == S.Reals
assert codomain(1/x**2, FiniteSet(1, 2, -1, 0), x) == FiniteSet(1, S(1)/4)
assert codomain(x, FiniteSet(1, -1, 3, 5), x) == FiniteSet(-1, 1, 3, 5)
assert codomain(x**2 - x, FiniteSet(1, -1, 3, 5, -oo), x) == \
FiniteSet(0, 2, 6, 20, oo)
assert codomain(x**2/(x - 4), FiniteSet(4), x) == S.EmptySet
assert codomain(x**2 - x, FiniteSet(S(1)/2, -oo, oo, 2), x) == \
FiniteSet(S(-1)/4, 2, oo)
assert codomain(x**2, Interval(-1, 1, True, True), x) == Interval(0, 1, False, True)
assert codomain(x**2, Interval(-1, 1, False, True), x) == Interval(0, 1)
assert codomain(x**2, Interval(-1, 1, True, False), x) == Interval(0, 1)
assert codomain(1/x, Interval(0, 1), x) == Interval(1, oo)
assert codomain(1/x, Interval(-1, 1), x) == Union(Interval(-oo, -1), Interval(1, oo))
assert codomain(1/x**2, Interval(-1, 1), x) == Interval(1, oo)
assert codomain(1/x**2, Interval(-1, 1, True, False), x) == Interval(1, oo)
assert codomain(1/x**2, Interval(-1, 1, True, True), x) == \
Interval(1, oo, True, True)
assert codomain(1/x**2, Interval(-1, 1, False, True), x) == Interval(1, oo)
assert codomain(1/x, Interval(1, 2), x) == Interval(S(1)/2, 1)
assert codomain(1/x**2, Interval(-2, -1, True, True), x) == \
Interval(S(1)/4, 1, True, True)
assert codomain(x**2/(x - 4), Interval(-oo, oo), x) == \
Complement(S.Reals, Interval(0, 16, True, True))
assert codomain(x**2/(x - 4), Interval(3, 4), x) == Interval(-oo, -9)
assert codomain(-x**2/(x - 4), Interval(3, 4), x) == Interval(9, oo)
assert codomain((x**2 - x)/(x**3 - 1), S.Reals, x) == Interval(-1, S(1)/3, False, True)
assert codomain(-x**2 + 1/x, S.Reals, x) == S.Reals
assert codomain(x**2 - 1/x, S.Reals, x) == S.Reals
assert codomain(x**2, Union(Interval(1, 2), FiniteSet(3)), x) == \
Union(Interval(1, 4), FiniteSet(9))
assert codomain(x/(x**2 - 4), Union(Interval(-oo, 1), Interval(0, oo)), x) == S.Reals
assert codomain(x, Union(Interval(-1, 1), FiniteSet(-oo)), x) == \
Union(Interval(-1, 1), FiniteSet(-oo))
assert codomain(x**2 - x, Interval(1, oo), x) == Interval(0, oo)
raises(ValueError, lambda: codomain(sqrt(x), Interval(-1, 2), x))
def test_not_empty_in():
from sympy.abc import x
a = Symbol('a', real=True)
assert not_empty_in(FiniteSet(x, 2*x).intersect(Interval(1, 2, True, False)), x) == \
Interval(S(1)/2, 2, True, False)
assert not_empty_in(FiniteSet(x, x**2).intersect(Interval(1, 2)), x) == \
Union(Interval(-sqrt(2), -1), Interval(1, 2))
assert not_empty_in(FiniteSet(x**2 + x, x).intersect(Interval(2, 4)), x) == \
Union(Interval(-sqrt(17)/2 - S(1)/2, -2), Interval(1, -S(1)/2 + sqrt(17)/2), Interval(2, 4))
assert not_empty_in(FiniteSet(x/(x - 1)).intersect(S.Reals), x) == Complement(S.Reals, FiniteSet(1))
assert not_empty_in(FiniteSet(a/(a - 1)).intersect(S.Reals), a) == Complement(S.Reals, FiniteSet(1))
assert not_empty_in(FiniteSet((x**2 - 3*x + 2)/(x - 1)).intersect(S.Reals), x) == \
Complement(S.Reals, FiniteSet(1))
assert not_empty_in(FiniteSet(3, 4, x/(x - 1)).intersect(Interval(2, 3)), x) == \
Union(Interval(S(3)/2, 2), FiniteSet(3))
assert not_empty_in(FiniteSet(x/(x**2 - 1)).intersect(S.Reals), x) == \
Complement(S.Reals, FiniteSet(-1, 1))
assert not_empty_in(FiniteSet(x, x**2).intersect(Union(Interval(1, 3, True, True), Interval(4, 5))), x) == \
Union(Interval(-sqrt(5), -2), Interval(-sqrt(3), -1, True, True), Interval(1, 3, True, True), Interval(4, 5))
assert not_empty_in(FiniteSet(1).intersect(Interval(3, 4)), x) == S.EmptySet
assert not_empty_in(FiniteSet(x**2/(x + 2)).intersect(Interval(1, oo)), x) == \
Union(Interval(-2, -1, True, False), Interval(2, oo))
| atreyv/sympy | sympy/calculus/tests/test_codomain.py | Python | bsd-3-clause | 4,793 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixMDO3000 import *
class tektronixMDO3012(tektronixMDO3000):
"Tektronix MDO3012 IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MDO3012')
super(tektronixMDO3012, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 100e6
# AFG option
self._output_count = 1
self._init_channels()
self._init_outputs()
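# A usage sketch (assuming the parent ``ivi`` package exposes this driver in
# the usual way; the resource string is a placeholder):
#
#   import ivi
#   scope = ivi.tektronix.tektronixMDO3012("TCPIP0::192.168.1.10::INSTR")
#   scope.close()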
| Diti24/python-ivi | ivi/tektronix/tektronixMDO3012.py | Python | mit | 1,724 |
""" This module contains the Video and Movie class and functions. """
import webbrowser
class Video():
def __init__(self, title, duration):
self.title = title
        self.duration = duration
class Movie(Video): # The main class for movie-trailer website
    valid_ratings = ["G", "PG", "PG-13", "R"]  # Class-level constant shared by all instances
def __init__(self, title, duration, movie_storyline, poster_image_url,
trailer_youtube_url):
Video.__init__(self, title, duration)
self.storyline = movie_storyline
self.poster_image_url = poster_image_url
self.trailer_youtube_url = trailer_youtube_url
def show_trailer(self):
webbrowser.open(self.trailer_youtube_url)
class Tvshow(Video):
def __init__(self, title, duration, season, episode, tv_station):
Video.__init__(self, title, duration)
self.season = season
self.episode = episode
self.tv_station = tv_station
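# A minimal usage sketch (all values below are illustrative placeholders):
#
#   interstellar = Movie("Interstellar", 169,
#                        "A team travels through a wormhole in space.",
#                        "https://example.com/interstellar-poster.jpg",
#                        "https://www.youtube.com/watch?v=zSWdZVtXT7E")
#   interstellar.show_trailer()  # opens the trailer in the default web browser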
| cooldiplomat/movie-trailer | media.py | Python | mit | 1,015 |
# -*- coding: utf-8 -*-
import re
import itertools
from scrapy import log
from scrapy.selector import Selector
from summaries.items import SummariesItem
from thread_float_bbs import (
SequenceAppend,
ThreadFloatBbsSpider
)
class OnecallSpider(ThreadFloatBbsSpider):
""" for onecall.livedoor.biz
"""
    # TODO: deprecated, because the source is an image the title can no longer be extracted
name = 'onecall'
allowed_domains = ['onecall.livedoor.biz']
start_urls = ['http://onecall.livedoor.biz/index.rdf']
def spider_page(self, response):
""" scraping page
"""
sel = Selector(response)
image_urls = []
contents = SequenceAppend({
"index": int,
"subject": '',
"body": ''
})
main = sel.css('div.article-body-inner')
for sub, body in itertools.izip(main.css('.name'), main.css('.onecall')):
image_urls.extend(sub.css('img').xpath('@src').extract())
image_urls.extend(body.css('img').xpath('@src').extract())
contents.append({
"subject": sub.extract(),
"body": body.extract()
})
item = dict(
posted=False,
source=self.extract_source(sel),
url=response.url,
title=self.get_text(sel.css('h1.article-title a')),
tags=self.extract_tags(main, response),
contents=contents.result(),
image_urls=image_urls
)
# set title from source.
return self.request_title(item['source'], SummariesItem(**item))
def extract_source(self, selector):
""" Sourceを抽出
"""
try:
url = [
text for text
in selector.css('div').xpath('text()').extract()
if text.find('2ch.net') != -1
or text.find('2ch.sc') != -1
][0]
            return re.search(r"(?P<url>https?://[^\s]+)", url).group("url")
except Exception as exc:
log.msg(
format=("Extract source (error): "
"Error selector %(selector)s "
"url `%(url)s`: %(errormsg)s"),
level=log.WARNING,
spider=self,
selector=selector,
url=selector.response.url,
errormsg=str(exc))
return None
def extract_tags(self, selector, response):
""" tagsを抽出
"""
try:
feed = self.get_feed(response.url)
return list({feed['tags'][0]['term']})
except Exception as exc:
log.msg(
format=("Extract tags (error): "
"Error selector %(selector)s "
"url `%(url)s`: %(errormsg)s"),
level=log.WARNING,
spider=self,
selector=selector,
url=response.url,
errormsg=str(exc))
return []
| ikeikeikeike/scrapy-2ch-summary-spiders | onecall.py | Python | mit | 3,020 |
from django.forms import ModelForm, ValidationError
from app.models import AppOne
import re
class FormAppOne(ModelForm):
class Meta:
model = AppOne
def clean_name(self):
cleaned_name = self.cleaned_data.get('name')
        if re.findall(r'\d+', cleaned_name):
raise ValidationError("Name must be only text")
return cleaned_name
| valdergallo/mock_django_orm | app/forms.py | Python | mit | 374 |
import _plotly_utils.basevalidators
class GridwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="gridwidth", parent_name="layout.ternary.caxis", **kwargs
):
super(GridwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 0),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/layout/ternary/caxis/_gridwidth.py | Python | mit | 467 |
import contextlib
import os
import sys
from itertools import takewhile
from exceptions import BundleError
__all__ = ('md5_constructor', 'pickle', 'set', 'StringIO',
'common_path_prefix', 'working_directory')
if sys.version_info >= (2, 5):
import hashlib
md5_constructor = hashlib.md5
else:
import md5
md5_constructor = md5.new
try:
import cPickle as pickle
except ImportError:
import pickle
try:
set
except NameError:
from sets import Set as set
else:
set = set
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def common_path_prefix(paths, sep=os.path.sep):
"""os.path.commonpath() is completely in the wrong place; it's
useless with paths since it only looks at one character at a time,
see http://bugs.python.org/issue10395
This replacement is from:
http://rosettacode.org/wiki/Find_Common_Directory_Path#Python
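    Example (on a POSIX system)::
        >>> common_path_prefix(['/usr/local/lib', '/usr/local/share'])
        '/usr/local'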
"""
def allnamesequal(name):
return all(n==name[0] for n in name[1:])
bydirectorylevels = zip(*[p.split(sep) for p in paths])
return sep.join(x[0] for x in takewhile(allnamesequal, bydirectorylevels))
@contextlib.contextmanager
def working_directory(directory=None, filename=None):
"""A context manager which changes the working directory to the given
path, and then changes it back to its previous value on exit.
Filters will often find this helpful.
Instead of a ``directory``, you may also give a ``filename``, and the
    working directory will be set to the directory that file is in.
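    Example (the path is illustrative)::
        with working_directory(filename='/srv/assets/bundle.css'):
            pass  # relative paths now resolve against /srv/assets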
"""
assert bool(directory) != bool(filename) # xor
if not directory:
directory = os.path.dirname(filename)
prev_cwd = os.getcwd()
os.chdir(directory)
yield
os.chdir(prev_cwd)
def make_option_resolver(clazz=None, attribute=None, classes=None,
allow_none=True, desc=None):
"""Returns a function which can resolve an option to an object.
The option may given as an instance or a class (of ``clazz``, or
duck-typed with an attribute ``attribute``), or a string value referring
to a class as defined by the registry in ``classes``.
    This supports arguments, so an option may look like this:
cache:/tmp/cachedir
If this must instantiate a class, it will pass such an argument along,
if given. In addition, if the class to be instantiated has a classmethod
``make()``, this method will be used as a factory, and will be given an
Environment object (if one has been passed to the resolver). This allows
classes that need it to initialize themselves based on an Environment.
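    Example (``MemoryCache`` and ``FileCache`` are hypothetical classes)::
        resolve_cache = make_option_resolver(
            attribute='get',
            classes={'memory': MemoryCache, 'file': FileCache},
            desc='a cache')
        cache = resolve_cache('file:/tmp/cachedir')  # -> FileCache('/tmp/cachedir')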
"""
assert clazz or attribute or classes
    desc_string = ' to %s' % desc if desc else ''
def instantiate(clazz, env, *a, **kw):
# Create an instance of clazz, via the Factory if one is defined,
# passing along the Environment, or creating the class directly.
if hasattr(clazz, 'make'):
# make() protocol is that if e.g. the get_manifest() resolver takes
# an env, then the first argument of the factory is the env.
args = (env,) + a if env is not None else a
return clazz.make(*args, **kw)
return clazz(*a, **kw)
def resolve_option(option, env=None):
the_clazz = clazz() if callable(clazz) and not isinstance(option, type) else clazz
if not option and allow_none:
return None
# If the value has one of the support attributes (duck-typing).
if attribute and hasattr(option, attribute):
if isinstance(option, type):
return instantiate(option, env)
return option
# If it is the class we support.
if the_clazz and isinstance(option, the_clazz):
return option
elif isinstance(option, type) and issubclass(option, the_clazz):
return instantiate(option, env)
# If it is a string
elif isinstance(option, basestring):
parts = option.split(':', 1)
key = parts[0]
arg = parts[1] if len(parts) > 1 else None
if key in classes:
return instantiate(classes[key], env, *([arg] if arg else []))
raise ValueError('%s cannot be resolved%s' % (option, desc_string))
resolve_option.__doc__ = """Resolve ``option``%s.""" % desc_string
return resolve_option
def RegistryMetaclass(clazz=None, attribute=None, allow_none=True, desc=None):
"""Returns a metaclass which will keep a registry of all subclasses, keyed
by their ``id`` attribute.
The metaclass will also have a ``resolve`` method which can turn a string
into an instance of one of the classes (based on ``make_option_resolver``).
"""
def eq(self, other):
"""Return equality with config values that instantiate this."""
return (hasattr(self, 'id') and self.id == other) or\
id(self) == id(other)
def unicode(self):
return "%s" % (self.id if hasattr(self, 'id') else repr(self))
class Metaclass(type):
REGISTRY = {}
def __new__(mcs, name, bases, attrs):
if not '__eq__' in attrs:
attrs['__eq__'] = eq
if not '__unicode__' in attrs:
attrs['__unicode__'] = unicode
if not '__str__' in attrs:
attrs['__str__'] = unicode
new_klass = type.__new__(mcs, name, bases, attrs)
if hasattr(new_klass, 'id'):
mcs.REGISTRY[new_klass.id] = new_klass
return new_klass
resolve = staticmethod(make_option_resolver(
clazz=clazz,
attribute=attribute,
allow_none=allow_none,
desc=desc,
classes=REGISTRY
))
return Metaclass
def cmp_debug_levels(level1, level2):
"""cmp() for debug levels, returns -1, 0 or +1 indicating which debug
level is higher than the other one."""
level_ints = { False: 0, 'merge': 1, True: 2 }
try:
return cmp(level_ints[level1], level_ints[level2])
except KeyError, e:
# Not sure if a dependency on BundleError is proper here. Validating
# debug values should probably be done on assign. But because this
# needs to happen in two places (Environment and Bundle) we do it here.
raise BundleError('Invalid debug value: %s' % e)
| mozilla/verbatim | vendor/lib/python/webassets/utils.py | Python | gpl-2.0 | 6,430 |
# -*- coding: utf-8 -*-
try:
from PIL import Image
except ImportError:
import Image
import glob
import os
from module.plugins.internal.OCR import OCR
class LinksaveIn(OCR):
__name__ = "LinksaveIn"
__type__ = "ocr"
__version__ = "0.15"
__status__ = "testing"
__description__ = """Linksave.in ocr plugin"""
__license__ = "GPLv3"
__authors__ = [("pyLoad Team", "admin@pyload.org")]
def init(self):
self.data_dir = os.path.dirname(os.path.abspath(__file__)) + os.sep + "LinksaveIn" + os.sep
def load_image(self, image):
im = Image.open(image)
frame_nr = 0
lut = im.resize((256, 1))
lut.putdata(xrange(256))
lut = list(lut.convert("RGB").getdata())
new = Image.new("RGB", im.size)
npix = new.load()
while True:
try:
im.seek(frame_nr)
except EOFError:
break
frame = im.copy()
pix = frame.load()
for x in xrange(frame.size[0]):
for y in xrange(frame.size[1]):
if lut[pix[x, y]] != (0, 0, 0):
npix[x, y] = lut[pix[x, y]]
frame_nr += 1
new.save(self.data_dir+"unblacked.png")
self.image = new.copy()
self.pixels = self.image.load()
self.result_captcha = ""
def get_bg(self):
stat = {}
cstat = {}
img = self.image.convert("P")
for bgpath in glob.glob(self.data_dir+"bg/*.gif"):
stat[bgpath] = 0
bg = Image.open(bgpath)
bglut = bg.resize((256, 1))
bglut.putdata(xrange(256))
bglut = list(bglut.convert("RGB").getdata())
lut = img.resize((256, 1))
lut.putdata(xrange(256))
lut = list(lut.convert("RGB").getdata())
bgpix = bg.load()
pix = img.load()
for x in xrange(bg.size[0]):
for y in xrange(bg.size[1]):
rgb_bg = bglut[bgpix[x, y]]
rgb_c = lut[pix[x, y]]
try:
cstat[rgb_c] += 1
except Exception:
cstat[rgb_c] = 1
                    if rgb_bg == rgb_c:
stat[bgpath] += 1
max_p = 0
bg = ""
for bgpath, value in stat.items():
if max_p < value:
bg = bgpath
max_p = value
return bg
def substract_bg(self, bgpath):
bg = Image.open(bgpath)
img = self.image.convert("P")
bglut = bg.resize((256, 1))
bglut.putdata(xrange(256))
bglut = list(bglut.convert("RGB").getdata())
lut = img.resize((256, 1))
lut.putdata(xrange(256))
lut = list(lut.convert("RGB").getdata())
bgpix = bg.load()
pix = img.load()
orgpix = self.image.load()
for x in xrange(bg.size[0]):
for y in xrange(bg.size[1]):
rgb_bg = bglut[bgpix[x, y]]
rgb_c = lut[pix[x, y]]
                if rgb_c == rgb_bg:
orgpix[x, y] = (255, 255, 255)
def eval_black_white(self):
new = Image.new("RGB", (140, 75))
pix = new.load()
orgpix = self.image.load()
thresh = 4
for x in xrange(new.size[0]):
for y in xrange(new.size[1]):
rgb = orgpix[x, y]
r, g, b = rgb
pix[x, y] = (255, 255, 255)
if r > max(b, g)+thresh:
pix[x, y] = (0, 0, 0)
if g < min(r, b):
pix[x, y] = (0, 0, 0)
if g > max(r, b)+thresh:
pix[x, y] = (0, 0, 0)
if b > max(r, g)+thresh:
pix[x, y] = (0, 0, 0)
self.image = new
self.pixels = self.image.load()
def recognize(self, image):
self.load_image(image)
bg = self.get_bg()
self.substract_bg(bg)
self.eval_black_white()
self.to_greyscale()
self.image.save(self.data_dir+"cleaned_pass1.png")
self.clean(4)
self.clean(4)
self.image.save(self.data_dir+"cleaned_pass2.png")
letters = self.split_captcha_letters()
final = ""
for n, letter in enumerate(letters):
self.image = letter
            self.image.save(self.data_dir + "letter%d.png" % n)
self.run_tesser(True, True, False, False)
final += self.result_captcha
return final
| fzimmermann89/pyload | module/plugins/captcha/LinksaveIn.py | Python | gpl-3.0 | 4,613 |
import urllib2,smtplib,sys,time,datetime
from ConfigParser import SafeConfigParser
#Parsing Config File
parser = SafeConfigParser()
parser.read('config.cfg')
smtpUrl= parser.get('MMI', 'smtpUrl')
username = parser.get('MMI', 'username')
password = parser.get('MMI', 'password')
fromaddr = parser.get('MMI', 'fromAddress')
toaddrs = parser.get('MMI', 'toAddress')
interval = parser.get('MMI', 'timerInterval')
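# An example config.cfg (all values are placeholders):
#
#   [MMI]
#   smtpUrl = smtp.example.com:587
#   username = me@example.com
#   password = app-password
#   fromAddress = me@example.com
#   toAddress = you@example.com
#   timerInterval = 300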
#Main
while True:
pub_ip = urllib2.urlopen("http://ipecho.net/plain").read()#grabs ip from http://ipecho.net/plain
ip_log=open("iplog.txt","r")
prev_ip=ip_log.read()
print prev_ip
ip_log.close()
    if prev_ip == pub_ip:  # check whether the ip has changed or not
        print "\n Your IP %s did not change so mail is not sent" % prev_ip
else:
ip_log=open("iplog.txt","w")
ip_log.write(pub_ip) #write the ip to the log
ip_log.close()
now = datetime.datetime.now()#gets time
day = now.strftime("%Y%m%d_%H")
print "\n The New IP: %s ip is being sent" %pub_ip
msg = "The new ip is-"+ pub_ip #email body
# The actual mail send
server = smtplib.SMTP(smtpUrl)
server.starttls()
server.login(username,password)
server.sendmail(fromaddr, toaddrs, """Subject: %s\r\n\r\n%s\r\n.\r\n""" % (
'IP address-homepc'+day,msg))
server.quit()
    time.sleep(float(interval))  # timer
| sai-prasanna/sendmyip | sendmyip/sendmyip.py | Python | mit | 1,436 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_interconnect_attachment_facts
description:
- Gather facts for GCP InterconnectAttachment
short_description: Gather facts for GCP InterconnectAttachment
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
    - Each additional filter in the list will be added as an AND condition (filter1
      and filter2).
region:
description:
- Region where the regional interconnect attachment resides.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: " a interconnect attachment facts"
gcp_compute_interconnect_attachment_facts:
region: us-central1
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: facts
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
cloudRouterIpAddress:
description:
- IPv4 address + prefix length to be configured on Cloud Router Interface for
this interconnect attachment.
returned: success
type: str
customerRouterIpAddress:
description:
- IPv4 address + prefix length to be configured on the customer router subinterface
for this interconnect attachment.
returned: success
type: str
interconnect:
description:
- URL of the underlying Interconnect object that this attachment's traffic will
traverse through. Required if type is DEDICATED, must not be set if type is
PARTNER.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
edgeAvailabilityDomain:
description:
- Desired availability domain for the attachment. Only available for type PARTNER,
at creation time. For improved reliability, customers should configure a pair
of attachments with one per availability domain. The selected availability
domain will be provided to the Partner via the pairing key so that the provisioned
circuit will lie in the specified domain. If not specified, the value will
default to AVAILABILITY_DOMAIN_ANY.
returned: success
type: str
pairingKey:
description:
- '[Output only for type PARTNER. Not present for DEDICATED]. The opaque identifier
        of a PARTNER attachment used to initiate provisioning with a selected partner.
        Of the form "XXXXX/region/domain".'
returned: success
type: str
partnerAsn:
description:
- "[Output only for type PARTNER. Not present for DEDICATED]. Optional BGP ASN
for the router that should be supplied by a layer 3 Partner if they configured
BGP on behalf of the customer."
returned: success
type: str
privateInterconnectInfo:
description:
- Information specific to an InterconnectAttachment. This property is populated
if the interconnect that this is attached to is of type DEDICATED.
returned: success
type: complex
contains:
tag8021q:
description:
- 802.1q encapsulation tag to be used for traffic between Google and the
customer, going to and from this network and region.
returned: success
type: int
type:
description:
- The type of InterconnectAttachment you wish to create. Defaults to DEDICATED.
returned: success
type: str
state:
description:
- "[Output Only] The current state of this attachment's functionality."
returned: success
type: str
googleReferenceId:
description:
- Google reference ID, to be used when raising support tickets with Google or
otherwise to debug backend connectivity issues.
returned: success
type: str
router:
description:
- URL of the cloud router to be used for dynamic routing. This router must be
in the same region as this InterconnectAttachment. The InterconnectAttachment
will automatically connect the Interconnect to the network & region within
which the Cloud Router is configured.
returned: success
type: dict
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the
server.
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
candidateSubnets:
description:
- Up to 16 candidate prefixes that can be used to restrict the allocation of
cloudRouterIpAddress and customerRouterIpAddress for this attachment.
- All prefixes must be within link-local address space (169.254.0.0/16) and
must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused
/29 from the supplied candidate prefix(es). The request will fail if all possible
/29s are in use on Google's edge. If not supplied, Google will randomly select
an unused /29 from all of link-local space.
returned: success
type: list
vlanTag8021q:
description:
- The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. When using
PARTNER type this will be managed upstream.
returned: success
type: int
region:
description:
- Region where the regional interconnect attachment resides.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
items = fetch_list(module, collection(module), query_options(module.params['filters']))
if items.get('items'):
items = items.get('items')
else:
items = []
return_value = {'resources': items}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/interconnectAttachments".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
response = auth.get(link, params={'filter': query})
return return_if_object(module, response)
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| t794104/ansible | lib/ansible/modules/cloud/google/gcp_compute_interconnect_attachment_facts.py | Python | gpl-3.0 | 9,851 |
import os
import re
import copy
import math
import time
import glob
import shutil
import pickle
import pathlib
import warnings
import functools
import importlib
import itertools
from ..utils import _get_fn_name, prod, progbar
from .combo_runner import (
nan_like_result,
combo_runner_core,
combo_runner_to_ds,
)
from .case_runner import (
case_runner,
)
from .prepare import (
parse_combos,
parse_constants,
parse_attrs,
parse_fn_args,
parse_cases,
)
from .farming import Runner, Harvester, Sampler, XYZError
BTCH_NM = "xyz-batch-{}.jbdmp"
RSLT_NM = "xyz-result-{}.jbdmp"
FNCT_NM = "xyz-function.clpkl"
INFO_NM = "xyz-settings.jbdmp"
def write_to_disk(obj, fname):
with open(fname, 'wb') as file:
pickle.dump(obj, file)
def read_from_disk(fname):
with open(fname, 'rb') as file:
return pickle.load(file)
@functools.lru_cache(8)
def get_picklelib(picklelib='joblib.externals.cloudpickle'):
return importlib.import_module(picklelib)
def to_pickle(obj, picklelib='joblib.externals.cloudpickle'):
plib = get_picklelib(picklelib)
s = plib.dumps(obj)
return s
def from_pickle(s, picklelib='joblib.externals.cloudpickle'):
plib = get_picklelib(picklelib)
obj = plib.loads(s)
return obj
# --------------------------------- parsing --------------------------------- #
def parse_crop_details(fn, crop_name, crop_parent):
"""Work out how to structure the sowed data.
Parameters
----------
fn : callable, optional
Function to infer name crop_name from, if not given.
crop_name : str, optional
Specific name to give this set of runs.
crop_parent : str, optional
Specific directory to put the ".xyz-{crop_name}/" folder in
with all the cases and results.
Returns
-------
crop_location : str
Full path to the crop-folder.
crop_name : str
Name of the crop.
crop_parent : str
Parent folder of the crop.
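    Example (on a POSIX system)::
        >>> parse_crop_details(None, 'sweep1', '/home/me/project')
        ('/home/me/project/.xyz-sweep1', 'sweep1', '/home/me/project')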
"""
if crop_name is None:
if fn is None:
raise ValueError("Either `fn` or `crop_name` must be give.")
crop_name = _get_fn_name(fn)
crop_parent = crop_parent if crop_parent is not None else os.getcwd()
crop_location = os.path.join(crop_parent, ".xyz-{}".format(crop_name))
return crop_location, crop_name, crop_parent
def parse_fn_farmer(fn, farmer):
if farmer is not None:
if fn is not None:
warnings.warn("'fn' is ignored if a 'Runner', 'Harvester', or "
"'Sampler' is supplied as the 'farmer' kwarg.")
fn = farmer.fn
return fn, farmer
def calc_clean_up_default_res(crop, clean_up, allow_incomplete):
"""Logic for choosing whether to automatically clean up a crop, and what,
if any, the default all-nan result should be.
"""
if clean_up is None:
clean_up = not allow_incomplete
if allow_incomplete:
default_result = crop.all_nan_result
else:
default_result = None
return clean_up, default_result
def check_ready_to_reap(crop, allow_incomplete, wait):
if not (allow_incomplete or wait or crop.is_ready_to_reap()):
raise XYZError("This crop is not ready to reap yet - results are "
"missing. You can reap only finished batches by setting"
" ``allow_incomplete=True``, but be aware this will "
"represent all missing batches with ``np.nan`` and thus"
" might effect data-types.")
class Crop(object):
"""Encapsulates all the details describing a single 'crop', that is,
its location, name, and batch size/number. Also allows tracking of
crop's progress, and experimentally, automatic submission of
workers to grid engine to complete un-grown cases. Can also be instantiated
directly from a :class:`~xyzpy.Runner` or :class:`~xyzpy.Harvester` or
:class:`~Sampler.Crop` instance.
Parameters
----------
fn : callable, optional
Target function - Crop `name` will be inferred from this if
not given explicitly. If given, `Sower` will also default
to saving a version of `fn` to disk for `cropping.grow` to use.
name : str, optional
Custom name for this set of runs - must be given if `fn`
is not.
parent_dir : str, optional
If given, alternative directory to put the ".xyz-{name}/"
folder in with all the cases and results.
save_fn : bool, optional
Whether to save the function to disk for `cropping.grow` to use.
Will default to True if `fn` is given.
batchsize : int, optional
How many cases to group into a single batch per worker.
By default, batchsize=1. Cannot be specified if `num_batches`
is.
num_batches : int, optional
How many total batches to aim for, cannot be specified if
`batchsize` is.
farmer : {xyzpy.Runner, xyzpy.Harvester, xyzpy.Sampler}, optional
A Runner, Harvester or Sampler, instance, from which the `fn` can be
inferred and which can also allow the Crop to reap itself straight to a
dataset or dataframe.
autoload : bool, optional
If True, check for the existence of a Crop written to disk
with the same location, and if found, load it.
See Also
--------
Runner.Crop, Harvester.Crop, Sampler.Crop
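    Examples
    --------
    A minimal sketch (``total_sum`` stands for any user function)::
        crop = Crop(fn=total_sum, batchsize=10)
        crop.sow_combos({'a': range(10), 'b': range(10)})
        crop.grow_missing()   # or grow the batches in separate processes
        results = crop.reap()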
"""
def __init__(
self, *,
fn=None,
name=None,
parent_dir=None,
save_fn=None,
batchsize=None,
num_batches=None,
shuffle=False,
farmer=None,
autoload=True
):
self._fn, self.farmer = parse_fn_farmer(fn, farmer)
self.name = name
self.parent_dir = parent_dir
self.save_fn = save_fn
self.batchsize = batchsize
self.num_batches = num_batches
self.shuffle = shuffle
self._batch_remainder = None
self._all_nan_result = None
# Work out the full directory for the crop
self.location, self.name, self.parent_dir = \
parse_crop_details(self._fn, self.name, self.parent_dir)
# try loading crop information if it exists
if autoload and self.is_prepared():
self._sync_info_from_disk()
# Save function so it can be automatically loaded with all deps?
if (fn is None) and (save_fn is True):
raise ValueError("Must specify a function for it to be saved!")
self.save_fn = save_fn is not False
@property
def runner(self):
if isinstance(self.farmer, Runner):
return self.farmer
elif isinstance(self.farmer, (Harvester, Sampler)):
return self.farmer.runner
else:
return None
# ------------------------------- methods ------------------------------- #
def choose_batch_settings(self, *, combos=None, cases=None):
"""Work out how to divide all cases into batches, i.e. ensure
that ``batchsize * num_batches >= num_cases``.
"""
if combos:
n_combos = prod(len(x) for _, x in combos)
else:
n_combos = 1
if cases:
n_cases = len(cases)
else:
n_cases = 1
# for each case every combination is run
n = n_cases * n_combos
if (self.batchsize is not None) and (self.num_batches is not None):
# Check that they are set correctly
pos_tot = self.batchsize * self.num_batches
if self._batch_remainder is not None:
pos_tot += self._batch_remainder
if not (n <= pos_tot < n + self.batchsize):
                raise ValueError("`batchsize` and `num_batches` cannot both "
                                 "be specified if they do not multiply "
                                 "to the correct number of total cases.")
# Decide based on batchsize
elif self.num_batches is None:
if self.batchsize is None:
self.batchsize = 1
if not isinstance(self.batchsize, int):
raise TypeError("`batchsize` must be an integer.")
if self.batchsize < 1:
raise ValueError("`batchsize` must be >= 1.")
self.num_batches = math.ceil(n / self.batchsize)
self._batch_remainder = 0
# Decide based on num_batches:
else:
# cap at the total number of cases
self.num_batches = min(n, self.num_batches)
if not isinstance(self.num_batches, int):
raise TypeError("`num_batches` must be an integer.")
if self.num_batches < 1:
raise ValueError("`num_batches` must be >= 1.")
self.batchsize, self._batch_remainder = divmod(n, self.num_batches)
def ensure_dirs_exists(self):
"""Make sure the directory structure for this crop exists.
"""
os.makedirs(os.path.join(self.location, "batches"), exist_ok=True)
os.makedirs(os.path.join(self.location, "results"), exist_ok=True)
def save_info(self, combos=None, cases=None, fn_args=None):
"""Save information about the sowed cases.
"""
# If saving Harvester or Runner, strip out function information so
# as just to use pickle.
if self.farmer is not None:
farmer_copy = copy.deepcopy(self.farmer)
farmer_copy.fn = None
farmer_pkl = to_pickle(farmer_copy)
else:
farmer_pkl = None
write_to_disk({
'combos': combos,
'cases': cases,
'fn_args': fn_args,
'batchsize': self.batchsize,
'num_batches': self.num_batches,
'_batch_remainder': self._batch_remainder,
'shuffle': self.shuffle,
'farmer': farmer_pkl,
}, os.path.join(self.location, INFO_NM))
def load_info(self):
"""Load the full settings from disk.
"""
sfile = os.path.join(self.location, INFO_NM)
if not os.path.isfile(sfile):
raise XYZError("Settings can't be found at {}.".format(sfile))
else:
return read_from_disk(sfile)
def _sync_info_from_disk(self, only_missing=True):
"""Load information about the saved cases.
"""
settings = self.load_info()
self.batchsize = settings['batchsize']
self.num_batches = settings['num_batches']
self._batch_remainder = settings['_batch_remainder']
farmer_pkl = settings['farmer']
farmer = (
None if farmer_pkl is None else
from_pickle(farmer_pkl)
)
fn, farmer = parse_fn_farmer(None, farmer)
# if crop already has a harvester/runner. (e.g. was instantiated from
# one) by default don't overwrite from disk
if (self.farmer) is None or (not only_missing):
self.farmer = farmer
if self.fn is None:
self.load_function()
def save_function_to_disk(self):
"""Save the base function to disk using cloudpickle
"""
write_to_disk(to_pickle(self._fn),
os.path.join(self.location, FNCT_NM))
def load_function(self):
"""Load the saved function from disk, and try to re-insert it back into
Harvester or Runner if present.
"""
self._fn = from_pickle(read_from_disk(
os.path.join(self.location, FNCT_NM)))
if self.farmer is not None:
if self.farmer.fn is None:
self.farmer.fn = self._fn
else:
# TODO: check equality?
raise XYZError("Trying to load this Crop's function, {}, from "
"disk but its farmer already has a function "
"set: {}.".format(self._fn, self.farmer.fn))
def prepare(self, combos=None, cases=None, fn_args=None):
"""Write information about this crop and the supplied combos to disk.
Typically done at start of sow, not when Crop instantiated.
"""
self.ensure_dirs_exists()
if self.save_fn:
self.save_function_to_disk()
self.save_info(combos=combos, cases=cases, fn_args=fn_args)
def is_prepared(self):
"""Check whether this crop has been written to disk.
"""
return os.path.exists(os.path.join(self.location, INFO_NM))
def calc_progress(self):
"""Calculate how much progressed has been made in growing the batches.
"""
if self.is_prepared():
self._sync_info_from_disk()
self._num_sown_batches = len(glob.glob(
os.path.join(self.location, "batches", BTCH_NM.format("*"))))
self._num_results = len(glob.glob(
os.path.join(self.location, "results", RSLT_NM.format("*"))))
else:
self._num_sown_batches = -1
self._num_results = -1
def is_ready_to_reap(self):
"""Have all batches been grown?
"""
self.calc_progress()
return (
self._num_results > 0 and
(self._num_results == self.num_sown_batches)
)
def missing_results(self):
"""Return tuple of batches which haven't been grown yet.
"""
self.calc_progress()
def no_result_exists(x):
return not os.path.isfile(
os.path.join(self.location, "results", RSLT_NM.format(x)))
return tuple(filter(no_result_exists, range(1, self.num_batches + 1)))
def delete_all(self):
"""Delete the crop directory and all its contents.
"""
# delete everything
shutil.rmtree(self.location)
@property
def all_nan_result(self):
"""Get a stand-in result for cases which are missing still.
"""
if self._all_nan_result is None:
result_files = glob.glob(
os.path.join(self.location, "results", RSLT_NM.format("*"))
)
if not result_files:
raise XYZError("To infer an all-nan result requires at least "
"one finished result.")
reference_result = read_from_disk(result_files[0])[0]
self._all_nan_result = nan_like_result(reference_result)
return self._all_nan_result
def __str__(self):
# Location and name, underlined
if not os.path.exists(self.location):
return self.location + "\n * Not yet sown, or already reaped * \n"
loc_len = len(self.location)
name_len = len(self.name)
self.calc_progress()
percentage = 100 * self._num_results / self.num_batches
# Progress bar
total_bars = 20
bars = int(percentage * total_bars / 100)
return ("\n"
"{location}\n"
"{under_crop_dir}{under_crop_name}\n"
"{num_results} / {total} batches of size {bsz} completed\n"
"[{done_bars}{not_done_spaces}] : {percentage:.1f}%"
"\n").format(
location=self.location,
under_crop_dir="-" * (loc_len - name_len),
under_crop_name="=" * name_len,
num_results=self._num_results,
total=self.num_batches,
bsz=self.batchsize,
done_bars="#" * bars,
not_done_spaces=" " * (total_bars - bars),
percentage=percentage,
)
def __repr__(self):
if not os.path.exists(self.location):
progress = "*reaped or unsown*"
else:
self.calc_progress()
progress = "{}/{}".format(self._num_results, self.num_batches)
msg = "<Crop(name='{}', progress={}, batchsize={})>"
return msg.format(self.name, progress, self.batchsize)
def parse_constants(self, constants=None):
constants = parse_constants(constants)
if self.runner is not None:
constants = {**self.runner._constants, **constants}
constants = {**self.runner._resources, **constants}
return constants
def sow_combos(
self,
combos,
cases=None,
constants=None,
shuffle=False,
verbosity=1,
batchsize=None,
num_batches=None,
):
"""Sow combos to disk to be later grown, potentially in batches.
Parameters
----------
combos : mapping of arg names to values
The combinations to sow for all or some function arguments.
cases : iterable or mappings, optional
Optionally provide an sequence of individual cases to sow for some
or all function arguments.
constants : mapping, optional
Provide additional constant function values to use when sowing.
shuffle : bool or int, optional
If given, sow the combos in a random order (using ``random.seed``
and ``random.shuffle``), which can be helpful for distributing
resources when not all cases are computationally equal.
verbosity : int, optional
How much information to show when sowing.
batchsize : int, optional
If specified, set a new batchsize for the crop.
num_batches : int, optional
If specified, set a new num_batches for the crop.
"""
if batchsize is not None:
self.batchsize = batchsize
if num_batches is not None:
self.num_batches = num_batches
if shuffle is not None:
self.shuffle = shuffle
combos = parse_combos(combos)
cases = parse_cases(cases)
constants = self.parse_constants(constants)
# Sort to ensure order remains same for reaping results
# (don't want to hash kwargs)
combos = sorted(combos, key=lambda x: x[0])
self.choose_batch_settings(combos=combos, cases=cases)
self.prepare(combos=combos, cases=cases)
with Sower(self) as sow_fn:
combo_runner_core(
fn=sow_fn,
combos=combos,
cases=cases,
constants=constants,
shuffle=shuffle,
verbosity=verbosity,
)
def sow_cases(
self,
fn_args,
cases,
combos=None,
constants=None,
verbosity=1,
batchsize=None,
num_batches=None,
):
"""Sow cases to disk to be later grown, potentially in batches.
Parameters
----------
fn_args : iterable[str] or str
The names and order of the function arguments, can be ``None`` if
each case is supplied as a ``dict``.
cases : iterable or mappings, optional
Sequence of individual cases to sow for all or some function
arguments.
combos : mapping of arg names to values, optional
Combinations to sow for some or all function arguments.
constants : mapping, optional
Provide additional constant function values to use when sowing.
verbosity : int, optional
How much information to show when sowing.
batchsize : int, optional
If specified, set a new batchsize for the crop.
num_batches : int, optional
If specified, set a new num_batches for the crop.
"""
if batchsize is not None:
self.batchsize = batchsize
if num_batches is not None:
self.num_batches = num_batches
fn_args = parse_fn_args(self._fn, fn_args)
cases = parse_cases(cases, fn_args)
constants = self.parse_constants(constants)
self.choose_batch_settings(combos=combos, cases=cases)
self.prepare(fn_args=fn_args, combos=combos, cases=cases)
with Sower(self) as sow_fn:
case_runner(
fn=sow_fn,
fn_args=fn_args,
cases=cases,
combos=combos,
constants=constants,
verbosity=verbosity,
parse=False,
)
def sow_samples(self, n, combos=None, constants=None, verbosity=1):
"""Sow ``n`` samples to disk.
"""
fn_args, cases = self.farmer.gen_cases_fnargs(n, combos)
self.sow_cases(fn_args, cases,
constants=constants, verbosity=verbosity)
def grow(self, batch_ids, **combo_runner_opts):
"""Grow specific batch numbers using this process.
"""
if isinstance(batch_ids, int):
batch_ids = (batch_ids,)
combo_runner_core(grow, combos=(('batch_number', batch_ids),),
constants={'verbosity': 0, 'crop': self},
**combo_runner_opts)
def grow_missing(self, **combo_runner_opts):
"""Grow any missing results using this process.
"""
self.grow(batch_ids=self.missing_results(), **combo_runner_opts)
def reap_combos(self, wait=False, clean_up=None, allow_incomplete=False):
"""Reap already sown and grown results from this crop.
Parameters
----------
wait : bool, optional
Whether to wait for results to appear. If false (default) all
results need to be in place before the reap.
clean_up : bool, optional
Whether to delete all the batch files once the results have been
gathered. If left as ``None`` this will be automatically set to
``not allow_incomplete``.
allow_incomplete : bool, optional
Allow only partially completed crop results to be reaped,
incomplete results will all be filled-in as nan.
Returns
-------
results : nested tuple
'N-dimensional' tuple containing the results.
"""
check_ready_to_reap(self, allow_incomplete, wait)
clean_up, default_result = calc_clean_up_default_res(
self, clean_up, allow_incomplete
)
# load same combinations as cases saved with
settings = self.load_info()
with Reaper(self, num_batches=settings['num_batches'],
wait=wait, default_result=default_result) as reap_fn:
results = combo_runner_core(
fn=reap_fn,
combos=settings['combos'],
cases=settings['cases'],
constants={},
shuffle=settings.get('shuffle', False),
)
if clean_up:
self.delete_all()
return results
def reap_combos_to_ds(
self,
var_names=None,
var_dims=None,
var_coords=None,
constants=None,
attrs=None,
parse=True,
wait=False,
clean_up=None,
allow_incomplete=False,
to_df=False,
):
"""Reap a function over sowed combinations and output to a Dataset.
Parameters
----------
var_names : str, sequence of strings, or None
Variable name(s) of the output(s) of `fn`, set to None if
fn outputs data already labelled in a Dataset or DataArray.
var_dims : sequence of either strings or string sequences, optional
'Internal' names of dimensions for each variable, the values for
each dimension should be contained as a mapping in either
`var_coords` (not needed by `fn`) or `constants` (needed by `fn`).
var_coords : mapping, optional
Mapping of extra coords the output variables may depend on.
constants : mapping, optional
Arguments to `fn` which are not iterated over, these will be
recorded either as attributes or coordinates if they are named
in `var_dims`.
resources : mapping, optional
Like `constants` but they will not be recorded.
attrs : mapping, optional
Any extra attributes to store.
wait : bool, optional
Whether to wait for results to appear. If false (default) all
results need to be in place before the reap.
clean_up : bool, optional
Whether to delete all the batch files once the results have been
gathered. If left as ``None`` this will be automatically set to
``not allow_incomplete``.
allow_incomplete : bool, optional
Allow only partially completed crop results to be reaped,
incomplete results will all be filled-in as nan.
to_df : bool, optional
Whether to reap to a ``xarray.Dataset`` or a ``pandas.DataFrame``.
Returns
-------
        xarray.Dataset or pandas.DataFrame
            Multidimensional labelled dataset containing all the results.
"""
check_ready_to_reap(self, allow_incomplete, wait)
clean_up, default_result = calc_clean_up_default_res(
self, clean_up, allow_incomplete
)
# load exact same combinations as cases saved with
settings = self.load_info()
if parse:
constants = parse_constants(constants)
attrs = parse_attrs(attrs)
with Reaper(self, num_batches=settings['num_batches'],
wait=wait, default_result=default_result) as reap_fn:
# move constants into attrs, so as not to pass them to the Reaper
            # when in fact they were meant for the original function.
data = combo_runner_to_ds(
fn=reap_fn,
combos=settings['combos'],
cases=settings['cases'],
var_names=var_names,
var_dims=var_dims,
var_coords=var_coords,
constants={},
resources={},
attrs={**constants, **attrs},
shuffle=settings.get('shuffle', False),
parse=parse,
to_df=to_df,
)
if clean_up:
self.delete_all()
return data
def reap_runner(self, runner, wait=False, clean_up=None,
allow_incomplete=False, to_df=False):
"""Reap a Crop over sowed combos and save to a dataset defined by a
:class:`~xyzpy.Runner`.
"""
        # Can ignore `Runner.resources` as they play no part in describing the
# output, though they should be supplied to sow and thus grow.
data = self.reap_combos_to_ds(
var_names=runner._var_names,
var_dims=runner._var_dims,
var_coords=runner._var_coords,
constants=runner._constants,
attrs=runner._attrs,
parse=False,
wait=wait,
clean_up=clean_up,
allow_incomplete=allow_incomplete,
to_df=to_df)
if to_df:
runner._last_df = data
else:
runner._last_ds = data
return data
def reap_harvest(self, harvester, wait=False, sync=True, overwrite=None,
clean_up=None, allow_incomplete=False):
"""Reap a Crop over sowed combos and merge with the dataset defined by
a :class:`~xyzpy.Harvester`.
"""
if harvester is None:
raise ValueError("Cannot reap and harvest if no Harvester is set.")
ds = self.reap_runner(harvester.runner, wait=wait, clean_up=False,
allow_incomplete=allow_incomplete, to_df=False)
if sync:
harvester.add_ds(ds, sync=sync, overwrite=overwrite)
        # defer cleaning up until we have successfully synced the new dataset
if clean_up is None:
clean_up = not allow_incomplete
if clean_up:
self.delete_all()
return ds
def reap_samples(
self,
sampler,
wait=False,
sync=True,
clean_up=None,
allow_incomplete=False
):
"""Reap a Crop over sowed combos and merge with the dataframe defined
by a :class:`~xyzpy.Sampler`.
"""
if sampler is None:
raise ValueError("Cannot reap samples without a 'Sampler'.")
df = self.reap_runner(sampler.runner, wait=wait, clean_up=clean_up,
allow_incomplete=allow_incomplete, to_df=True)
if sync:
sampler._last_df = df
sampler.add_df(df, sync=sync)
return df
def reap(
self,
wait=False,
sync=True,
overwrite=None,
clean_up=None,
allow_incomplete=False,
):
"""Reap sown and grown combos from disk. Return a dataset if a runner
or harvester is set, otherwise, the raw nested tuple.
Parameters
----------
wait : bool, optional
Whether to wait for results to appear. If false (default) all
results need to be in place before the reap.
sync : bool, optional
Immediately sync the new dataset with the on-disk full dataset or
dataframe if a harvester or sampler is used.
overwrite : bool, optional
How to compare data when syncing to on-disk dataset.
If ``None``, (default) merge as long as no conflicts.
``True``: overwrite with the new data. ``False``, discard any
new conflicting data.
clean_up : bool, optional
Whether to delete all the batch files once the results have been
gathered. If left as ``None`` this will be automatically set to
``not allow_incomplete``.
allow_incomplete : bool, optional
Allow only partially completed crop results to be reaped,
incomplete results will all be filled-in as nan.
Returns
-------
nested tuple or xarray.Dataset
"""
opts = dict(clean_up=clean_up, wait=wait,
allow_incomplete=allow_incomplete)
if isinstance(self.farmer, Runner):
return self.reap_runner(self.farmer, **opts)
if isinstance(self.farmer, Harvester):
opts['overwrite'] = overwrite
return self.reap_harvest(self.farmer, sync=sync, **opts)
if isinstance(self.farmer, Sampler):
return self.reap_samples(self.farmer, sync=sync, **opts)
return self.reap_combos(**opts)
def check_bad(self, delete_bad=True):
"""Check that the result dumps are not bad -> sometimes length does not
match the batch. Optionally delete these so that they can be re-grown.
Parameters
----------
delete_bad : bool
            Delete bad results as they are encountered.
Returns
-------
bad_ids : tuple
The bad batch numbers.
"""
# XXX: work out why this is needed sometimes on network filesystems.
result_files = glob.glob(
os.path.join(self.location, "results", RSLT_NM.format("*")))
bad_ids = []
for result_file in result_files:
# load corresponding batch file to check length.
result_num = os.path.split(
result_file)[-1].strip("xyz-result-").strip(".jbdmp")
batch_file = os.path.join(
self.location, "batches", BTCH_NM.format(result_num))
batch = read_from_disk(batch_file)
try:
result = read_from_disk(result_file)
unloadable = False
except Exception as e:
unloadable = True
err = e
if unloadable or (len(result) != len(batch)):
msg = "result {} is bad".format(result_file)
msg += "." if not delete_bad else " - deleting it."
msg += " Error was: {}".format(err) if unloadable else ""
print(msg)
if delete_bad:
os.remove(result_file)
bad_ids.append(result_num)
return tuple(bad_ids)
# ----------------------------- properties ----------------------------- #
def _get_fn(self):
return self._fn
def _set_fn(self, fn):
if self.save_fn is None and fn is not None:
self.save_fn = True
self._fn = fn
def _del_fn(self):
self._fn = None
self.save_fn = False
fn = property(_get_fn, _set_fn, _del_fn,
"Function to save with the Crop for automatic loading and "
"running. Default crop name will be inferred from this if"
"not given explicitly as well.")
@property
def num_sown_batches(self):
"""Total number of batches to be run/grown.
"""
self.calc_progress()
return self._num_sown_batches
@property
def num_results(self):
self.calc_progress()
return self._num_results
def load_crops(directory='.'):
"""Automatically load all the crops found in the current directory.
Parameters
----------
directory : str, optional
Which directory to load the crops from, defaults to '.' - the current.
Returns
-------
dict[str, Crop]
Mapping of the crop name to the Crop.
"""
import os
import re
folders = next(os.walk(directory))[1]
crop_rgx = re.compile(r'^\.xyz-(.+)')
names = []
for folder in folders:
match = crop_rgx.match(folder)
if match:
            names.append(match.group(1))
return {name: Crop(name=name) for name in names}
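# A minimal usage sketch for ``load_crops`` (illustrative only - it assumes
# some crops have already been sown in the current directory):
#
#     crops = load_crops('.')
#     for name, crop in crops.items():
#         print(name, '->', crop.num_results, 'results so far')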
class Sower(object):
"""Class for sowing a 'crop' of batched combos to then 'grow' (on any
number of workers sharing the filesystem) and then reap.
"""
def __init__(self, crop):
"""
Parameters
----------
crop : xyzpy.Crop
Description of where and how to store the cases and results.
"""
self.crop = crop
# Internal:
self._batch_cases = [] # collects cases to be written in single batch
self._counter = 0 # counts how many cases are in batch so far
self._batch_counter = 0 # counts how many batches have been written
def save_batch(self):
"""Save the current batch of cases to disk and start the next batch.
"""
self._batch_counter += 1
write_to_disk(self._batch_cases, os.path.join(
self.crop.location, "batches", BTCH_NM.format(self._batch_counter))
)
self._batch_cases = []
self._counter = 0
# Context manager #
def __enter__(self):
return self
def __call__(self, **kwargs):
self._batch_cases.append(kwargs)
self._counter += 1
        # when the number of cases doesn't divide evenly into the batches we
        # distribute the remainder among the first few batches.
extra_batch = self._batch_counter < self.crop._batch_remainder
if self._counter == self.crop.batchsize + int(extra_batch):
self.save_batch()
def __exit__(self, exception_type, exception_value, traceback):
        # Make sure any leftover partial batch is also saved
if self._batch_cases:
self.save_batch()
def grow(batch_number, crop=None, fn=None, check_mpi=True,
verbosity=2, debugging=False):
"""Automatically process a batch of cases into results. Should be run in an
".xyz-{fn_name}" folder.
Parameters
----------
batch_number : int
Which batch to 'grow' into a set of results.
crop : xyzpy.Crop
Description of where and how to store the cases and results.
fn : callable, optional
If specified, the function used to generate the results, otherwise
the function will be loaded from disk.
check_mpi : bool, optional
Whether to check if the process is rank 0 and only save results if
so - allows mpi functions to be simply used. Defaults to true,
this should only be turned off if e.g. a pool of workers is being
used to run different ``grow`` instances.
verbosity : {0, 1, 2}, optional
How much information to show.
debugging : bool, optional
Set logging level to DEBUG.
"""
if debugging:
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
if crop is None:
current_folder = os.path.relpath('.', '..')
if current_folder[:5] != ".xyz-":
raise XYZError("`grow` should be run in a "
"\"{crop_parent}/.xyz-{crop_name}\" folder, else "
"`crop_parent` and `crop_name` (or `fn`) should be "
"specified.")
crop_name = current_folder[5:]
crop_location = os.getcwd()
else:
crop_name = crop.name
crop_location = crop.location
fn_file = os.path.join(crop_location, FNCT_NM)
cases_file = os.path.join(crop_location, "batches",
BTCH_NM.format(batch_number))
results_file = os.path.join(crop_location, "results",
RSLT_NM.format(batch_number))
# load function
if fn is None:
fn = from_pickle(read_from_disk(fn_file))
# load cases to evaluate
cases = read_from_disk(cases_file)
if len(cases) == 0:
raise ValueError("Something has gone wrong with the loading of "
"batch {} ".format(BTCH_NM.format(batch_number)) +
"for the crop at {}.".format(crop.location))
# maybe want to run grow as mpiexec (i.e. `fn` itself in parallel),
# so only save and delete on rank 0
if check_mpi and 'OMPI_COMM_WORLD_RANK' in os.environ: # pragma: no cover
rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
elif check_mpi and 'PMI_RANK' in os.environ: # pragma: no cover
rank = int(os.environ['PMI_RANK'])
else:
rank = 0
if rank == 0:
if verbosity >= 1:
print(f"xyzpy: loaded batch {batch_number} of {crop_name}.")
pbar = progbar(range(len(cases)), disable=verbosity <= 0)
results = []
for i in pbar:
if verbosity >= 2:
pbar.set_description(f"{cases[i]}")
# compute and store result!
results.append(fn(**cases[i]))
if len(results) != len(cases):
raise ValueError("Something has gone wrong with processing "
"batch {} ".format(BTCH_NM.format(batch_number)) +
"for the crop at {}.".format(crop.location))
# save to results
write_to_disk(tuple(results), results_file)
if verbosity >= 1:
print(f"xyzpy: success - batch {batch_number} completed.")
else:
for case in cases:
# worker: just help compute the result!
fn(**case)
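# Hedged example of calling ``grow`` directly - normally it is invoked for you
# by ``Crop.grow``/``Crop.grow_missing`` or by a generated cluster script. The
# crop name below is hypothetical:
#
#     crop = Crop(name='my_crop')  # an already-sown crop
#     grow(1, crop=crop)           # process batch 1 in this process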
# --------------------------------------------------------------------------- #
# Gathering results #
# --------------------------------------------------------------------------- #
class Reaper(object):
"""Class that acts as a stateful function to retrieve already sown and
grow results.
"""
def __init__(self, crop, num_batches, wait=False, default_result=None):
"""Class for retrieving the batched, flat, 'grown' results.
Parameters
----------
crop : xyzpy.Crop
Description of where and how to store the cases and results.
"""
self.crop = crop
files = (
os.path.join(self.crop.location, "results", RSLT_NM.format(i + 1))
for i in range(num_batches)
)
def _load(x):
use_default = (
(default_result is not None) and
(not wait) and
(not os.path.isfile(x))
)
# actual result doesn't exist yet - use the default if specified
if use_default:
i = int(re.findall(RSLT_NM.format(r'(\d+)'), x)[0])
size = crop.batchsize + int(i < crop._batch_remainder)
res = (default_result,) * size
else:
res = read_from_disk(x)
if (res is None) or len(res) == 0:
raise ValueError("Something not right: result {} contains "
"no data upon read from disk.".format(x))
return res
def wait_to_load(x):
while not os.path.exists(x):
time.sleep(0.2)
if os.path.isfile(x):
return _load(x)
else:
raise ValueError("{} is not a file.".format(x))
self.results = itertools.chain.from_iterable(map(
wait_to_load if wait else _load, files))
def __enter__(self):
return self
def __call__(self, **kwargs):
return next(self.results)
def __exit__(self, exception_type, exception_value, traceback):
        # Check everything has gone according to plan
if tuple(self.results):
raise XYZError("Not all results reaped!")
# --------------------------------------------------------------------------- #
# Automatic Batch Submission Scripts #
# --------------------------------------------------------------------------- #
_SGE_HEADER = (
"#!/bin/bash -l\n"
"#$ -S /bin/bash\n"
"#$ -l h_rt={hours}:{minutes}:{seconds},mem={gigabytes}G\n"
"#$ -l tmpfs={temp_gigabytes}G\n"
"{extra_resources}\n"
"#$ -N {name}\n"
"mkdir -p {output_directory}\n"
"#$ -wd {output_directory}\n"
"#$ -pe {pe} {num_procs}\n"
"#$ -t {run_start}-{run_stop}\n")
_PBS_HEADER = (
"#!/bin/bash -l\n"
"#PBS -lselect={num_nodes}:ncpus={num_procs}:mem={gigabytes}gb\n"
"#PBS -lwalltime={hours:02}:{minutes:02}:{seconds:02}\n"
"{extra_resources}\n"
"#PBS -N {name}\n"
"#PBS -J {run_start}-{run_stop}\n")
_SLURM_HEADER = (
"#!/bin/bash -l\n"
"#SBATCH --nodes={num_nodes}\n"
"#SBATCH --mem={gigabytes}gb\n"
"#SBATCH --cpus-per-task={num_procs}\n"
"#SBATCH --time={hours:02}:{minutes:02}:{seconds:02}\n"
"{extra_resources}\n"
"#SBATCH --job-name={name}\n"
"#SBATCH --array={run_start}-{run_stop}\n")
_BASE = (
"cd {working_directory}\n"
"export OMP_NUM_THREADS={num_threads}\n"
"export MKL_NUM_THREADS={num_threads}\n"
"export OPENBLAS_NUM_THREADS={num_threads}\n"
"{shell_setup}\n"
"tmpfile=$(mktemp .xyzpy-qsub.XXXXXXXX)\n"
"cat <<EOF > $tmpfile\n"
"{setup}\n"
"from xyzpy.gen.cropping import grow, Crop\n"
"if __name__ == '__main__':\n"
" crop = Crop(name='{name}', parent_dir='{parent_dir}')\n")
_CLUSTER_SGE_GROW_ALL_SCRIPT = (
" grow($SGE_TASK_ID, crop=crop, debugging={debugging})\n")
_CLUSTER_PBS_GROW_ALL_SCRIPT = (
" grow($PBS_ARRAY_INDEX, crop=crop, debugging={debugging})\n")
_CLUSTER_SLURM_GROW_ALL_SCRIPT = (
" grow($SLURM_ARRAY_TASK_ID, crop=crop, debugging={debugging})\n")
_CLUSTER_SGE_GROW_PARTIAL_SCRIPT = (
" batch_ids = {batch_ids}]\n"
" grow(batch_ids[$SGE_TASK_ID - 1], crop=crop, "
"debugging={debugging})\n")
_CLUSTER_PBS_GROW_PARTIAL_SCRIPT = (
" batch_ids = {batch_ids}\n"
" grow(batch_ids[$PBS_ARRAY_INDEX - 1], crop=crop, "
"debugging={debugging})\n")
_CLUSTER_SLURM_GROW_PARTIAL_SCRIPT = (
" batch_ids = {batch_ids}\n"
" grow(batch_ids[$SLURM_ARRAY_TASK_ID - 1], crop=crop, "
"debugging={debugging})\n")
_BASE_CLUSTER_SCRIPT_END = (
"EOF\n"
"{launcher} $tmpfile\n"
"rm $tmpfile\n")
def gen_cluster_script(
crop, scheduler, batch_ids=None, *,
hours=None,
minutes=None,
seconds=None,
gigabytes=2,
num_procs=1,
num_threads=None,
num_nodes=1,
launcher='python',
setup="#",
shell_setup="",
mpi=False,
temp_gigabytes=1,
output_directory=None,
extra_resources=None,
debugging=False,
):
"""Generate a cluster script to grow a Crop.
Parameters
----------
crop : Crop
The crop to grow.
scheduler : {'sge', 'pbs', 'slurm'}
Whether to use a SGE, PBS or slurm submission script template.
batch_ids : int or tuple[int]
Which batch numbers to grow, defaults to all missing batches.
    hours : int, optional
        How many hours to request. If hours, minutes and seconds are all
        unspecified, the requested walltime defaults to 1 hour, otherwise
        this defaults to 0.
    minutes : int, optional
        How many minutes to request, default: 0.
    seconds : int, optional
        How many seconds to request, default: 0.
gigabytes : int, optional
How much memory to request, default: 2.
num_procs : int, optional
How many processes to request (threaded cores or MPI), default: 1.
launcher : str, optional
How to launch the script, default: ``'python'``. But could for example
        be ``'mpiexec python'`` for an MPI program.
    setup : str, optional
        Python script to run before growing, for things that shouldn't be put
        in the crop function itself, e.g. one-time imports with side-effects
        like: ``"import tensorflow as tf; tf.enable_eager_execution()"``.
shell_setup : str, optional
Commands to be run by the shell before the python script is executed.
E.g. ``conda activate my_env``.
mpi : bool, optional
Request MPI processes not threaded processes.
temp_gigabytes : int, optional
How much temporary on-disk memory.
output_directory : str, optional
What directory to write output to. Defaults to "$HOME/Scratch/output".
extra_resources : str, optional
Extra "#$ -l" resources, e.g. 'gpu=1'
debugging : bool, optional
Set the python log level to debugging.
Returns
-------
str
"""
scheduler = scheduler.lower() # be case-insensitive for scheduler
if scheduler not in {'sge', 'pbs', 'slurm'}:
raise ValueError("scheduler must be one of 'sge', 'pbs', or 'slurm'")
if hours is minutes is seconds is None:
hours, minutes, seconds = 1, 0, 0
else:
hours = 0 if hours is None else int(hours)
minutes = 0 if minutes is None else int(minutes)
seconds = 0 if seconds is None else int(seconds)
if output_directory is None:
from os.path import expanduser
home = expanduser("~")
output_directory = os.path.join(home, 'Scratch', 'output')
crop.calc_progress()
if extra_resources is None:
extra_resources = ""
elif scheduler == 'slurm':
extra_resources = '#SBATCH --' + \
'\n#SBATCH --'.join(extra_resources.split(','))
else:
extra_resources = "#$ -l {}".format(extra_resources)
if num_threads is None:
if mpi:
num_threads = 1
else:
num_threads = num_procs
# get absolute path
full_parent_dir = str(pathlib.Path(crop.parent_dir).expanduser().resolve())
opts = {
'hours': hours,
'minutes': minutes,
'seconds': seconds,
'gigabytes': gigabytes,
'name': crop.name,
'parent_dir': full_parent_dir,
'num_procs': num_procs,
'num_threads': num_threads,
'num_nodes': num_nodes,
'run_start': 1,
'launcher': launcher,
'setup': setup,
'shell_setup': shell_setup,
'pe': 'mpi' if mpi else 'smp',
'temp_gigabytes': temp_gigabytes,
'output_directory': output_directory,
'working_directory': full_parent_dir,
'extra_resources': extra_resources,
'debugging': debugging,
}
if scheduler == 'sge':
script = _SGE_HEADER
elif scheduler == 'pbs':
script = _PBS_HEADER
elif scheduler == 'slurm':
script = _SLURM_HEADER
script += _BASE
# grow specific ids
if batch_ids is not None:
if scheduler == 'sge':
script += _CLUSTER_SGE_GROW_PARTIAL_SCRIPT
elif scheduler == 'pbs':
script += _CLUSTER_PBS_GROW_PARTIAL_SCRIPT
elif scheduler == 'slurm':
script += _CLUSTER_SLURM_GROW_PARTIAL_SCRIPT
batch_ids = tuple(batch_ids)
opts['run_stop'] = len(batch_ids)
opts['batch_ids'] = batch_ids
# grow all ids
elif crop.num_results == 0:
batch_ids = tuple(range(crop.num_batches))
if scheduler == 'sge':
script += _CLUSTER_SGE_GROW_ALL_SCRIPT
elif scheduler == 'pbs':
script += _CLUSTER_PBS_GROW_ALL_SCRIPT
elif scheduler == 'slurm':
script += _CLUSTER_SLURM_GROW_ALL_SCRIPT
opts['run_stop'] = crop.num_batches
# grow missing ids only
else:
if scheduler == 'sge':
script += _CLUSTER_SGE_GROW_PARTIAL_SCRIPT
elif scheduler == 'pbs':
script += _CLUSTER_PBS_GROW_PARTIAL_SCRIPT
elif scheduler == 'slurm':
script += _CLUSTER_SLURM_GROW_PARTIAL_SCRIPT
batch_ids = crop.missing_results()
opts['run_stop'] = len(batch_ids)
opts['batch_ids'] = batch_ids
script += _BASE_CLUSTER_SCRIPT_END
script = script.format(**opts)
if (scheduler == 'pbs') and len(batch_ids) == 1:
# PBS can't handle arrays jobs of size 1...
script = (script.replace('#PBS -J 1-1\n', "")
.replace("$PBS_ARRAY_INDEX", '1'))
return script
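# Sketch of generating (but not submitting) a submission script - the crop and
# resource values here are purely illustrative:
#
#     crop = Crop(name='my_crop')
#     script = gen_cluster_script(crop, 'slurm', hours=2, gigabytes=4,
#                                 num_procs=4)
#     print(script)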
def grow_cluster(
crop, scheduler, batch_ids=None, *,
hours=None,
minutes=None,
seconds=None,
gigabytes=2,
num_procs=1,
num_threads=None,
num_nodes=1,
launcher='python',
setup="#",
shell_setup="",
mpi=False,
temp_gigabytes=1,
output_directory=None,
extra_resources=None,
debugging=False,
): # pragma: no cover
"""Automagically submit SGE, PBS, or slurm jobs to grow all missing
results.
Parameters
----------
crop : Crop
The crop to grow.
scheduler : {'sge', 'pbs', 'slurm'}
Whether to use a SGE, PBS or slurm submission script template.
batch_ids : int or tuple[int]
Which batch numbers to grow, defaults to all missing batches.
    hours : int, optional
        How many hours to request. If hours, minutes and seconds are all
        unspecified, the requested walltime defaults to 1 hour, otherwise
        this defaults to 0.
    minutes : int, optional
        How many minutes to request, default: 0.
    seconds : int, optional
        How many seconds to request, default: 0.
gigabytes : int, optional
How much memory to request, default: 2.
num_procs : int, optional
How many processes to request (threaded cores or MPI), default: 1.
launcher : str, optional
How to launch the script, default: ``'python'``. But could for example
        be ``'mpiexec python'`` for an MPI program.
    setup : str, optional
        Python script to run before growing, for things that shouldn't be put
        in the crop function itself, e.g. one-time imports with side-effects
        like: ``"import tensorflow as tf; tf.enable_eager_execution()"``.
shell_setup : str, optional
Commands to be run by the shell before the python script is executed.
E.g. ``conda activate my_env``.
mpi : bool, optional
Request MPI processes not threaded processes.
temp_gigabytes : int, optional
How much temporary on-disk memory.
output_directory : str, optional
What directory to write output to. Defaults to "$HOME/Scratch/output".
extra_resources : str, optional
Extra "#$ -l" resources, e.g. 'gpu=1'
debugging : bool, optional
Set the python log level to debugging.
"""
if crop.is_ready_to_reap():
print("Crop ready to reap: nothing to submit.")
return
import subprocess
script = gen_cluster_script(
crop, scheduler, batch_ids=batch_ids,
hours=hours,
minutes=minutes,
seconds=seconds,
gigabytes=gigabytes,
temp_gigabytes=temp_gigabytes,
output_directory=output_directory,
num_procs=num_procs,
num_threads=num_threads,
num_nodes=num_nodes,
launcher=launcher,
setup=setup,
shell_setup=shell_setup,
mpi=mpi,
extra_resources=extra_resources,
debugging=debugging,
)
script_file = os.path.join(crop.location, "__qsub_script__.sh")
with open(script_file, mode='w') as f:
f.write(script)
if scheduler in {'sge', 'pbs'}:
result = subprocess.run(['qsub', script_file], capture_output=True)
elif scheduler == 'slurm':
result = subprocess.run(['sbatch', script_file], capture_output=True)
print(result.stderr.decode())
print(result.stdout.decode())
os.remove(script_file)
def gen_qsub_script(
crop, batch_ids=None, *, scheduler='sge',
**kwargs
): # pragma: no cover
"""Generate a qsub script to grow a Crop. Deprecated in favour of
`gen_cluster_script` and will be removed in the future.
Parameters
----------
crop : Crop
The crop to grow.
batch_ids : int or tuple[int]
Which batch numbers to grow, defaults to all missing batches.
scheduler : {'sge', 'pbs'}, optional
Whether to use a SGE or PBS submission script template.
kwargs
See `gen_cluster_script` for all other parameters.
"""
warnings.warn("'gen_qsub_script' is deprecated in favour of "
"`gen_cluster_script` and will be removed in the future",
FutureWarning)
return gen_cluster_script(crop, scheduler, batch_ids=batch_ids, **kwargs)
def qsub_grow(
crop, batch_ids=None, *, scheduler='sge',
**kwargs
): # pragma: no cover
"""Automagically submit SGE or PBS jobs to grow all missing results.
Deprecated in favour of `grow_cluster` and will be removed in the future.
Parameters
----------
crop : Crop
The crop to grow.
batch_ids : int or tuple[int]
Which batch numbers to grow, defaults to all missing batches.
scheduler : {'sge', 'pbs'}, optional
Whether to use a SGE or PBS submission script template.
kwargs
See `grow_cluster` for all other parameters.
"""
warnings.warn("'qsub_grow' is deprecated in favour of "
"`grow_cluster` and will be removed in the future",
FutureWarning)
grow_cluster(crop, scheduler, batch_ids=batch_ids, **kwargs)
Crop.gen_qsub_script = gen_qsub_script
Crop.qsub_grow = qsub_grow
Crop.gen_cluster_script = gen_cluster_script
Crop.grow_cluster = grow_cluster
Crop.gen_sge_script = functools.partialmethod(Crop.gen_cluster_script,
scheduler='sge')
Crop.grow_sge = functools.partialmethod(Crop.grow_cluster, scheduler='sge')
Crop.gen_pbs_script = functools.partialmethod(Crop.gen_cluster_script,
scheduler='pbs')
Crop.grow_pbs = functools.partialmethod(Crop.grow_cluster, scheduler='pbs')
Crop.gen_slurm_script = functools.partialmethod(Crop.gen_cluster_script,
scheduler='slurm')
Crop.grow_slurm = functools.partialmethod(Crop.grow_cluster, scheduler='slurm')
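# End-to-end sketch of the batched workflow this module provides. The sowing
# step itself is assumed from the wider xyzpy API and is not shown above; the
# other calls correspond to methods defined in this file:
#
#     crop = Crop(name='demo')      # describe the on-disk storage
#     # ... sow combos/cases here, e.g. via a Runner/Harvester farmer ...
#     crop.grow_missing()           # compute locally, or instead:
#     # crop.grow_slurm(hours=1)    # submit an array job for missing batches
#     data = crop.reap()            # gather everything back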
| jcmgray/xyzpy | xyzpy/gen/cropping.py | Python | mit | 55,241 |
from classes import wunderpy_wrapper
from classes import grocerylist
from classes import grocerystore
wp = wunderpy_wrapper.wunderpy_wrapper('../data/tokens.csv')
obj = wp.get_task_positions_obj(wp.WUNDERLIST_GROCERY)
grocery_store = grocerystore.groceryStore('../data/store_order_zehrs.csv', '../data/ingredient_categories.csv') # use the default zehrs store; good enough
groceries = grocerylist.groceryList(wp.WUNDERLIST_GROCERY, wp)
groceries.get_tasks()
groceries.get_category_for_element(groceries.grocery_list[0], grocery_store)
groceries.get_categories(grocery_store)
groceries.reorder_list(wp)
# wp.update_list_order(groceries.wunderlist_order_obj)
# TODO check reloading of a list when you enter the right sheet
# TODO sort by cat order value, not cat id.
print('done') | briancousins/RecipeBook | tests/test_grocerylist.py | Python | mit | 784 |
import logging
import threading
_lock = threading.Lock()
_logger = None
_name = 'root'
_level = logging.DEBUG
def get_logger():
"""Returns a "global" logger.
Args:
level (optional): The logging level.
name (optional): The name of the logger.
"""
global _lock
global _logger
with _lock:
if(_logger is None):
formatter = logging.Formatter(fmt='%(asctime)s:%(levelname)s:%(module)s - %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(fmt=formatter)
_logger = logging.getLogger(_name)
_logger.addHandler(handler)
_logger.setLevel(_level)
            # Don't propagate up to the root logger
_logger.propagate = False
return _logger
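# Hedged usage sketch - repeated calls return the same configured logger:
#
#     log = get_logger()
#     log.debug('starting up')
#     assert get_logger() is log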
| robotarium/vizier | vizier/log.py | Python | mit | 790 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import time
from django.test import TestCase
from lrucache_backend import LRUObjectCache
# functions/classes for complex data type tests
def f():
return 42
class C(object):
def m(n):
return 24
class ObjectCacheTests(TestCase):
# Tests copied from django/tests/cache/tests.py
def setUp(self):
self.cache = LRUObjectCache("lru", dict(max_entries=50, cull_frequency=50))
def tearDown(self):
self.cache.clear()
def test_get_missing_key(self):
self.assertEqual(self.cache.get("missing", default=1), 1)
def test_eviction(self):
cache = self.cache
for r in range(50):
cache.set(r, r)
# The ordering below is very strict.
# set will evict
cache.set("a", "a")
self.assertIn("a", cache)
self.assertIn(49, cache)
self.assertNotIn(0, cache)
# In does not promote
self.assertIn(1, cache)
cache.set("b", "b")
self.assertNotIn(1, cache)
# Add will evict
self.assertFalse(cache.add("a", "a"))
self.assertIn(2, cache)
self.assertTrue(cache.add("c", "c"))
self.assertNotIn(2, cache)
# Get does not evict
self.assertEqual(cache.get("c"), "c")
self.assertIn(3, cache)
self.assertIsNone(cache.get("d"))
self.assertIn(3, cache)
# Get promotes
self.assertEqual(cache.get(3), 3)
cache.set("d", "d")
self.assertIn(3, cache)
self.assertNotIn(4, cache)
def test_multiple_caches(self):
"Multiple caches are isolated"
cache = self.cache
cache2 = LRUObjectCache("lru2", dict(max_entries=50))
self.cache.set("value", 42)
self.assertEqual(cache.get("value"), 42)
self.assertIsNone(cache2.get("value"))
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
cache = self.cache
key = "value"
internal_key = cache.make_key(key)
cache.set(key, 1, timeout=10000)
self.assertEqual(cache._cache[internal_key], 1)
exp = cache._expire_info[internal_key]
cache.incr(key)
self.assertEqual(exp, cache._expire_info[internal_key])
cache.decr(key)
self.assertEqual(exp, cache._expire_info[internal_key])
def test_simple(self):
# Simple cache set/get works
cache = self.cache
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache = self.cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache = self.cache
prefixed_cache = LRUObjectCache("prefixed", dict(max_entries=50, KEY_PREFIX="yolo"))
prefixed_cache._cache = cache._cache
cache.set("somekey", "value")
# should not be set in the prefixed cache
self.assertFalse(prefixed_cache.has_key("somekey")) # noqa: W601
prefixed_cache.set("somekey", "value2")
self.assertEqual(cache.get("somekey"), "value")
self.assertEqual(prefixed_cache.get("somekey"), "value2")
def test_non_existent(self):
"""Nonexistent cache keys return as None/default."""
cache = self.cache
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache = self.cache
cache.set("a", "a")
cache.set("b", "b")
cache.set("c", "c")
cache.set("d", "d")
self.assertEqual(cache.get_many(["a", "c", "d"]), {"a": "a", "c": "c", "d": "d"})
self.assertEqual(cache.get_many(["a", "b", "e"]), {"a": "a", "b": "b"})
def test_delete(self):
# Cache keys can be deleted
cache = self.cache
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache = self.cache
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1")) # noqa: W601
self.assertFalse(cache.has_key("goodbye1")) # noqa: W601
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry")) # noqa: W601
def test_in(self):
# The in operator can be used to inspect cache contents
cache = self.cache
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache = self.cache
cache.set("answer", 41)
self.assertEqual(cache.incr("answer"), 42)
self.assertEqual(cache.get("answer"), 42)
self.assertEqual(cache.incr("answer", 10), 52)
self.assertEqual(cache.get("answer"), 52)
self.assertEqual(cache.incr("answer", -10), 42)
with self.assertRaises(ValueError):
cache.incr("does_not_exist")
def test_decr(self):
# Cache values can be decremented
cache = self.cache
cache.set("answer", 43)
self.assertEqual(cache.decr("answer"), 42)
self.assertEqual(cache.get("answer"), 42)
self.assertEqual(cache.decr("answer", 10), 32)
self.assertEqual(cache.get("answer"), 32)
self.assertEqual(cache.decr("answer", -10), 42)
with self.assertRaises(ValueError):
cache.decr("does_not_exist")
def test_close(self):
cache = self.cache
self.assertTrue(hasattr(cache, "close"))
cache.close()
def test_data_types(self):
# Many different data types can be cached
cache = self.cache
stuff = {
"string": "this is a string",
"int": 42,
"list": [1, 2, 3, 4],
"tuple": (1, 2, 3, 4),
"dict": {"A": 1, "B": 2},
"function": f,
"class": C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_expiration(self):
# Cache values can be set to expire
cache = self.cache
cache.set("expire1", "very quickly", 1)
cache.set("expire2", "very quickly", 1)
cache.set("expire3", "very quickly", 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3")) # noqa: W601
def test_unicode(self):
# Unicode values can be cached
cache = self.cache
stuff = {
"ascii": "ascii_value",
"unicode_ascii": "Iñtërnâtiônàlizætiøn1",
"Iñtërnâtiônàlizætiøn": "Iñtërnâtiônàlizætiøn2",
"ascii2": {"x": 1},
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
cache = self.cache
from zlib import compress, decompress
value = "value_to_be_compressed"
compressed_value = compress(value.encode())
# Test set
cache.set("binary1", compressed_value)
compressed_result = cache.get("binary1")
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add("binary1-add", compressed_value)
compressed_result = cache.get("binary1-add")
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({"binary1-set_many": compressed_value})
compressed_result = cache.get("binary1-set_many")
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache = self.cache
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache = self.cache
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache = self.cache
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache = self.cache
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
Followe memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache = self.cache
cache.set("key1", "eggs", 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get("key1"), "eggs")
cache.add("key2", "ham", 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get("key2"), "ham")
cache.set_many({"key3": "sausage", "key4": "lobster bisque"}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get("key3"), "sausage")
self.assertEqual(cache.get("key4"), "lobster bisque")
def test_forever_timeout(self):
"""
Passing in None into timeout results in a value that is cached forever
"""
cache = self.cache
cache.set("key1", "eggs", None)
self.assertEqual(cache.get("key1"), "eggs")
cache.add("key2", "ham", None)
self.assertEqual(cache.get("key2"), "ham")
added = cache.add("key1", "new eggs", None)
self.assertIs(added, False)
self.assertEqual(cache.get("key1"), "eggs")
cache.set_many({"key3": "sausage", "key4": "lobster bisque"}, None)
self.assertEqual(cache.get("key3"), "sausage")
self.assertEqual(cache.get("key4"), "lobster bisque")
def test_zero_timeout(self):
"""
Passing in zero into timeout results in a value that is not cached
"""
cache = self.cache
cache.set("key1", "eggs", 0)
self.assertIsNone(cache.get("key1"))
cache.add("key2", "ham", 0)
self.assertIsNone(cache.get("key2"))
cache.set_many({"key3": "sausage", "key4": "lobster bisque"}, 0)
self.assertIsNone(cache.get("key3"))
self.assertIsNone(cache.get("key4"))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache = self.cache
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache = self.cache
cache.set("answer1", 42)
self.assertEqual(cache.get("answer1"), 42)
self.assertEqual(cache.get("answer1", version=1), 42)
self.assertIsNone(cache.get("answer1", version=2))
cache.set("answer1", 40, version=2)
self.assertEqual(cache.get("answer1", version=1), 42)
self.assertEqual(cache.get("answer1", version=2), 40)
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache = self.cache
cache.add("answer1", 41, version=1)
cache.add("answer1", 42, version=2)
self.assertEqual(cache.get("answer1"), 41)
self.assertEqual(cache.get("answer1", version=1), 41)
self.assertEqual(cache.get("answer1", version=2), 42)
def test_cache_versioning_has_key(self):
cache = self.cache
cache.add("answer1", 41, version=1)
cache.add("answer1", 42, version=2)
self.assertTrue(cache.has_key("answer1", version=1))
self.assertTrue(cache.has_key("answer1", version=2))
self.assertFalse(cache.has_key("answer1", version=3))
def test_cache_versioning_delete(self):
cache = self.cache
cache.set("answer1", 37, version=1)
cache.set("answer1", 42, version=2)
cache.delete("answer1")
self.assertIsNone(cache.get("answer1", version=1))
self.assertEqual(cache.get("answer1", version=2), 42)
cache.set("answer2", 37, version=1)
cache.set("answer2", 42, version=2)
cache.delete("answer2", version=2)
self.assertEqual(cache.get("answer2", version=1), 37)
self.assertIsNone(cache.get("answer2", version=2))
def test_cache_versioning_incr_decr(self):
cache = self.cache
cache.set("answer1", 37, version=1)
cache.set("answer1", 42, version=2)
cache.incr("answer1")
self.assertEqual(cache.get("answer1", version=1), 38)
self.assertEqual(cache.get("answer1", version=2), 42)
cache.decr("answer1")
self.assertEqual(cache.get("answer1", version=1), 37)
self.assertEqual(cache.get("answer1", version=2), 42)
cache.incr("answer1", version=1)
self.assertEqual(cache.get("answer1", version=1), 38)
self.assertEqual(cache.get("answer1", version=2), 42)
cache.incr("answer1", version=2)
self.assertEqual(cache.get("answer1", version=1), 38)
self.assertEqual(cache.get("answer1", version=2), 43)
cache.decr("answer1", version=1)
self.assertEqual(cache.get("answer1", version=1), 37)
self.assertEqual(cache.get("answer1", version=2), 43)
cache.decr("answer1", version=2)
self.assertEqual(cache.get("answer1", version=1), 37)
self.assertEqual(cache.get("answer1", version=2), 42)
def test_cache_versioning_get_set_many(self):
cache = self.cache
cache.set_many({"ford1": 37, "arthur1": 42})
self.assertEqual(cache.get_many(["ford1", "arthur1"]), {"ford1": 37, "arthur1": 42})
self.assertEqual(
cache.get_many(["ford1", "arthur1"], version=1), {"ford1": 37, "arthur1": 42}
)
self.assertEqual(cache.get_many(["ford1", "arthur1"], version=2), {})
cache.set_many({"ford2": 37, "arthur2": 42}, version=2)
self.assertEqual(cache.get_many(["ford2", "arthur2"]), {})
self.assertEqual(cache.get_many(["ford2", "arthur2"], version=1), {})
self.assertEqual(
cache.get_many(["ford2", "arthur2"], version=2), {"ford2": 37, "arthur2": 42}
)
| kogan/django-lrucache-backend | tests/test_backend.py | Python | mit | 16,098 |
from PySide import QtGui, QtCore, QtWebKit
import pygal, os, sys
if getattr(sys, 'frozen', False):
# frozen
program_location = os.path.dirname(sys.executable)
else:
# unfrozen
program_location = os.path.dirname(os.path.realpath(__file__))
monitor_archive = os.path.join(program_location, 'monitor archive')
if not os.path.isdir(monitor_archive):
os.mkdir(monitor_archive)
graphs = os.path.join(monitor_archive, 'graphs')
if not os.path.isdir(graphs):
os.mkdir(graphs)
line_all_svg = os.path.join(graphs, 'allline.svg')
line_size_svg = os.path.join(graphs, 'sizeline.svg')
line_filecount_svg = os.path.join(graphs, 'filecountline.svg')
line_foldercount_svg = os.path.join(graphs, 'foldercountline.svg')
icon = os.path.join(program_location, 'find.png')
class MonitorGraph(QtGui.QWidget):
def __init__(self):
super(MonitorGraph, self).__init__()
self.filemodel = QtGui.QFileSystemModel()
self.fileview = QtGui.QTreeView(self)
self.web_view1 = QtWebKit.QWebView(self)
self.web_view2 = QtWebKit.QWebView(self)
self.web_view3 = QtWebKit.QWebView(self)
self.web_view4 = QtWebKit.QWebView(self)
self.tab_widget = QtGui.QTabWidget(self)
self.initUI()
def initUI(self):
self.createActions()
self.fileview.setModel(self.filemodel)
self.fileview.hideColumn(1)
self.fileview.hideColumn(2)
self.fileview.hideColumn(3)
self.fileview.setRootIndex(self.filemodel.setRootPath(monitor_archive))
self.fileview.setSelectionMode(QtGui.QTreeView.ExtendedSelection)
self.fileview.clicked.connect(self.clicked)
self.fileview.setMinimumWidth(150)
splitter = QtGui.QSplitter(self)
splitter.addWidget(self.fileview)
splitter.addWidget(self.tab_widget)
splitter.setStretchFactor(0, 4)
self.tab_widget.addTab(self.web_view1, 'all')
self.tab_widget.addTab(self.web_view2, 'size')
self.tab_widget.addTab(self.web_view3, 'file count')
self.tab_widget.addTab(self.web_view4, 'folder count')
hlayout = QtGui.QHBoxLayout(self)
hlayout.addWidget(splitter)
self.setWindowTitle("Archive Monitor")
self.setGeometry(100, 100, 1000, 600)
ico = QtGui.QIcon(icon)
self.setWindowIcon(ico)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.dispCurrentMonth()
def dispCurrentMonth(self):
date = QtCore.QDate()
date1 = date.currentDate().toString('MM.dd.yyyy').split('.')
filename = date1[0] + '.' + date1[2] + '.csv'
path = os.path.join(monitor_archive, filename)
if os.path.isfile(path):
data = self.readData(path)
self.plotData(data)
self.displayPlot()
def clicked(self):
index = self.fileview.selectionModel().currentIndex()
path = self.filemodel.filePath(index)
data = self.readData(path)
self.plotData(data)
self.displayPlot()
def readData(self, path):
f = open(path, 'r')
data = []
for line in f:
data.append(line)
return data
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Escape:
self.close()
def delete(self):
indices = self.fileview.selectionModel().selectedIndexes()
paths = []
for index in indices:
paths.append(self.filemodel.filePath(index))
try:
for item in paths:
os.remove(item)
except:
            print("couldn't delete file: " + item)
def createActions(self):
Close1 = QtGui.QAction(self)
Close1.setShortcut('Ctrl+W')
Close1.triggered.connect(self.close)
self.addAction(Close1)
Close2 = QtGui.QAction(self)
Close2.setShortcut('Ctrl+Q')
Close2.triggered.connect(self.close)
self.addAction(Close2)
deleteAction = QtGui.QAction('delete', self)
deleteAction.triggered.connect(self.delete)
self.fileview.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.fileview.addAction(deleteAction)
def plotData(self, data):
date = []
filecount = []
foldercount = []
size = []
x = 0
for item in data:
if x > 0:
values = item.split(',')
print(values)
date.append((values[0]))
size.append(float(values[1]))
filecount.append(int(values[2]))
foldercount.append(int(values[3].rstrip('\n')))
x += 1
self.allGraph(date, size, filecount, foldercount)
self.sizeGraph(date, size)
self.filecountGraph(date, filecount)
self.foldercountGraph(date, foldercount)
self.displayPlot()
def allGraph(self, date, size, filecount, foldercount):
plot = pygal.Line()
plot.title = 'archive changes'
plot.x_labels = date
plot.add('size', size)
plot.add('file count', filecount)
plot.add('folder count', foldercount)
plot.render_to_file(line_all_svg)
def sizeGraph(self, date, size):
plot = pygal.Line()
plot.title = 'size change'
plot.x_labels = date
plot.add('total size', size)
plot.render_to_file(line_size_svg)
def filecountGraph(self, date, filecount):
plot = pygal.Line()
plot.title = 'file count change'
plot.x_labels = date
plot.add('filecount', filecount)
plot.render_to_file(line_filecount_svg)
def foldercountGraph(self, date, foldercount):
plot = pygal.Line()
plot.title = 'folder count change'
plot.x_labels = date
plot.add('folder count', foldercount)
plot.render_to_file(line_foldercount_svg)
def displayPlot(self):
self.web_view1.load(line_all_svg)
self.web_view2.load(line_size_svg)
self.web_view3.load(line_filecount_svg)
self.web_view4.load(line_foldercount_svg)
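# Hedged launch sketch - this module only defines the widget, so an entry
# point along these standard PySide lines is assumed to live elsewhere:
#
#     if __name__ == '__main__':
#         app = QtGui.QApplication(sys.argv)
#         window = MonitorGraph()
#         window.show()
#         sys.exit(app.exec_())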
| Symphonia/Searcher | no longer supported/MonitorGraph.py | Python | mit | 6,090 |
from django import forms
from django.contrib import messages
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from crispy_forms.layout import Layout, Div, HTML, Field
from crispy_forms.helper import FormHelper
from velo.payment.models import ActivePaymentChannel, Payment, DiscountCode
from velo.payment.utils import create_application_invoice, create_bank_transaction, create_team_invoice, \
approve_payment
from velo.payment.widgets import PaymentTypeWidget, DoNotRenderWidget
from velo.registration.models import Application
from velo.velo.mixins.forms import RequestKwargModelFormMixin, GetClassNameMixin
from velo.velo.utils import load_class
class ApplicationPayUpdateForm(GetClassNameMixin, RequestKwargModelFormMixin, forms.ModelForm):
accept_terms = forms.BooleanField(label=_("I confirm, that: the competition organizers are not responsible for possible injuries of participants, during the competition; my health condition corresponds to the selected distance; I will definitely use a fastened helmet and will observe road traffic regulations and competition regulations; I agree with the conditions for participation in the competition, mentioned in the regulations; I am informed, that the paid participation fee will not be returned and the participant’s starting number shall not be transferred to any other person."),
required=True)
accept_inform_participants = forms.BooleanField(label=_("I will inform all registered participants about rules."),
required=True)
accept_insurance = forms.BooleanField(label="", required=False)
discount_code = forms.CharField(label=_("Discount code"), required=False)
payment_type = forms.ChoiceField(choices=(), label="", widget=PaymentTypeWidget)
prepend = 'payment_'
participants = None
success_url = None
class Meta:
model = Application
fields = ('discount_code', 'company_name', 'company_vat', 'company_regnr', 'company_address', 'company_juridical_address',
'invoice_show_names', 'donation')
widgets = {
'donation': DoNotRenderWidget, # We will add field manually
}
def _post_clean(self):
super()._post_clean()
if not bool(self.errors):
try:
instance = self.instance
instance.set_final_price() # if donation have changed, then we need to recalculate,
# because instance is not yet saved and it means,
# that this function on model is not yet run.
if instance.final_price == 0:
payment = Payment.objects.create(content_object=instance,
total=instance.final_price,
status=Payment.STATUSES.ok,
competition=instance.competition)
approve_payment(payment, self.request.user, self.request)
self.success_url = reverse('application_ok', kwargs={'slug': instance.code})
else:
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
if active_payment_type.payment_channel.is_bill:
create_application_invoice(instance, active_payment_type)
self.success_url = reverse('application_ok', kwargs={'slug': instance.code})
messages.success(self.request,
_('Invoice successfully created and sent to %(email)s') % {'email': instance.email})
else:
self.success_url = create_bank_transaction(instance, active_payment_type, self.request)
except:
# TODO We need to catch exception and log it to sentry
self._errors['payment_type'] = self.error_class([_("Error in connection with bank. Try again later.")])
def save(self, commit=True):
instance = super(ApplicationPayUpdateForm, self).save(commit=False)
if self.request:
instance.updated_by = self.request.user
if instance.payment_status < Application.PAY_STATUS.waiting:
instance.payment_status = Application.PAY_STATUS.waiting
instance.params = dict(self.cleaned_data)
instance.params.pop("donation", None)
discount_code = instance.params.pop("discount_code", None)
if discount_code:
instance.params.update({'discount_code': discount_code.code})
if commit:
instance.save()
return instance
def clean_donation(self):
donation = self.cleaned_data.get('donation', 0.00)
        # If the person has already been issued an invoice, we do not allow changing the donation amount
if self.instance.invoice:
return float(self.instance.donation)
else:
return donation
def clean_discount_code(self):
code = self.cleaned_data.get('discount_code', "")
if not code:
return None
else:
if isinstance(code, DiscountCode):
return code
try:
return DiscountCode.objects.get(code=code)
except:
return None
def clean(self):
if not self.cleaned_data.get('donation', ''):
self.cleaned_data.update({'donation': 0.00})
super(ApplicationPayUpdateForm, self).clean()
try:
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
if self.data.get("discount_code", None) and active_payment_type.payment_channel.is_bill:
active_payment_type = None
self._errors.update({'payment_type': [_("Invoice is not available with discount code."), ]})
except:
active_payment_type = None
if active_payment_type and active_payment_type.payment_channel.is_bill: # Hard coded bill ids.
if self.cleaned_data.get('company_name', '') == '':
self._errors.update({'company_name': [_("Company Name required."), ]})
if self.cleaned_data.get('company_regnr', '') == '':
self._errors.update({'company_regnr': [_("Company registration number required."), ]})
if self.cleaned_data.get('company_address', '') == '':
self._errors.update({'company_address': [_("Company Address required."), ]})
return self.cleaned_data
def __init__(self, *args, **kwargs):
self.participants = kwargs.pop('participants', None)
super(ApplicationPayUpdateForm, self).__init__(*args, **kwargs)
insured_participants = self.participants.exclude(insurance=None)
if insured_participants:
self.fields['accept_insurance'].required = True
insurance_company = insured_participants[0].insurance.insurance_company
terms_doc = "<a href='%s' target='_blank'>%s</a>" % (insurance_company.terms_doc.url, _("Regulation")) if insurance_company.terms_doc else ""
self.fields['accept_insurance'].label = mark_safe("%s %s" % (insurance_company.term, terms_doc))
else:
self.fields['accept_insurance'].widget = forms.HiddenInput()
now = timezone.now()
competition = self.instance.competition
checkboxes = (
'accept_terms',
'accept_inform_participants',
'accept_insurance',
)
if competition.processing_class:
_class = load_class(competition.processing_class)
processing = _class(competition=competition)
if hasattr(processing, 'payment_additional_checkboxes'):
for key, field in processing.payment_additional_checkboxes(application=self.instance):
self.fields[key] = field
checkboxes += (key,)
payments = competition.activepaymentchannel_set.filter(from_date__lte=now, till_date__gte=now).select_related(
'payment_channel')
        # If the user has already requested an invoice, we don't show the option to request another one.
if self.instance.invoice:
payments = payments.filter(payment_channel__is_bill=False)
if self.instance.final_price == 0:
self.fields['payment_type'].required = False
self.fields['payment_type'].widget = forms.HiddenInput()
else:
self.fields['payment_type'].choices = [(obj.id, obj) for obj in payments]
if self.instance.discount_code:
self.initial['discount_code'] = self.instance.discount_code.code
self.fields['donation'].required = False
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
*checkboxes,
Div(
Div(
Div(
Field(
"discount_code",
css_class="input-field if--50 if--dark js-placeholder-up"
),
),
css_class="input-wrap w100 bottom-margin--15 col-s-24 col-m-12 col-l-12 col-xl-12"
),
css_class="input-wrap w100 bottom-margin--15",
),
Div(
Div(
css_class="w100 bottom-margin--30",
),
Div(
Div(
HTML(_("Payment method")) if self.instance.final_price > 0 else HTML(""),
css_class="fs14 fw700 uppercase w100 bottom-margin--30"
),
Div(
Div(
Field('payment_type', wrapper_class="row row--gutters-20"),
css_class="w100"
),
css_class="input-wrap w100"
),
css_class="inner no-padding--560"
),
css_class="w100 border-top"
),
Div(
Div(
# company_name
Div(
Div(
Field(
"company_name",
css_class="input-field if--50 if--dark js-placeholder-up",
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_vat
Div(
Div(
Field(
"company_vat",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_regnr
Div(
Div(
Field(
"company_regnr",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_address
Div(
Div(
Field(
"company_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_juridical_address
Div(
Div(
Field(
"company_juridical_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
'invoice_show_names',
css_class=""
),
css_class="invoice_fields"
)
)
class TeamPayForm(GetClassNameMixin, RequestKwargModelFormMixin, forms.ModelForm):
payment_type = forms.ChoiceField(choices=(), label="", widget=PaymentTypeWidget)
prepend = 'payment_'
success_url = None
class Meta:
model = Application
fields = ('company_name', 'company_vat', 'company_regnr', 'company_address', 'company_juridical_address',)
def _post_clean(self):
super(TeamPayForm, self)._post_clean()
if not bool(self.errors):
try:
instance = self.instance
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
if active_payment_type.payment_channel.is_bill:
create_team_invoice(instance, active_payment_type)
self.success_url = reverse('account:team', kwargs={'pk2': instance.id})
messages.info(self.request,
_('Invoice successfully created and sent to %(email)s') % {'email': instance.email})
else:
self.success_url = create_bank_transaction(instance, active_payment_type, self.request)
except:
# TODO We need to catch exception and log it to sentry
self._errors['payment_type'] = self.error_class([_("Error in connection with bank. Try again later.")])
def clean(self):
super(TeamPayForm, self).clean()
try:
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
except:
active_payment_type = None
if active_payment_type and active_payment_type.payment_channel.is_bill: # Hard coded bill ids.
if self.cleaned_data.get('company_name', '') == '':
self._errors.update({'company_name': [_("Company Name required."), ]})
if self.cleaned_data.get('company_regnr', '') == '':
self._errors.update({'company_regnr': [_("Company registration number required."), ]})
if self.cleaned_data.get('company_address', '') == '':
self._errors.update({'company_address': [_("Company Address required."), ]})
if self.cleaned_data.get('company_juridical_address', '') == '':
self._errors.update({'company_juridical_address': [_("Company Juridical Address required."), ]})
return self.cleaned_data
def __init__(self, *args, **kwargs):
super(TeamPayForm, self).__init__(*args, **kwargs)
now = timezone.now()
competition = self.instance.distance.competition
payments = competition.activepaymentchannel_set.filter(from_date__lte=now, till_date__gte=now).select_related(
'payment_channel')
        # If the user has already requested an invoice, we don't show the option to request another one.
if self.instance.invoice:
payments = payments.filter(payment_channel__is_bill=False)
self.fields['payment_type'].choices = [(obj.id, obj) for obj in payments]
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Div(
Div(
css_class="w100 bottom-margin--30",
),
Div(
Div(
HTML(_("Payment method")),
css_class="fs14 fw700 uppercase w100 bottom-margin--30"
),
Div(
Div(
Field('payment_type', wrapper_class="row row--gutters-20"),
css_class="w100"
),
css_class="input-wrap w100"
),
css_class="inner no-padding--560"
),
css_class="w100 border-top"
),
Div(
Div(
# company_name
Div(
Div(
Field(
"company_name",
css_class="input-field if--50 if--dark js-placeholder-up",
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_vat
Div(
Div(
Field(
"company_vat",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_regnr
Div(
Div(
Field(
"company_regnr",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_address
Div(
Div(
Field(
"company_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_juridical_address
Div(
Div(
Field(
"company_juridical_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
'invoice_show_names',
css_class=""
),
css_class="invoice_fields"
)
)
| eeriks/velo.lv | velo/payment/forms.py | Python | gpl-3.0 | 19,328 |
from django.conf.urls import patterns, include, url
from rememerme.sessions.rest import views
urlpatterns = patterns('',
url(r'^/?$', views.SessionsListView.as_view()),
url(r'^/(?P<session_id>[-\w]+)/?$', views.SessionsSingleView.as_view()),
)
| rememerme/sessions-api | rememerme/sessions/rest/urls.py | Python | apache-2.0 | 254 |
from io import StringIO
import re
import linesep
scenarios = [
(
"empty",
{
"text": "",
"sep": "\n",
"preceded": [],
"terminated": [],
"separated": [""],
"preceded_retained": [],
"terminated_retained": [],
"separated_retained": [""],
},
),
(
"no_sep",
{
"text": "foo",
"sep": "\n",
"preceded": ["foo"],
"terminated": ["foo"],
"separated": ["foo"],
"preceded_retained": ["foo"],
"terminated_retained": ["foo"],
"separated_retained": ["foo"],
},
),
(
"one_sep",
{
"text": "\n",
"sep": "\n",
"preceded": [""],
"terminated": [""],
"separated": ["", ""],
"preceded_retained": ["\n"],
"terminated_retained": ["\n"],
"separated_retained": ["", "\n", ""],
},
),
(
"two_seps",
{
"text": "\n\n",
"sep": "\n",
"preceded": ["", ""],
"terminated": ["", ""],
"separated": ["", "", ""],
"preceded_retained": ["\n", "\n"],
"terminated_retained": ["\n", "\n"],
"separated_retained": ["", "\n", "", "\n", ""],
},
),
(
"text_sep",
{
"text": "foo\n",
"sep": "\n",
"preceded": ["foo", ""],
"preceded_retained": ["foo", "\n"],
"separated": ["foo", ""],
"separated_retained": ["foo", "\n", ""],
"terminated": ["foo"],
"terminated_retained": ["foo\n"],
},
),
(
"sep_text",
{
"text": "\nfoo",
"sep": "\n",
"preceded": ["foo"],
"preceded_retained": ["\nfoo"],
"separated": ["", "foo"],
"separated_retained": ["", "\n", "foo"],
"terminated": ["", "foo"],
"terminated_retained": ["\n", "foo"],
},
),
(
"text_sep_text",
{
"text": "foo\nbar",
"sep": "\n",
"preceded": ["foo", "bar"],
"preceded_retained": ["foo", "\nbar"],
"separated": ["foo", "bar"],
"separated_retained": ["foo", "\n", "bar"],
"terminated": ["foo", "bar"],
"terminated_retained": ["foo\n", "bar"],
},
),
(
"sep_text_sep",
{
"text": "\nfoo\n",
"sep": "\n",
"preceded": ["foo", ""],
"preceded_retained": ["\nfoo", "\n"],
"separated": ["", "foo", ""],
"separated_retained": ["", "\n", "foo", "\n", ""],
"terminated": ["", "foo"],
"terminated_retained": ["\n", "foo\n"],
},
),
(
"sep_sep_text",
{
"text": "\n\nfoo",
"sep": "\n",
"preceded": ["", "foo"],
"preceded_retained": ["\n", "\nfoo"],
"separated": ["", "", "foo"],
"separated_retained": ["", "\n", "", "\n", "foo"],
"terminated": ["", "", "foo"],
"terminated_retained": ["\n", "\n", "foo"],
},
),
(
"text_sep_sep",
{
"text": "foo\n\n",
"sep": "\n",
"preceded": ["foo", "", ""],
"preceded_retained": ["foo", "\n", "\n"],
"separated": ["foo", "", ""],
"separated_retained": ["foo", "\n", "", "\n", ""],
"terminated": ["foo", ""],
"terminated_retained": ["foo\n", "\n"],
},
),
(
"regex01",
{
"text": "abca|bc",
"sep": re.compile(r"a|b"),
"preceded": ["", "c", "|", "c"],
"preceded_retained": ["a", "bc", "a|", "bc"],
"separated": ["", "", "c", "|", "c"],
"separated_retained": ["", "a", "", "b", "c", "a", "|", "b", "c"],
"terminated": ["", "", "c", "|", "c"],
"terminated_retained": ["a", "b", "ca", "|b", "c"],
},
),
(
"regex_literal",
{
"text": "abca|bc",
"sep": "a|b",
"preceded": ["abc", "c"],
"preceded_retained": ["abc", "a|bc"],
"separated": ["abc", "c"],
"separated_retained": ["abc", "a|b", "c"],
"terminated": ["abc", "c"],
"terminated_retained": ["abca|b", "c"],
},
),
(
"regex_groups",
{
"text": "abca|bc",
"sep": re.compile(r"(a)|(b)"),
"preceded": ["", "c", "|", "c"],
"preceded_retained": ["a", "bc", "a|", "bc"],
"separated": ["", "", "c", "|", "c"],
"separated_retained": ["", "a", "", "b", "c", "a", "|", "b", "c"],
"terminated": ["", "", "c", "|", "c"],
"terminated_retained": ["a", "b", "ca", "|b", "c"],
},
),
(
"straddling_delim",
{
"text": "This test is intended to test splitting when the separator is"
" a multicharacter delimiter that straddles the boundary"
" between the 512-character chunks that the `read_*` functions"
" divide their input into. Unfortunately, I'm already bored"
" of writing this test, and I still have 237 characters left"
" to go. Lorem ipsum dolor sit amet, consectetur adipisicing"
" elit, sed do eiusmod tempor incididunt ut labore et dolore"
" magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco Here it comes ---> |\r\n| <--- There"
" should be a split right there; is there?",
"sep": "\r\n",
"preceded": [
"This test is intended to test splitting when the separator is a"
" multicharacter delimiter that straddles the boundary between the"
" 512-character chunks that the `read_*` functions divide their"
" input into. Unfortunately, I'm already bored of writing this"
" test, and I still have 237 characters left to go. Lorem ipsum"
" dolor sit amet, consectetur adipisicing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad"
" minim veniam, quis nostrud exercitation ullamco Here it comes"
" ---> |",
"| <--- There should be a split right there; is there?",
],
"preceded_retained": [
"This test is intended to test splitting when the separator is a"
" multicharacter delimiter that straddles the boundary between the"
" 512-character chunks that the `read_*` functions divide their"
" input into. Unfortunately, I'm already bored of writing this"
" test, and I still have 237 characters left to go. Lorem ipsum"
" dolor sit amet, consectetur adipisicing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad"
" minim veniam, quis nostrud exercitation ullamco Here it comes"
" ---> |",
"\r\n| <--- There should be a split right there; is there?",
],
"separated": [
"This test is intended to test splitting when the separator is a"
" multicharacter delimiter that straddles the boundary between the"
" 512-character chunks that the `read_*` functions divide their"
" input into. Unfortunately, I'm already bored of writing this"
" test, and I still have 237 characters left to go. Lorem ipsum"
" dolor sit amet, consectetur adipisicing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad"
" minim veniam, quis nostrud exercitation ullamco Here it comes"
" ---> |",
"| <--- There should be a split right there; is there?",
],
"separated_retained": [
"This test is intended to test splitting when the separator is a"
" multicharacter delimiter that straddles the boundary between the"
" 512-character chunks that the `read_*` functions divide their"
" input into. Unfortunately, I'm already bored of writing this"
" test, and I still have 237 characters left to go. Lorem ipsum"
" dolor sit amet, consectetur adipisicing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad"
" minim veniam, quis nostrud exercitation ullamco Here it comes"
" ---> |",
"\r\n",
"| <--- There should be a split right there; is there?",
],
"terminated": [
"This test is intended to test splitting when the separator is a"
" multicharacter delimiter that straddles the boundary between the"
" 512-character chunks that the `read_*` functions divide their"
" input into. Unfortunately, I'm already bored of writing this"
" test, and I still have 237 characters left to go. Lorem ipsum"
" dolor sit amet, consectetur adipisicing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad"
" minim veniam, quis nostrud exercitation ullamco Here it comes"
" ---> |",
"| <--- There should be a split right there; is there?",
],
"terminated_retained": [
"This test is intended to test splitting when the separator is a"
" multicharacter delimiter that straddles the boundary between the"
" 512-character chunks that the `read_*` functions divide their"
" input into. Unfortunately, I'm already bored of writing this"
" test, and I still have 237 characters left to go. Lorem ipsum"
" dolor sit amet, consectetur adipisicing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad"
" minim veniam, quis nostrud exercitation ullamco Here it comes"
" ---> |\r\n",
"| <--- There should be a split right there; is there?",
],
},
),
(
"big_entry",
{
"text": "This test is intended to test splitting when a single entry"
" is longer than the 512-character chunk size. Lorem ipsum"
" dolor sit amet, consectetur adipisicing elit, sed do"
" eiusmod tempor incididunt ut labore et dolore magna aliqua."
" Ut enim ad minim veniam, quis nostrud exercitation ullamco"
" laboris nisi ut aliquip ex ea commodo consequat. Duis aute"
" irure dolor in reprehenderit in voluptate velit esse cillum"
" dolore eu fugiat nulla pariatur. Excepteur sint occaecat"
" cupidatat non proident, sunt in culpa qui officia|\r\n|"
" deserunt mollit anim id est laborum.",
"sep": "\r\n",
"preceded": [
"This test is intended to test splitting when a single entry is"
" longer than the 512-character chunk size. Lorem ipsum dolor sit"
" amet, consectetur adipisicing elit, sed do eiusmod tempor"
" incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut"
" aliquip ex ea commodo consequat. Duis aute irure dolor in"
" reprehenderit in voluptate velit esse cillum dolore eu fugiat"
" nulla pariatur. Excepteur sint occaecat cupidatat non proident,"
" sunt in culpa qui officia|",
"| deserunt mollit anim id est laborum.",
],
"preceded_retained": [
"This test is intended to test splitting when a single entry is"
" longer than the 512-character chunk size. Lorem ipsum dolor sit"
" amet, consectetur adipisicing elit, sed do eiusmod tempor"
" incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut"
" aliquip ex ea commodo consequat. Duis aute irure dolor in"
" reprehenderit in voluptate velit esse cillum dolore eu fugiat"
" nulla pariatur. Excepteur sint occaecat cupidatat non proident,"
" sunt in culpa qui officia|",
"\r\n| deserunt mollit anim id est laborum.",
],
"separated": [
"This test is intended to test splitting when a single entry is"
" longer than the 512-character chunk size. Lorem ipsum dolor sit"
" amet, consectetur adipisicing elit, sed do eiusmod tempor"
" incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut"
" aliquip ex ea commodo consequat. Duis aute irure dolor in"
" reprehenderit in voluptate velit esse cillum dolore eu fugiat"
" nulla pariatur. Excepteur sint occaecat cupidatat non proident,"
" sunt in culpa qui officia|",
"| deserunt mollit anim id est laborum.",
],
"separated_retained": [
"This test is intended to test splitting when a single entry is"
" longer than the 512-character chunk size. Lorem ipsum dolor sit"
" amet, consectetur adipisicing elit, sed do eiusmod tempor"
" incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut"
" aliquip ex ea commodo consequat. Duis aute irure dolor in"
" reprehenderit in voluptate velit esse cillum dolore eu fugiat"
" nulla pariatur. Excepteur sint occaecat cupidatat non proident,"
" sunt in culpa qui officia|",
"\r\n",
"| deserunt mollit anim id est laborum.",
],
"terminated": [
"This test is intended to test splitting when a single entry is"
" longer than the 512-character chunk size. Lorem ipsum dolor sit"
" amet, consectetur adipisicing elit, sed do eiusmod tempor"
" incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut"
" aliquip ex ea commodo consequat. Duis aute irure dolor in"
" reprehenderit in voluptate velit esse cillum dolore eu fugiat"
" nulla pariatur. Excepteur sint occaecat cupidatat non proident,"
" sunt in culpa qui officia|",
"| deserunt mollit anim id est laborum.",
],
"terminated_retained": [
"This test is intended to test splitting when a single entry is"
" longer than the 512-character chunk size. Lorem ipsum dolor sit"
" amet, consectetur adipisicing elit, sed do eiusmod tempor"
" incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut"
" aliquip ex ea commodo consequat. Duis aute irure dolor in"
" reprehenderit in voluptate velit esse cillum dolore eu fugiat"
" nulla pariatur. Excepteur sint occaecat cupidatat non proident,"
" sunt in culpa qui officia|\r\n",
"| deserunt mollit anim id est laborum.",
],
},
),
]
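# The glue that feeds the module-level ``scenarios`` list into the test
# functions below is not shown in this excerpt. A minimal
# ``pytest_generate_tests`` hook along these lines (an assumption, not
# necessarily the original implementation) would parametrize each test with
# the scenario values whose keys match that test's argument names.
def pytest_generate_tests(metafunc):
    argnames = [
        name
        for name in (
            "text",
            "sep",
            "preceded",
            "terminated",
            "separated",
            "preceded_retained",
            "terminated_retained",
            "separated_retained",
        )
        if name in metafunc.fixturenames
    ]
    ids = [scenario_id for scenario_id, _ in scenarios]
    argvalues = [[params[name] for name in argnames] for _, params in scenarios]
    metafunc.parametrize(argnames, argvalues, ids=ids)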
def test_split_preceded(text, sep, preceded):
assert linesep.split_preceded(text, sep, retain=False) == preceded
def test_split_terminated(text, sep, terminated):
assert linesep.split_terminated(text, sep, retain=False) == terminated
def test_split_separated(text, sep, separated):
assert linesep.split_separated(text, sep, retain=False) == separated
def test_split_preceded_retained(text, sep, preceded_retained):
assert linesep.split_preceded(text, sep, retain=True) == preceded_retained
def test_split_terminated_retained(text, sep, terminated_retained):
assert linesep.split_terminated(text, sep, retain=True) == terminated_retained
def test_split_separated_retained(text, sep, separated_retained):
assert linesep.split_separated(text, sep, retain=True) == separated_retained
def test_read_preceded(text, sep, preceded):
assert list(linesep.read_preceded(StringIO(text), sep, retain=False)) == preceded
def test_read_terminated(text, sep, terminated):
assert (
list(linesep.read_terminated(StringIO(text), sep, retain=False)) == terminated
)
def test_read_separated(text, sep, separated):
assert list(linesep.read_separated(StringIO(text), sep, retain=False)) == separated
def test_read_preceded_retained(text, sep, preceded_retained):
assert (
list(linesep.read_preceded(StringIO(text), sep, retain=True))
== preceded_retained
)
def test_read_terminated_retained(text, sep, terminated_retained):
assert (
list(linesep.read_terminated(StringIO(text), sep, retain=True))
== terminated_retained
)
def test_read_separated_retained(text, sep, separated_retained):
assert (
list(linesep.read_separated(StringIO(text), sep, retain=True))
== separated_retained
)
| jwodder/linesep | test/test_core/test_split_text.py | Python | mit | 18,169 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from cinder import exception
from cinder.image import glance
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.xenapi import lib as xenapi_lib
LOG = logging.getLogger(__name__)
xenapi_opts = [
cfg.StrOpt('xenapi_connection_url',
default=None,
help='URL for XenAPI connection'),
cfg.StrOpt('xenapi_connection_username',
default='root',
help='Username for XenAPI connection'),
cfg.StrOpt('xenapi_connection_password',
default=None,
help='Password for XenAPI connection',
secret=True),
cfg.StrOpt('xenapi_sr_base_path',
default='/var/run/sr-mount',
help='Base path to the storage repository'),
]
xenapi_nfs_opts = [
cfg.StrOpt('xenapi_nfs_server',
default=None,
help='NFS server to be used by XenAPINFSDriver'),
cfg.StrOpt('xenapi_nfs_serverpath',
default=None,
help='Path of exported NFS, used by XenAPINFSDriver'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_opts)
CONF.register_opts(xenapi_nfs_opts)
class XenAPINFSDriver(driver.VolumeDriver):
VERSION = "1.0.0"
def __init__(self, *args, **kwargs):
super(XenAPINFSDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(xenapi_opts)
self.configuration.append_config_values(xenapi_nfs_opts)
def do_setup(self, context):
session_factory = xenapi_lib.SessionFactory(
self.configuration.xenapi_connection_url,
self.configuration.xenapi_connection_username,
self.configuration.xenapi_connection_password
)
self.nfs_ops = xenapi_lib.NFSBasedVolumeOperations(session_factory)
def create_cloned_volume(self, volume, src_vref):
raise NotImplementedError()
def create_volume(self, volume):
volume_details = self.nfs_ops.create_volume(
self.configuration.xenapi_nfs_server,
self.configuration.xenapi_nfs_serverpath,
volume['size'],
volume['display_name'],
volume['display_description']
)
location = "%(sr_uuid)s/%(vdi_uuid)s" % volume_details
return dict(provider_location=location)
def create_export(self, context, volume):
pass
def delete_volume(self, volume):
sr_uuid, vdi_uuid = volume['provider_location'].split('/')
self.nfs_ops.delete_volume(
self.configuration.xenapi_nfs_server,
self.configuration.xenapi_nfs_serverpath,
sr_uuid,
vdi_uuid
)
def remove_export(self, context, volume):
pass
def initialize_connection(self, volume, connector):
sr_uuid, vdi_uuid = volume['provider_location'].split('/')
return dict(
driver_volume_type='xensm',
data=dict(
name_label=volume['display_name'] or '',
name_description=volume['display_description'] or '',
sr_uuid=sr_uuid,
vdi_uuid=vdi_uuid,
sr_type='nfs',
server=self.configuration.xenapi_nfs_server,
serverpath=self.configuration.xenapi_nfs_serverpath,
introduce_sr_keys=['sr_type', 'server', 'serverpath']
)
)
def terminate_connection(self, volume, connector, **kwargs):
pass
def check_for_setup_error(self):
"""To override superclass' method."""
def create_volume_from_snapshot(self, volume, snapshot):
return self._copy_volume(
snapshot, volume['display_name'], volume['name_description'])
def create_snapshot(self, snapshot):
volume_id = snapshot['volume_id']
volume = snapshot['volume']
return self._copy_volume(
volume, snapshot['display_name'], snapshot['display_description'])
def _copy_volume(self, volume, target_name, target_desc):
sr_uuid, vdi_uuid = volume['provider_location'].split('/')
volume_details = self.nfs_ops.copy_volume(
self.configuration.xenapi_nfs_server,
self.configuration.xenapi_nfs_serverpath,
sr_uuid,
vdi_uuid,
target_name,
target_desc
)
location = "%(sr_uuid)s/%(vdi_uuid)s" % volume_details
return dict(provider_location=location)
def delete_snapshot(self, snapshot):
self.delete_volume(snapshot)
def ensure_export(self, context, volume):
pass
def copy_image_to_volume(self, context, volume, image_service, image_id):
if image_utils.is_xenserver_image(context, image_service, image_id):
return self._use_glance_plugin_to_copy_image_to_volume(
context, volume, image_service, image_id)
return self._use_image_utils_to_pipe_bytes_to_volume(
context, volume, image_service, image_id)
def _use_image_utils_to_pipe_bytes_to_volume(self, context, volume,
image_service, image_id):
sr_uuid, vdi_uuid = volume['provider_location'].split('/')
with self.nfs_ops.volume_attached_here(CONF.xenapi_nfs_server,
CONF.xenapi_nfs_serverpath,
sr_uuid, vdi_uuid,
False) as device:
image_utils.fetch_to_raw(context,
image_service,
image_id,
device,
self.configuration.volume_dd_blocksize,
size=volume['size'])
def _use_glance_plugin_to_copy_image_to_volume(self, context, volume,
image_service, image_id):
sr_uuid, vdi_uuid = volume['provider_location'].split('/')
api_servers = glance.get_api_servers()
glance_server = api_servers.next()
auth_token = context.auth_token
overwrite_result = self.nfs_ops.use_glance_plugin_to_overwrite_volume(
CONF.xenapi_nfs_server,
CONF.xenapi_nfs_serverpath,
sr_uuid,
vdi_uuid,
glance_server,
image_id,
auth_token,
CONF.xenapi_sr_base_path)
if overwrite_result is False:
raise exception.ImageCopyFailure(reason='Overwriting volume '
'failed.')
self.nfs_ops.resize_volume(
CONF.xenapi_nfs_server,
CONF.xenapi_nfs_serverpath,
sr_uuid,
vdi_uuid,
volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
if image_utils.is_xenserver_format(image_meta):
return self._use_glance_plugin_to_upload_volume(
context, volume, image_service, image_meta)
return self._use_image_utils_to_upload_volume(
context, volume, image_service, image_meta)
def _use_image_utils_to_upload_volume(self, context, volume, image_service,
image_meta):
sr_uuid, vdi_uuid = volume['provider_location'].split('/')
with self.nfs_ops.volume_attached_here(CONF.xenapi_nfs_server,
CONF.xenapi_nfs_serverpath,
sr_uuid, vdi_uuid,
True) as device:
image_utils.upload_volume(context,
image_service,
image_meta,
device)
def _use_glance_plugin_to_upload_volume(self, context, volume,
image_service, image_meta):
image_id = image_meta['id']
sr_uuid, vdi_uuid = volume['provider_location'].split('/')
api_servers = glance.get_api_servers()
glance_server = api_servers.next()
auth_token = context.auth_token
self.nfs_ops.use_glance_plugin_to_upload_volume(
CONF.xenapi_nfs_server,
CONF.xenapi_nfs_serverpath,
sr_uuid,
vdi_uuid,
glance_server,
image_id,
auth_token,
CONF.xenapi_sr_base_path)
def get_volume_stats(self, refresh=False):
if refresh or not self._stats:
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or 'XenAPINFS',
data['vendor_name'] = 'Open Source',
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'xensm'
data['total_capacity_gb'] = 'unknown'
data['free_capacity_gb'] = 'unknown'
data['reserved_percentage'] = 0
self._stats = data
return self._stats
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
raise NotImplementedError()
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
raise NotImplementedError()
| NeCTAR-RC/cinder | cinder/volume/drivers/xenapi/sm.py | Python | apache-2.0 | 10,307 |
import nltk
from nltk import FreqDist
# NOTE: `text` is not defined in this script; it is assumed to be provided
# elsewhere (e.g. loaded from a corpus file) before these lines run.
text1 = nltk.Text(nltk.word_tokenize(text.lower().replace('.', ' ')))
freq = FreqDist(text1)
keys = freq.keys()[:200]
stopword_text = ""
for k in keys:
    stopword_text = stopword_text + k + '\n'
open('stopwords', 'w').write(stopword_text) | 1wheel/whalewords | makeStopWords.py | Python | mit | 211
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for module management."""
# Do not add any imports to non-standard modules here.
import os
import site
import sys
def _config_modules_directory(root_directory):
"""Get the config modules directory."""
config_dir = os.getenv('CONFIG_DIR_OVERRIDE')
if not config_dir:
config_dir = os.path.join(root_directory, 'src', 'appengine', 'config')
return os.path.join(config_dir, 'modules')
def _patch_appengine_modules_for_bots():
"""Patch out App Engine reliant behaviour from bots."""
if os.getenv('SERVER_SOFTWARE'):
# Not applicable on App Engine.
return
# google.auth uses App Engine credentials based on importability of
# google.appengine.api.app_identity.
try:
from google.auth import app_engine as auth_app_engine
if auth_app_engine.app_identity:
auth_app_engine.app_identity = None
except ImportError:
pass
def fix_module_search_paths():
"""Add directories that we must be able to import from to path."""
root_directory = os.environ['ROOT_DIR']
source_directory = os.path.join(root_directory, 'src')
python_path = os.getenv('PYTHONPATH', '').split(os.pathsep)
third_party_libraries_directory = os.path.join(source_directory,
'third_party')
config_modules_directory = _config_modules_directory(root_directory)
if (os.path.exists(config_modules_directory) and
config_modules_directory not in sys.path):
sys.path.insert(0, config_modules_directory)
python_path.insert(0, config_modules_directory)
if third_party_libraries_directory not in sys.path:
sys.path.insert(0, third_party_libraries_directory)
python_path.insert(0, third_party_libraries_directory)
if source_directory not in sys.path:
sys.path.insert(0, source_directory)
python_path.insert(0, source_directory)
os.environ['PYTHONPATH'] = os.pathsep.join(python_path)
# Add site directory to make from imports work in google namespace.
site.addsitedir(third_party_libraries_directory)
# TODO(ochang): Remove this once SDK is removed from images.
_patch_appengine_modules_for_bots()
| google/clusterfuzz | src/clusterfuzz/_internal/base/modules.py | Python | apache-2.0 | 2,700 |
def main():
    print('here')
if __name__ == '__main__':
    main()
| LairdStreak/MyPyPlayGround | tujData/tujInscriptionAdvice.py | Python | mit | 90 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Xcode supports build variable substitutions and CPP; sadly, that doesn't work
# because:
#
# 1. Xcode wants to do the Info.plist work before it runs any build phases,
# this means if we were to generate a .h file for INFOPLIST_PREFIX_HEADER
# we'd have to put it in another target so it runs in time.
# 2. Xcode also doesn't check to see if the header being used as a prefix for
# the Info.plist has changed. So even if we updated it, it's only looking
# at the modtime of the info.plist to see if that's changed.
#
# So, we work around all of this by making a script build phase that will run
# during the app build, and simply update the info.plist in place. This way
# by the time the app target is done, the info.plist is correct.
#
from __future__ import print_function
import optparse
import os
import plistlib
import re
import subprocess
import sys
import tempfile
TOP = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
def _ConvertPlist(source_plist, output_plist, fmt):
"""Convert |source_plist| to |fmt| and save as |output_plist|."""
return subprocess.call(
['plutil', '-convert', fmt, '-o', output_plist, source_plist])
def _GetOutput(args):
"""Runs a subprocess and waits for termination. Returns (stdout, returncode)
of the process. stderr is attached to the parent."""
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
return (stdout, proc.returncode)
def _GetOutputNoError(args):
"""Similar to _GetOutput() but ignores stderr. If there's an error launching
the child (like file not found), the exception will be caught and (None, 1)
will be returned to mimic quiet failure."""
try:
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
return (None, 1)
(stdout, stderr) = proc.communicate()
return (stdout, proc.returncode)
def _RemoveKeys(plist, *keys):
"""Removes a varargs of keys from the plist."""
for key in keys:
try:
del plist[key]
except KeyError:
pass
def _ApplyVersionOverrides(version, keys, overrides, separator='.'):
"""Applies version overrides.
Given a |version| string as "a.b.c.d" (assuming a default separator) with
version components named by |keys| then overrides any value that is present
in |overrides|.
>>> _ApplyVersionOverrides('a.b', ['major', 'minor'], {'minor': 'd'})
'a.d'
"""
if not overrides:
return version
version_values = version.split(separator)
for i, (key, value) in enumerate(zip(keys, version_values)):
if key in overrides:
version_values[i] = overrides[key]
return separator.join(version_values)
def _GetVersion(version_format, values, overrides=None):
"""Generates a version number according to |version_format| using the values
from |values| or |overrides| if given."""
result = version_format
for key in values:
if overrides and key in overrides:
value = overrides[key]
else:
value = values[key]
result = result.replace('@%s@' % key, value)
return result
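# Illustrative example (not part of the original script):
# _GetVersion('@MAJOR@.@MINOR@.@BUILD@.@PATCH@',
#             {'MAJOR': '1', 'MINOR': '2', 'BUILD': '3', 'PATCH': '4'})
# returns '1.2.3.4'; any key present in |overrides| takes precedence over |values|.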
def _AddVersionKeys(
plist, version_format_for_key, version=None, overrides=None):
"""Adds the product version number into the plist. Returns True on success and
False on error. The error will be printed to stderr."""
if not version:
# Pull in the Chrome version number.
VERSION_TOOL = os.path.join(TOP, 'build/util/version.py')
VERSION_FILE = os.path.join(TOP, 'chrome/VERSION')
(stdout, retval) = _GetOutput([
VERSION_TOOL, '-f', VERSION_FILE,
'-t', '@MAJOR@.@MINOR@.@BUILD@.@PATCH@'])
# If the command finished with a non-zero return code, then report the
# error up.
if retval != 0:
return False
version = stdout.strip()
# Parse the given version number, that should be in MAJOR.MINOR.BUILD.PATCH
# format (where each value is a number). Note that str.isdigit() returns
# True if the string is composed only of digits (and thus match \d+ regexp).
groups = version.split('.')
if len(groups) != 4 or not all(element.isdigit() for element in groups):
print('Invalid version string specified: "%s"' % version, file=sys.stderr)
return False
values = dict(zip(('MAJOR', 'MINOR', 'BUILD', 'PATCH'), groups))
for key in version_format_for_key:
plist[key] = _GetVersion(version_format_for_key[key], values, overrides)
# Return with no error.
return True
def _DoSCMKeys(plist, add_keys):
"""Adds the SCM information, visible in about:version, to property list. If
|add_keys| is True, it will insert the keys, otherwise it will remove them."""
scm_revision = None
if add_keys:
# Pull in the Chrome revision number.
VERSION_TOOL = os.path.join(TOP, 'build/util/version.py')
LASTCHANGE_FILE = os.path.join(TOP, 'build/util/LASTCHANGE')
(stdout, retval) = _GetOutput([VERSION_TOOL, '-f', LASTCHANGE_FILE, '-t',
'@LASTCHANGE@'])
if retval:
return False
scm_revision = stdout.rstrip()
# See if the operation failed.
_RemoveKeys(plist, 'SCMRevision')
if scm_revision != None:
plist['SCMRevision'] = scm_revision
elif add_keys:
print('Could not determine SCM revision. This may be OK.', file=sys.stderr)
return True
def _AddBreakpadKeys(plist, branding, platform, staging):
"""Adds the Breakpad keys. This must be called AFTER _AddVersionKeys() and
also requires the |branding| argument."""
plist['BreakpadReportInterval'] = '3600' # Deliberately a string.
plist['BreakpadProduct'] = '%s_%s' % (branding, platform)
plist['BreakpadProductDisplay'] = branding
if staging:
plist['BreakpadURL'] = 'https://clients2.google.com/cr/staging_report'
else:
plist['BreakpadURL'] = 'https://clients2.google.com/cr/report'
# These are both deliberately strings and not boolean.
plist['BreakpadSendAndExit'] = 'YES'
plist['BreakpadSkipConfirm'] = 'YES'
def _RemoveBreakpadKeys(plist):
"""Removes any set Breakpad keys."""
_RemoveKeys(plist,
'BreakpadURL',
'BreakpadReportInterval',
'BreakpadProduct',
'BreakpadProductDisplay',
'BreakpadVersion',
'BreakpadSendAndExit',
'BreakpadSkipConfirm')
def _TagSuffixes():
# Keep this list sorted in the order that tag suffix components are to
# appear in a tag value. That is to say, it should be sorted per ASCII.
components = ('full',)
assert tuple(sorted(components)) == components
components_len = len(components)
combinations = 1 << components_len
tag_suffixes = []
for combination in xrange(0, combinations):
tag_suffix = ''
for component_index in xrange(0, components_len):
if combination & (1 << component_index):
tag_suffix += '-' + components[component_index]
tag_suffixes.append(tag_suffix)
return tag_suffixes
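# Note (not part of the original script): with the single 'full' component
# above, _TagSuffixes() returns ['', '-full']; each suffix is appended to
# 'KSChannelID' when the Keystone keys are added or removed below.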
def _AddKeystoneKeys(plist, bundle_identifier):
"""Adds the Keystone keys. This must be called AFTER _AddVersionKeys() and
also requires the |bundle_identifier| argument (com.example.product)."""
plist['KSVersion'] = plist['CFBundleShortVersionString']
plist['KSProductID'] = bundle_identifier
plist['KSUpdateURL'] = 'https://tools.google.com/service/update2'
_RemoveKeys(plist, 'KSChannelID')
for tag_suffix in _TagSuffixes():
if tag_suffix:
plist['KSChannelID' + tag_suffix] = tag_suffix
def _RemoveKeystoneKeys(plist):
"""Removes any set Keystone keys."""
_RemoveKeys(plist,
'KSVersion',
'KSProductID',
'KSUpdateURL')
tag_keys = []
for tag_suffix in _TagSuffixes():
tag_keys.append('KSChannelID' + tag_suffix)
_RemoveKeys(plist, *tag_keys)
def Main(argv):
parser = optparse.OptionParser('%prog [options]')
parser.add_option('--plist', dest='plist_path', action='store',
type='string', default=None, help='The path of the plist to tweak.')
parser.add_option('--output', dest='plist_output', action='store',
type='string', default=None, help='If specified, the path to output ' + \
'the tweaked plist, rather than overwriting the input.')
parser.add_option('--breakpad', dest='use_breakpad', action='store',
type='int', default=False, help='Enable Breakpad [1 or 0]')
parser.add_option('--breakpad_staging', dest='use_breakpad_staging',
action='store_true', default=False,
help='Use staging breakpad to upload reports. Ignored if --breakpad=0.')
parser.add_option('--keystone', dest='use_keystone', action='store',
type='int', default=False, help='Enable Keystone [1 or 0]')
parser.add_option('--scm', dest='add_scm_info', action='store', type='int',
default=True, help='Add SCM metadata [1 or 0]')
parser.add_option('--branding', dest='branding', action='store',
type='string', default=None, help='The branding of the binary')
parser.add_option('--bundle_id', dest='bundle_identifier',
action='store', type='string', default=None,
help='The bundle id of the binary')
parser.add_option('--platform', choices=('ios', 'mac'), default='mac',
help='The target platform of the bundle')
parser.add_option('--version-overrides', action='append',
help='Key-value pair to override specific component of version '
'like key=value (can be passed multiple time to configure '
'more than one override)')
parser.add_option('--format', choices=('binary1', 'xml1', 'json'),
default='xml1', help='Format to use when writing property list '
'(default: %(default)s)')
parser.add_option('--version', dest='version', action='store', type='string',
default=None, help='The version string [major.minor.build.patch]')
(options, args) = parser.parse_args(argv)
if len(args) > 0:
print(parser.get_usage(), file=sys.stderr)
return 1
if not options.plist_path:
print('No --plist specified.', file=sys.stderr)
return 1
# Read the plist into its parsed format. Convert the file to 'xml1' as
# plistlib only supports that format in Python 2.7.
with tempfile.NamedTemporaryFile() as temp_info_plist:
retcode = _ConvertPlist(options.plist_path, temp_info_plist.name, 'xml1')
if retcode != 0:
return retcode
plist = plistlib.readPlist(temp_info_plist.name)
# Convert overrides.
overrides = {}
if options.version_overrides:
for pair in options.version_overrides:
if not '=' in pair:
print('Invalid value for --version-overrides:', pair, file=sys.stderr)
return 1
key, value = pair.split('=', 1)
overrides[key] = value
if key not in ('MAJOR', 'MINOR', 'BUILD', 'PATCH'):
print('Unsupported key for --version-overrides:', key, file=sys.stderr)
return 1
if options.platform == 'mac':
version_format_for_key = {
# Add public version info so "Get Info" works.
'CFBundleShortVersionString': '@MAJOR@.@MINOR@.@BUILD@.@PATCH@',
# Honor the 429496.72.95 limit. The maximum comes from splitting 2^32 - 1
      # into 6, 2, 2 digits. The limitation was present in Tiger, but it may
      # have been fixed in a later OS release; that hasn't been tested (it's
      # easy enough to find out with "lsregister -dump").
      # http://lists.apple.com/archives/carbon-dev/2006/Jun/msg00139.html
      # BUILD will always be an increasing value, so BUILD.PATCH gives us
      # something unique that meets what LS wants.
'CFBundleVersion': '@BUILD@.@PATCH@',
}
else:
version_format_for_key = {
'CFBundleShortVersionString': '@MAJOR@.@BUILD@.@PATCH@',
'CFBundleVersion': '@MAJOR@.@MINOR@.@BUILD@.@PATCH@'
}
if options.use_breakpad:
version_format_for_key['BreakpadVersion'] = \
'@MAJOR@.@MINOR@.@BUILD@.@PATCH@'
# Insert the product version.
if not _AddVersionKeys(
plist, version_format_for_key, version=options.version,
overrides=overrides):
return 2
# Add Breakpad if configured to do so.
if options.use_breakpad:
if options.branding is None:
print('Use of Breakpad requires branding.', file=sys.stderr)
return 1
# Map "target_os" passed from gn via the --platform parameter
# to the platform as known by breakpad.
platform = {'mac': 'Mac', 'ios': 'iOS'}[options.platform]
_AddBreakpadKeys(plist, options.branding, platform,
options.use_breakpad_staging)
else:
_RemoveBreakpadKeys(plist)
# Add Keystone if configured to do so.
if options.use_keystone:
if options.bundle_identifier is None:
print('Use of Keystone requires the bundle id.', file=sys.stderr)
return 1
_AddKeystoneKeys(plist, options.bundle_identifier)
else:
_RemoveKeystoneKeys(plist)
# Adds or removes any SCM keys.
if not _DoSCMKeys(plist, options.add_scm_info):
return 3
output_path = options.plist_path
if options.plist_output is not None:
output_path = options.plist_output
# Now that all keys have been mutated, rewrite the file.
with tempfile.NamedTemporaryFile() as temp_info_plist:
plistlib.writePlist(plist, temp_info_plist.name)
# Convert Info.plist to the format requested by the --format flag. Any
# format would work on Mac but iOS requires specific format.
return _ConvertPlist(temp_info_plist.name, output_path, options.format)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| endlessm/chromium-browser | build/mac/tweak_info_plist.py | Python | bsd-3-clause | 13,521 |
'''
Harvester for the ASU Digital Repository for the SHARE project
Example API call: http://udspace.udel.edu/dspace-oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class UdelHarvester(OAIHarvester):
short_name = 'udel'
long_name = 'University of Delaware Institutional Repository'
url = 'http://udspace.udel.edu/dspace-oai/request'
base_url = 'http://udspace.udel.edu/dspace-oai/request'
# TODO - add date back to property list - udel has non-date
# formats in their date field which elasticsearch does not enjoy
property_list = ['identifier', 'type']
timezone_granularity = True
| fabianvf/scrapi | scrapi/harvesters/udel.py | Python | apache-2.0 | 702 |
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import os
import re
from indico.util.date_time import format_date, format_datetime, format_time
# fcntl is only available for POSIX systems
if os.name == 'posix':
import fcntl
def utf8rep(text):
# \x -> _x keeps windows systems satisfied
return text.decode('utf-8').encode('unicode_escape').replace('\\x','_x')
def validMail(emailstr, allowMultiple=True):
"""
    Check the validity of an email address or a series of email addresses.
- emailstr: a string representing a single email address or several
email addresses separated by separators
Returns True if the email/emails is/are valid.
"""
    # Convert the separators into valid ones. For now, only mixes of whitespace,
    # semicolons and commas are handled and replaced by commas. This way the
# method only checks the validity of the email addresses without taking
# care of the separators
emails = setValidEmailSeparators(emailstr)
# Creates a list of emails
emaillist = emails.split(",")
if not allowMultiple and len(emaillist) > 1:
return False
# Checks the validity of each email in the list
    if emaillist:
for em in emaillist:
if re.search(r"^[-a-zA-Z0-9!#$%&'*+/=?\^_`{|}~]+(?:.[-a-zA-Z0-9!#$%&'*+/=?^_`{|}~]+)*@(?:[a-zA-Z0-9](?:[-a-zA-Z0-9]*[a-zA-Z0-9])?.)+[a-zA-Z0-9](?:[-a-zA-Z0-9]*[a-zA-Z0-9])?$",
em) == None:
# if re.search("^[a-zA-Z][\w\.-]*[a-zA-Z0-9]@[a-zA-Z0-9][\w\.-]*[a-zA-Z0-9]\.[a-zA-Z][a-zA-Z\.]*[a-zA-Z]$",
# em) == None:
return False
return True
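# Illustrative examples (not part of the original module):
# validMail("john.doe@example.com") -> True
# validMail("john.doe@example.com; jane@example.org") -> True (multiple addresses allowed)
# validMail("john.doe@example.com; jane@example.org", allowMultiple=False) -> False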
def setValidEmailSeparators(emailstr):
"""
Replace occurrences of separators in a string of email addresses by
occurrences of "," in order to get a string of emails valid with the
html 'a' tag. Separators that could be replaced are semi-colons,
whitespaces and mixes of the previous two along with commas. This allows
the handling of multiple email addresses.
- emailstr: the string of emails in which we want to convert the separators
into commas
"""
    # remove occurrences of separators at the beginning and at the end of
# the string
emails = re.subn(r"(?:^[ ;,]+)|(?:[ ;,]+$)", "", emailstr)[0]
# return the string obtained after replacing the separators
return re.subn(r"[ ;,]+", ",", emails)[0]
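# Illustrative example (not part of the original module):
# setValidEmailSeparators("a@example.com; b@example.org c@example.net")
# returns "a@example.com,b@example.org,c@example.net"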
def isStringHTML(s):
if not isinstance(s, basestring):
return False
s = s.lower()
return any(tag in s for tag in ('<p>', '<p ', '<br', '<li>'))
def getEmailList(stri):
emailList = []
    for email in stri.split(","):
        email = email.strip()
        if email != "" and email.rfind(".", email.find("@")) > 0 and email not in emailList:
emailList.append(email)
return emailList
def encodeUnicode(text, sourceEncoding = "utf-8"):
try:
        tmp = str(text).decode(sourceEncoding)
    except Exception:
        try:
            tmp = str(text).decode('iso-8859-1')
        except Exception:
return ""
return tmp.encode('utf-8')
def unicodeSlice(s, start, end, encoding='utf-8'):
"""Returns a slice of the string s, based on its encoding."""
return s.decode(encoding, 'replace')[start:end]
def formatDateTime(dateTime, showWeek=False, format=None, locale=None, server_tz=False):
week = "EEEE" if showWeek else ""
if not format:
return format_datetime(dateTime, week+'d/M/yyyy H:mm', locale=locale, server_tz=server_tz)
else:
return format_datetime(dateTime, format, locale=locale, server_tz=server_tz)
def formatDate(date, showWeek=False, format=None, locale=None, timezone=None):
week = ""
if showWeek:
week = "EEE "
if not format:
return format_date(date, week+'d/M/yyyy', locale=locale, timezone=timezone)
else:
return format_date(date, format, locale=locale, timezone=timezone)
def formatTime(tm, format=None, locale=None, server_tz=False, tz=None):
if not format:
return format_time(tm, 'H:mm', locale=locale, timezone=tz, server_tz=server_tz)
else:
return format_time(tm, format, locale=locale, timezone=tz, server_tz=server_tz)
def formatDuration(duration, units = 'minutes', truncate = True):
""" Formats a duration (a timedelta object)
"""
seconds = duration.days * 86400 + duration.seconds
if units == 'seconds':
result = seconds
elif units == 'minutes':
result = seconds / 60
elif units == 'hours':
result = seconds / 3600
elif units == 'days':
result = seconds / 86400
elif units == 'hours_minutes':
#truncate has no effect here
minutes = int(seconds / 60) % 60
hours = int(seconds / 3600)
return str(hours) + 'h' + str(minutes).zfill(2) + 'm'
elif units == '(hours)_minutes':
#truncate has no effect here
minutes = int(seconds / 60) % 60
hours = int(seconds / 3600)
if hours:
return str(hours) + 'h' + str(minutes).zfill(2) + 'm'
else:
return str(minutes) + 'm'
else:
raise Exception("Unknown duration unit: " + str(units))
if truncate:
return int(result)
else:
return result
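# Illustrative examples (not part of the original module):
# formatDuration(timedelta(minutes=90)) -> 90
# formatDuration(timedelta(hours=2, minutes=5), units='hours_minutes') -> '2h05m'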
class OSSpecific(object):
"""
Namespace for OS Specific operations:
- file locking
"""
@classmethod
def _lockFilePosix(cls, f, lockType):
"""
Locks file f with lock type lockType
"""
fcntl.flock(f, lockType)
@classmethod
def _lockFileOthers(cls, f, lockType):
"""
Win32/others file locking could be implemented here
"""
pass
@classmethod
def lockFile(cls, f, lockType):
"""
API method - locks a file
f - file handler
lockType - string: LOCK_EX | LOCK_UN | LOCK_SH
"""
cls._lockFile(f, cls._lockTranslationTable[lockType])
# Check OS and choose correct locking method
if os.name == 'posix':
_lockFile = _lockFilePosix
_lockTranslationTable = {
'LOCK_EX': fcntl.LOCK_EX,
'LOCK_UN': fcntl.LOCK_UN,
'LOCK_SH': fcntl.LOCK_SH
}
else:
_lockFile = _lockFileOthers
_lockTranslationTable = {
'LOCK_EX': None,
'LOCK_UN': None,
'LOCK_SH': None
}
| nop33/indico | indico/legacy/common/utils.py | Python | gpl-3.0 | 7,100 |
# This stores all the dialogue related stuff
import screen
class Dialogue(object):
"""Stores the dialogue tree for an individual NPC"""
def __init__(self, npc):
super(Dialogue, self).__init__()
self.npc = npc
self.game = npc.game
self.root = None
self.currentNode = None
def setRootNode(self, node):
self.root = node
def resetCurrentNode(self):
self.currentNode = self.root
def beginConversation(self):
self.resetCurrentNode()
self.runNextNode()
def runNextNode(self):
if self.currentNode is None:
return
# Grab all the DialogueChoices that should be shown
availableChoices = []
for (choice, predicate, child) in self.currentNode.choices:
if predicate is not None:
if predicate():
availableChoices.append((choice, child))
else:
availableChoices.append((choice, child))
npcName = None
if self.game.player.notebook.isNpcKnown(self.npc):
npcName = self.npc.firstName + " " + self.npc.lastName
choiceTexts = [choice.choiceText for (choice, child) in availableChoices]
screen.printDialogueChoices(self.game.screen, self.game.player,
choiceTexts, npcName)
choiceIdx = self.game.getDialogueChoice(len(choiceTexts)) - 1
self.game.draw()
(choice, nextNode) = availableChoices[choiceIdx]
response = ""
response += choice.response
if choice.responseFunction is not None:
response = choice.responseFunction(self.npc, response)
self.game.printDescription(response, npcName)
self.currentNode = nextNode
self.runNextNode()
class DialogueNode(object):
"""A single node of the dialogue tree"""
def __init__(self):
super(DialogueNode, self).__init__()
self.choices = []
def addChoice(self, choice, choicePredicate=None, childNode=None):
self.choices.append((choice, choicePredicate, childNode))
class DialogueChoice(object):
"""Stores the choice/function pair"""
def __init__(self, choiceText, response, responseFunction=None):
super(DialogueChoice, self).__init__()
self.choiceText = choiceText
self.response = response
self.responseFunction = responseFunction
def callResponseFunction(self, npcArgument, response):
        if self.responseFunction is not None:
self.responseFunction(npcArgument, response) | mjdarby/RogueDetective | dialogue.py | Python | gpl-2.0 | 2,401 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Author: echel0n <sickrage.tv@gmail.com>
# URL: http://www.github.com/sickragetv/sickrage/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import logging
import traceback
from sickbeard import tvcache
from sickbeard.providers import generic
from sickbeard.bs4_parser import BS4Parser
class CpasbienProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "Cpasbien")
self.supportsBacklog = True
self.public = True
self.ratio = None
self.url = "http://www.cpasbien.io"
self.proper_strings = ['PROPER', 'REPACK']
self.cache = CpasbienCache(self)
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_params.keys():
logging.debug("Search Mode: %s" % mode)
for search_string in search_params[mode]:
                if mode != 'RSS':
logging.debug("Search string: %s " % search_string)
searchURL = self.url + '/recherche/' + search_string.replace('.', '-') + '.html'
logging.debug("Search URL: %s" % searchURL)
data = self.getURL(searchURL)
if not data:
continue
try:
with BS4Parser(data, features=["html5lib", "permissive"]) as html:
lin = erlin = 0
resultdiv = []
while erlin == 0:
try:
classlin = 'ligne' + str(lin)
resultlin = html.findAll(attrs={'class': [classlin]})
if resultlin:
for ele in resultlin:
resultdiv.append(ele)
lin += 1
else:
erlin = 1
except Exception:
erlin = 1
for row in resultdiv:
try:
link = row.find("a", title=True)
title = link.text.lower().strip()
pageURL = link[b'href']
# downloadTorrentLink = torrentSoup.find("a", title.startswith('Cliquer'))
tmp = pageURL.split('/')[-1].replace('.html', '.torrent')
downloadTorrentLink = ('http://www.cpasbien.io/telechargement/%s' % tmp)
if downloadTorrentLink:
download_url = downloadTorrentLink
# FIXME
size = -1
seeders = 1
leechers = 0
except (AttributeError, TypeError):
continue
if not all([title, download_url]):
continue
item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
logging.debug("Found result: %s " % title)
items[mode].append(item)
except Exception as e:
logging.error("Failed parsing provider. Traceback: %s" % traceback.format_exc())
# For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seedRatio(self):
return self.ratio
class CpasbienCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
self.minTime = 30
def _getRSSData(self):
# search_strings = {'RSS': ['']}
return {'entries': {}}
provider = CpasbienProvider()
| mcus/SickRage | sickbeard/providers/cpasbien.py | Python | gpl-3.0 | 4,884 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
class User_data(extensions.ExtensionDescriptor):
"""Add user_data to the Create Server v1.1 API."""
name = "UserData"
alias = "os-user-data"
namespace = ("http://docs.openstack.org/compute/ext/"
"userdata/api/v1.1")
updated = "2012-08-07T00:00:00+00:00"
| petrutlucian94/nova_dev | nova/api/openstack/compute/contrib/user_data.py | Python | apache-2.0 | 949 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fastqvalidator(MakefilePackage):
"""The fastQValidator validates the format of fastq files."""
homepage = "http://genome.sph.umich.edu/wiki/FastQValidator"
url = "https://github.com/statgen/fastQValidator/archive/v0.1.1a.tar.gz"
version('2017-01-10', commit='6d619a34749e9d33c34ef0d3e0e87324ca77f320',
git='https://github.com/statgen/fastQValidator.git')
resource(
name='libStatGen',
git='https://github.com/statgen/libStatGen.git',
commit='9db9c23e176a6ce6f421a3c21ccadedca892ac0c'
)
@property
def build_targets(self):
return ['LIB_PATH_GENERAL={0}'.format(
join_path(self.stage.source_path, 'libStatGen'))]
@property
def install_targets(self):
return [
'INSTALLDIR={0}'.format(self.prefix.bin),
'LIB_PATH_GENERAL={0}'.format(
join_path(self.stage.source_path, 'libStatGen')),
'install'
]
| lgarren/spack | var/spack/repos/builtin/packages/fastqvalidator/package.py | Python | lgpl-2.1 | 2,230 |
#
# main.py -- reference viewer for the Ginga toolkit.
#
# Eric Jeschke (eric@naoj.org, eric@redskiesatnight.com)
#
"""
Copyright (c) 2011-2015 Eric R. Jeschke
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
Neither the name of the Eric R. Jeschke nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
# stdlib imports
import sys, os
import logging, logging.handlers
import threading
import traceback
# Local application imports
from ginga.misc.Bunch import Bunch
from ginga.misc import Task, ModuleManager, Datasrc, Settings, log
import ginga.version as version
import ginga.toolkit as ginga_toolkit
from ginga import AstroImage
from ginga.util import paths
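# Default widget layout for the reference viewer, expressed as nested lists of
# [container_type, params, children...]. The workspace names assigned via
# dict(name=...) (e.g. 'toolbar', 'left', 'right', 'channels', 'dialogs') are
# what the plugin tables below refer to through their 'ws' keys.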
default_layout = ['seq', {},
['vbox', dict(name='top', width=1520, height=900),
dict(row=['hbox', dict(name='menu')],
stretch=0),
dict(row=['hpanel', dict(name='hpnl'),
['ws', dict(name='left', width=300, group=2),
# (tabname, layout), ...
[("Info", ['vpanel', {},
['ws', dict(name='uleft', height=300,
show_tabs=False, group=3)],
['ws', dict(name='lleft', height=430,
show_tabs=True, group=3)],
]
)]],
['vbox', dict(name='main', width=700),
dict(row=['ws', dict(wstype='tabs', name='channels',
group=1)], stretch=1)],
['ws', dict(name='right', width=430, group=2),
# (tabname, layout), ...
[("Dialogs", ['ws', dict(name='dialogs', group=2)
]
)]
],
], stretch=1),
dict(row=['ws', dict(name='toolbar', height=40,
show_tabs=False, group=2)],
stretch=0),
dict(row=['hbox', dict(name='status')], stretch=0),
]]
global_plugins = [
Bunch(module='Toolbar', tab='Toolbar', ws='toolbar'),
Bunch(module='Pan', tab='_pan', ws='uleft', raisekey=None),
Bunch(module='Info', tab='Synopsis', ws='lleft', raisekey=None),
Bunch(module='Header', tab='Header', ws='left', raisekey='H'),
Bunch(module='Zoom', tab='Zoom', ws='left', raisekey='Z'),
Bunch(module='Thumbs', tab='Thumbs', ws='right', raisekey='T'),
Bunch(module='Contents', tab='Contents', ws='right', raisekey='c'),
Bunch(module='WBrowser', tab='Help', ws='channels', raisekey='?', start=False),
Bunch(module='Errors', tab='Errors', ws='right', start=True),
Bunch(module='RC', tab='RC', ws='right', start=False),
Bunch(module='SAMP', tab='SAMP', ws='right', start=False),
Bunch(module='IRAF', tab='IRAF', ws='right', start=False),
Bunch(module='Log', tab='Log', ws='right', start=False),
Bunch(module='Debug', tab='Debug', ws='right', start=False),
]
local_plugins = [
Bunch(module='Pick', ws='dialogs', shortkey='f1'),
Bunch(module='Ruler', ws='dialogs', shortkey='f2'),
Bunch(module='MultiDim', ws='lleft', shortkey='f4'),
Bunch(module='Cuts', ws='dialogs', shortkey='f5'),
Bunch(module='Histogram', ws='dialogs', shortkey='f6'),
Bunch(module='Crosshair', ws='dialogs'),
Bunch(module='Overlays', ws='dialogs'),
Bunch(module='Blink', ws='dialogs'),
Bunch(module='LineProfile', ws='dialogs'),
Bunch(module='PixTable', ws='dialogs', shortkey='f7'),
Bunch(module='Preferences', ws='dialogs', shortkey='f9'),
Bunch(module='Catalogs', ws='dialogs', shortkey='f10'),
Bunch(module='Mosaic', ws='dialogs'),
# Not ready for prime time
#Bunch(module='Pipeline', ws='dialogs'),
Bunch(module='Drawing', ws='dialogs', shortkey='f11'),
Bunch(module='FBrowser', ws='dialogs', shortkey='f12'),
Bunch(module='Compose', ws='dialogs'),
]
class ReferenceViewer(object):
"""
This class exists solely to be able to customize the reference
viewer startup.
"""
def __init__(self, layout=default_layout):
self.local_plugins = []
self.global_plugins = []
self.layout = layout
def add_local_plugin(self, module_name, ws_name, pfx=None):
self.local_plugins.append(
Bunch(module=module_name, ws=ws_name, pfx=pfx))
def add_global_plugin(self, module_name, ws_name,
tab_name=None, start_plugin=True, pfx=None):
if tab_name is None:
tab_name = module_name
self.global_plugins.append(
Bunch(module=module_name, ws=ws_name, tab=tab_name,
start=start_plugin, pfx=pfx))
def add_default_plugins(self):
"""
Add the ginga-distributed default set of plugins to the
reference viewer.
"""
# add default global plugins
for bnch in global_plugins:
start = bnch.get('start', True)
pfx = bnch.get('pfx', None)
self.add_global_plugin(bnch.module, bnch.ws,
tab_name=bnch.tab, start_plugin=start, pfx=pfx)
# add default local plugins
for bnch in local_plugins:
pfx = bnch.get('pfx', None)
self.add_local_plugin(bnch.module, bnch.ws, pfx=pfx)
def add_default_options(self, optprs):
"""
Adds the default reference viewer startup options to an
OptionParser instance `optprs`.
"""
optprs.add_option("--bufsize", dest="bufsize", metavar="NUM",
type="int", default=10,
help="Buffer length to NUM")
optprs.add_option("--channels", dest="channels", default="Image",
help="Specify list of channels to create")
optprs.add_option("--debug", dest="debug", default=False, action="store_true",
help="Enter the pdb debugger on main()")
optprs.add_option("--disable-plugins", dest="disable_plugins",
metavar="NAMES",
help="Specify plugins that should be disabled")
optprs.add_option("--display", dest="display", metavar="HOST:N",
help="Use X display on HOST:N")
optprs.add_option("--fitspkg", dest="fitspkg", metavar="NAME",
default=None,
help="Prefer FITS I/O module NAME")
optprs.add_option("-g", "--geometry", dest="geometry",
metavar="GEOM",
help="X geometry for initial size and placement")
optprs.add_option("--log", dest="logfile", metavar="FILE",
help="Write logging output to FILE")
optprs.add_option("--loglevel", dest="loglevel", metavar="LEVEL",
type='int', default=logging.INFO,
help="Set logging level to LEVEL")
optprs.add_option("--lognull", dest="nulllogger", default=False,
action="store_true",
help="Use a null logger")
optprs.add_option("--modules", dest="modules", metavar="NAMES",
help="Specify additional modules to load")
optprs.add_option("--nosplash", dest="nosplash", default=False,
action="store_true",
help="Don't display the splash screen")
optprs.add_option("--numthreads", dest="numthreads", type="int",
default=30, metavar="NUM",
help="Start NUM threads in thread pool")
optprs.add_option("--opencv", dest="opencv", default=False,
action="store_true",
help="Use OpenCv acceleration")
optprs.add_option("--stderr", dest="logstderr", default=False,
action="store_true",
help="Copy logging also to stderr")
optprs.add_option("--plugins", dest="plugins", metavar="NAMES",
help="Specify additional plugins to load")
optprs.add_option("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
optprs.add_option("-t", "--toolkit", dest="toolkit", metavar="NAME",
default=None,
help="Prefer GUI toolkit (gtk|qt)")
optprs.add_option("--wcspkg", dest="wcspkg", metavar="NAME",
default=None,
help="Prefer WCS module NAME")
def main(self, options, args):
"""
Main routine for running the reference viewer.
        `options` is the options object produced by an OptionParser that has
        been populated with values from parsing the command line. It should
        at least include the options from add_default_options().
`args` is a list of arguments to the viewer after parsing out
options. It should contain a list of files or URLs to load.
"""
# Create a logger
logger = log.get_logger(name='ginga', options=options)
# Get settings (preferences)
basedir = paths.ginga_home
if not os.path.exists(basedir):
try:
os.mkdir(basedir)
except OSError as e:
logger.warn("Couldn't create ginga settings area (%s): %s" % (
basedir, str(e)))
logger.warn("Preferences will not be able to be saved")
# Set up preferences
prefs = Settings.Preferences(basefolder=basedir, logger=logger)
settings = prefs.createCategory('general')
settings.load(onError='silent')
settings.setDefaults(useMatplotlibColormaps=False,
widgetSet='choose',
WCSpkg='choose', FITSpkg='choose',
recursion_limit=2000)
# default of 1000 is a little too small
sys.setrecursionlimit(settings.get('recursion_limit'))
# So we can find our plugins
sys.path.insert(0, basedir)
moduleHome = os.path.split(sys.modules['ginga.version'].__file__)[0]
childDir = os.path.join(moduleHome, 'misc', 'plugins')
sys.path.insert(0, childDir)
pluginDir = os.path.join(basedir, 'plugins')
sys.path.insert(0, pluginDir)
# Choose a toolkit
if options.toolkit:
toolkit = options.toolkit
else:
toolkit = settings.get('widgetSet', 'choose')
ginga_toolkit.use(toolkit)
tkname = ginga_toolkit.get_family()
logger.info("Chosen toolkit (%s) family is '%s'" % (
ginga_toolkit.toolkit, tkname))
if tkname == 'gtk':
from ginga.gtkw.GingaGtk import GingaView
elif tkname == 'qt':
from ginga.qtw.GingaQt import GingaView
else:
try:
from ginga.qtw.GingaQt import GingaView
except ImportError:
try:
from ginga.gtkw.GingaGtk import GingaView
except ImportError:
print("You need python-gtk or python-qt4 to run Ginga!")
sys.exit(1)
# these imports have to be here, otherwise they force the choice
# of toolkit too early
from ginga.Control import GingaControl, GuiLogHandler
# Define class dynamically based on toolkit choice
class Ginga(GingaControl, GingaView):
def __init__(self, logger, threadPool, module_manager, prefs,
ev_quit=None):
GingaView.__init__(self, logger, ev_quit)
GingaControl.__init__(self, logger, threadPool, module_manager,
prefs, ev_quit=ev_quit)
if settings.get('useMatplotlibColormaps', False):
# Add matplotlib color maps if matplotlib is installed
try:
from ginga import cmap
cmap.add_matplotlib_cmaps()
except Exception as e:
logger.warn("failed to load matplotlib colormaps: %s" % (str(e)))
# User wants to customize the WCS package?
if options.wcspkg:
wcspkg = options.wcspkg
else:
wcspkg = settings.get('WCSpkg', 'choose')
try:
from ginga.util import wcsmod
assert wcsmod.use(wcspkg) == True
except Exception as e:
logger.warn("failed to set WCS package preference: %s" % (str(e)))
# User wants to customize the FITS package?
if options.fitspkg:
fitspkg = options.fitspkg
else:
fitspkg = settings.get('FITSpkg', 'choose')
try:
from ginga.util import io_fits
assert io_fits.use(fitspkg) == True
except Exception as e:
logger.warn("failed to set FITS package preference: %s" % (str(e)))
# Check whether user wants to use OpenCv
use_opencv = settings.get('use_opencv', False)
if use_opencv or options.opencv:
from ginga import trcalc
try:
trcalc.use('opencv')
except Exception as e:
logger.warn("failed to set OpenCv preference: %s" % (str(e)))
# Create the dynamic module manager
mm = ModuleManager.ModuleManager(logger)
# Create and start thread pool
ev_quit = threading.Event()
threadPool = Task.ThreadPool(options.numthreads, logger,
ev_quit=ev_quit)
threadPool.startall()
# Create the Ginga main object
ginga = Ginga(logger, threadPool, mm, prefs, ev_quit=ev_quit)
ginga.set_layout(self.layout)
gc = os.path.join(basedir, "ginga_config.py")
have_ginga_config = os.path.exists(gc)
# User configuration (custom star catalogs, etc.)
if have_ginga_config:
try:
import ginga_config
ginga_config.pre_gui_config(ginga)
except Exception as e:
try:
(type, value, tb) = sys.exc_info()
tb_str = "\n".join(traceback.format_tb(tb))
except Exception:
tb_str = "Traceback information unavailable."
logger.error("Error importing Ginga config file: %s" % (
str(e)))
logger.error("Traceback:\n%s" % (tb_str))
# Build desired layout
ginga.build_toplevel()
# Did user specify a particular geometry?
if options.geometry:
ginga.setGeometry(options.geometry)
# make the list of disabled plugins
disabled_plugins = []
if not (options.disable_plugins is None):
disabled_plugins = options.disable_plugins.lower().split(',')
# Add desired global plugins
for spec in self.global_plugins:
if not spec.module.lower() in disabled_plugins:
ginga.add_global_plugin(spec)
# Add GUI log handler (for "Log" global plugin)
guiHdlr = GuiLogHandler(ginga)
guiHdlr.setLevel(options.loglevel)
fmt = logging.Formatter(log.LOG_FORMAT)
guiHdlr.setFormatter(fmt)
logger.addHandler(guiHdlr)
# Load any custom modules
if options.modules:
modules = options.modules.split(',')
for longPluginName in modules:
if '.' in longPluginName:
tmpstr = longPluginName.split('.')
pluginName = tmpstr[-1]
pfx = '.'.join(tmpstr[:-1])
else:
pluginName = longPluginName
pfx = None
spec = Bunch(name=pluginName, module=pluginName,
tab=pluginName, ws='right', pfx=pfx)
ginga.add_global_plugin(spec)
# Load modules for "local" (per-channel) plug ins
for spec in self.local_plugins:
if not spec.module.lower() in disabled_plugins:
ginga.add_local_plugin(spec)
# Load any custom plugins
if options.plugins:
plugins = options.plugins.split(',')
for longPluginName in plugins:
if '.' in longPluginName:
tmpstr = longPluginName.split('.')
pluginName = tmpstr[-1]
pfx = '.'.join(tmpstr[:-1])
else:
pluginName = longPluginName
pfx = None
spec = Bunch(module=pluginName, ws='dialogs',
hidden=False, pfx=pfx)
ginga.add_local_plugin(spec)
ginga.update_pending()
# TEMP?
tab_names = list(map(lambda name: name.lower(),
ginga.ds.get_tabnames(group=None)))
if 'info' in tab_names:
ginga.ds.raise_tab('Info')
if 'thumbs' in tab_names:
ginga.ds.raise_tab('Thumbs')
# User configuration (custom star catalogs, etc.)
if have_ginga_config:
try:
ginga_config.post_gui_config(ginga)
except Exception as e:
try:
(type, value, tb) = sys.exc_info()
tb_str = "\n".join(traceback.format_tb(tb))
except Exception:
tb_str = "Traceback information unavailable."
logger.error("Error processing Ginga config file: %s" % (
str(e)))
logger.error("Traceback:\n%s" % (tb_str))
# Add custom channels
channels = options.channels.split(',')
for chname in channels:
datasrc = Datasrc.Datasrc(length=options.bufsize)
ginga.add_channel(chname, datasrc)
ginga.change_channel(channels[0])
# Display banner the first time run, unless suppressed
showBanner = True
try:
showBanner = settings.get('showBanner')
except KeyError:
# disable for subsequent runs
settings.set(showBanner=False)
settings.save()
if (not options.nosplash) and (len(args) == 0) and showBanner:
ginga.banner()
# Assume remaining arguments are fits files and load them.
for imgfile in args:
ginga.nongui_do(ginga.load_file, imgfile)
try:
try:
# Main loop to handle GUI events
logger.info("Entering mainloop...")
ginga.mainloop(timeout=0.001)
except KeyboardInterrupt:
logger.error("Received keyboard interrupt!")
finally:
logger.info("Shutting down...")
ev_quit.set()
sys.exit(0)
def reference_viewer(sys_argv):
viewer = ReferenceViewer(layout=default_layout)
viewer.add_default_plugins()
# Parse command line options with optparse module
from optparse import OptionParser
usage = "usage: %prog [options] cmd [args]"
optprs = OptionParser(usage=usage,
version=('%%prog %s' % version.version))
viewer.add_default_options(optprs)
(options, args) = optprs.parse_args(sys_argv[1:])
if options.display:
os.environ['DISPLAY'] = options.display
# Are we debugging this?
if options.debug:
import pdb
pdb.run('viewer.main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys_argv[0]))
profile.run('viewer.main(options, args)')
else:
viewer.main(options, args)
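# --- Illustrative sketch (not part of the original module) -----------------
# The ReferenceViewer class above is intended to be customized before
# startup. This uncalled helper shows the expected flow; the plugin name
# 'MyPlugin' and its tab/workspace names are made-up assumptions.
def example_custom_viewer(sys_argv):
    viewer = ReferenceViewer(layout=default_layout)
    viewer.add_default_plugins()
    # register an extra global plugin without starting it automatically
    viewer.add_global_plugin('MyPlugin', 'right', tab_name='My Plugin',
                             start_plugin=False)
    from optparse import OptionParser
    optprs = OptionParser(usage="usage: %prog [options] cmd [args]",
                          version=('%%prog %s' % version.version))
    viewer.add_default_options(optprs)
    (options, args) = optprs.parse_args(sys_argv[1:])
    viewer.main(options, args)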
# END
| eteq/ginga | ginga/main.py | Python | bsd-3-clause | 21,410 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-07-19 12:18
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pyconkr', '0010_tutorialproposal_type'),
]
operations = [
migrations.CreateModel(
name='TutorialCheckin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tutorial', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pyconkr.TutorialProposal')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='tutorialcheckin',
unique_together=set([('user', 'tutorial')]),
),
]
| pythonkr/pyconapac-2016 | pyconkr/migrations/0011_auto_20160719_2118.py | Python | mit | 1,051 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "muebles_quetzal.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| AlexMaguey/ESCOM-C1-PP1 | muebles_quetzal/manage.py | Python | gpl-3.0 | 258 |
# -*- coding: utf-8 -*-
r"""Useful constants for neutron scattering calculations, including:
* ``magnetic_ion_j()`` : Magnetic ion j-values
* ``periodic_table()`` : Periodic table values
* ``scattering_lengths()`` : Neutron scattering lengths
* ``symmetry()`` : Space group information
* ``JOULES_TO_MEV`` : Joules-to-meV conversion factor
* ``BOLTZMANN_IN_MEV_K`` : Boltzmann constant in meV/K
* ``N_A`` : Avogadro constant
* ``neutron_mass`` : Mass of a neutron in grams
* ``e`` : Electric charge of an electron in Coulombs
"""
import json
import os
def magnetic_ion_j():
r"""Loads j values for Magnetic ions.
Parameters
----------
None
Returns
-------
magnetic_ion_j : dict
Database of j-values for magnetic ions
"""
with open(os.path.join(os.path.dirname(__file__),
"database/magnetic_form_factors.json"), 'r') as infile:
return json.load(infile)
def periodic_table():
r"""Loads periodic table database.
mass, and long-form name.
Parameters
----------
None
Returns
-------
periodic_table : dict
        Database of atomic number, density, mass, and name for all
elements in the Periodic table
"""
with open(os.path.join(os.path.dirname(__file__),
"database/periodic_table.json"), 'r') as infile:
return json.load(infile)
def scattering_lengths():
r"""Loads neutron scattering lengths.
Parameters
----------
None
Returns
-------
scattering_lengths : dict
        Database of elements containing the absolute, coherent, incoherent, and
scattering cross-sections and scattering lengths
"""
with open(os.path.join(os.path.dirname(__file__),
"database/scattering_lengths.json"), 'r') as infile:
return json.load(infile)
def symmetry():
r"""Loads crystal lattice space groups.
Parameters
----------
None
Returns
-------
lattice_space_groups : dict
Database of 230 crystal lattice space groups and their generators
"""
with open(os.path.join(os.path.dirname(__file__),
"database/symmetry.json"), 'r') as infile:
return json.load(infile)
JOULES_TO_MEV = 1. / 1.6021766208e-19 * 1.e3 # Joules to meV
BOLTZMANN_IN_MEV_K = 8.6173303e-05 * 1.e3 # Boltzmann constant in meV/K
N_A = 6.022140857e+23
neutron_mass = 1.674927211e-24 # mass of a neutron in grams
hbar = 1.054571628e-34 # hbar in m2 kg / s
e = 1.602176487e-19 # coulombs
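# --- Illustrative sketch (not part of the original module) -----------------
# Shows how the JSON-backed lookups combine with the unit constants above.
# The element key 'H' and the structure of the returned dictionary are
# assumptions about the bundled database files.
if __name__ == "__main__":
    lengths = scattering_lengths()
    hydrogen = lengths.get('H', {})  # per-element entry (assumed key)
    room_temp_meV = BOLTZMANN_IN_MEV_K * 293.  # thermal energy, ~25 meV
    print(hydrogen)
    print(room_temp_meV)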
| granrothge/neutronpy | neutronpy/constants.py | Python | mit | 2,584 |
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: swift_conn
# You'll see swift_conn passed around a few places in this file. This is the
# source httplib connection of whatever it is attached to.
# It is used when early termination of reading from the connection should
# happen, such as when a range request is satisfied but there's still more the
# source connection would like to send. To prevent having to read all the data
# that could be left, the source connection can be closed and then reads
# commence to empty out any buffers.
# These shenanigans are to ensure all related objects can be garbage
# collected. We've seen objects hang around forever otherwise.
import itertools
import mimetypes
import re
import time
from datetime import datetime
from urllib import unquote, quote
from hashlib import md5
from eventlet import sleep, GreenPile
from eventlet.queue import Queue
from eventlet.timeout import Timeout
from swift.common.utils import ContextPool, normalize_timestamp, \
config_true_value, public, json, csv_append
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_metadata, check_object_creation, \
CONTAINER_LISTING_LIMIT, MAX_FILE_SIZE
from swift.common.exceptions import ChunkReadTimeout, \
ChunkWriteTimeout, ConnectionTimeout, ListingIterNotFound, \
ListingIterNotAuthorized, ListingIterError, SloSegmentError
from swift.common.http import is_success, is_client_error, HTTP_CONTINUE, \
HTTP_CREATED, HTTP_MULTIPLE_CHOICES, HTTP_NOT_FOUND, HTTP_CONFLICT, \
HTTP_INTERNAL_SERVER_ERROR, HTTP_SERVICE_UNAVAILABLE, \
HTTP_INSUFFICIENT_STORAGE, HTTP_OK
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
HTTPServerError, HTTPServiceUnavailable, Request, Response, \
HTTPClientDisconnect
def segment_listing_iter(listing):
listing = iter(listing)
while True:
seg_dict = listing.next()
if isinstance(seg_dict['name'], unicode):
seg_dict['name'] = seg_dict['name'].encode('utf-8')
yield seg_dict
def copy_headers_into(from_r, to_r):
"""
Will copy desired headers from from_r to to_r
:params from_r: a swob Request or Response
:params to_r: a swob Request or Response
"""
for k, v in from_r.headers.items():
if k.lower().startswith('x-object-meta-'):
to_r.headers[k] = v
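# Illustrative sketch (not part of the original module): copy_headers_into()
# only propagates user metadata, i.e. headers prefixed with 'x-object-meta-'.
# The header names below are made-up examples.
def _copy_headers_into_example():
    src = Request.blank('/v1/a/c/o',
                        headers={'X-Object-Meta-Color': 'blue',
                                 'Content-Type': 'text/plain'})
    dst = Request.blank('/v1/a/c/o2')
    copy_headers_into(src, dst)
    # dst now carries X-Object-Meta-Color but not Content-Type
    return dst.headers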
def check_content_type(req):
if not req.environ.get('swift.content_type_overriden') and \
';' in req.headers.get('content-type', ''):
for param in req.headers['content-type'].split(';')[1:]:
if param.lstrip().startswith('swift_'):
return HTTPBadRequest("Invalid Content-Type, "
"swift_* is not a valid parameter name.")
return None
class SegmentedIterable(object):
"""
Iterable that returns the object contents for a segmented object in Swift.
If there's a failure that cuts the transfer short, the response's
`status_int` will be updated (again, just for logging since the original
status would have already been sent to the client).
:param controller: The ObjectController instance to work with.
:param container: The container the object segments are within. If
                      container is None, the container is derived from elements
in listing using split('/', 1).
:param listing: The listing of object segments to iterate over; this may
be an iterator or list that returns dicts with 'name' and
'bytes' keys.
:param response: The swob.Response this iterable is associated with, if
any (default: None)
"""
def __init__(self, controller, container, listing, response=None,
is_slo=False):
self.controller = controller
self.container = container
self.listing = segment_listing_iter(listing)
self.is_slo = is_slo
self.segment = 0
self.segment_dict = None
self.segment_peek = None
self.seek = 0
self.segment_iter = None
# See NOTE: swift_conn at top of file about this.
self.segment_iter_swift_conn = None
self.position = 0
self.response = response
if not self.response:
self.response = Response()
self.next_get_time = 0
def _load_next_segment(self):
"""
Loads the self.segment_iter with the next object segment's contents.
:raises: StopIteration when there are no more object segments or
segment no longer matches SLO manifest specifications.
"""
try:
self.segment += 1
self.segment_dict = self.segment_peek or self.listing.next()
self.segment_peek = None
if self.container is None:
container, obj = \
self.segment_dict['name'].lstrip('/').split('/', 1)
else:
container, obj = self.container, self.segment_dict['name']
partition, nodes = self.controller.app.object_ring.get_nodes(
self.controller.account_name, container, obj)
path = '/%s/%s/%s' % (self.controller.account_name, container, obj)
req = Request.blank(path)
if self.seek:
req.range = 'bytes=%s-' % self.seek
self.seek = 0
if not self.is_slo and self.segment > \
self.controller.app.rate_limit_after_segment:
sleep(max(self.next_get_time - time.time(), 0))
self.next_get_time = time.time() + \
1.0 / self.controller.app.rate_limit_segments_per_sec
nodes = self.controller.app.sort_nodes(nodes)
resp = self.controller.GETorHEAD_base(
req, _('Object'), partition,
self.controller.iter_nodes(partition, nodes,
self.controller.app.object_ring),
path, len(nodes))
if self.is_slo and resp.status_int == HTTP_NOT_FOUND:
raise SloSegmentError(_(
'Could not load object segment %(path)s:'
' %(status)s') % {'path': path, 'status': resp.status_int})
if not is_success(resp.status_int):
raise Exception(_(
'Could not load object segment %(path)s:'
' %(status)s') % {'path': path, 'status': resp.status_int})
if self.is_slo:
if (resp.content_length != self.segment_dict['bytes'] or
resp.etag != self.segment_dict['hash']):
raise SloSegmentError(_(
'Object segment no longer valid: '
'%(path)s etag: %(r_etag)s != %(s_etag)s or '
'size: %(r_size)s != %(s_size)s') %
{'path': path, 'r_etag': resp.etag,
's_etag': self.segment_dict['hash'],
'r_size': resp.content_length,
's_size': self.segment_dict['bytes']})
self.segment_iter = resp.app_iter
# See NOTE: swift_conn at top of file about this.
self.segment_iter_swift_conn = getattr(resp, 'swift_conn', None)
except StopIteration:
raise
except SloSegmentError, err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.error(_(
'ERROR: While processing manifest '
'/%(acc)s/%(cont)s/%(obj)s, %(err)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name, 'err': err})
err.swift_logged = True
self.response.status_int = HTTP_CONFLICT
            raise StopIteration('Invalid manifest segment')
except (Exception, Timeout), err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception(_(
'ERROR: While processing manifest '
'/%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
err.swift_logged = True
self.response.status_int = HTTP_SERVICE_UNAVAILABLE
raise
def next(self):
return iter(self).next()
def __iter__(self):
""" Standard iterator function that returns the object's contents. """
try:
while True:
if not self.segment_iter:
self._load_next_segment()
while True:
with ChunkReadTimeout(self.controller.app.node_timeout):
try:
chunk = self.segment_iter.next()
break
except StopIteration:
self._load_next_segment()
self.position += len(chunk)
yield chunk
except StopIteration:
raise
except (Exception, Timeout), err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception(_(
'ERROR: While processing manifest '
'/%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
err.swift_logged = True
self.response.status_int = HTTP_SERVICE_UNAVAILABLE
raise
def app_iter_range(self, start, stop):
"""
Non-standard iterator function for use with Swob in serving Range
requests more quickly. This will skip over segments and do a range
request on the first segment to return data from, if needed.
:param start: The first byte (zero-based) to return. None for 0.
:param stop: The last byte (zero-based) to return. None for end.
"""
try:
if start:
self.segment_peek = self.listing.next()
while start >= self.position + self.segment_peek['bytes']:
self.segment += 1
self.position += self.segment_peek['bytes']
self.segment_peek = self.listing.next()
self.seek = start - self.position
else:
start = 0
if stop is not None:
length = stop - start
else:
length = None
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
# See NOTE: swift_conn at top of file about this.
if self.segment_iter_swift_conn:
try:
self.segment_iter_swift_conn.close()
except Exception:
pass
self.segment_iter_swift_conn = None
if self.segment_iter:
try:
while self.segment_iter.next():
pass
except Exception:
pass
self.segment_iter = None
except StopIteration:
raise
except (Exception, Timeout), err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception(_(
'ERROR: While processing manifest '
'/%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
err.swift_logged = True
self.response.status_int = HTTP_SERVICE_UNAVAILABLE
raise
class ObjectController(Controller):
"""WSGI controller for object requests."""
server_type = 'Object'
def __init__(self, app, account_name, container_name, object_name,
**kwargs):
Controller.__init__(self, app)
self.account_name = unquote(account_name)
self.container_name = unquote(container_name)
self.object_name = unquote(object_name)
def _listing_iter(self, lcontainer, lprefix, env):
for page in self._listing_pages_iter(lcontainer, lprefix, env):
for item in page:
yield item
def _listing_pages_iter(self, lcontainer, lprefix, env):
lpartition, lnodes = self.app.container_ring.get_nodes(
self.account_name, lcontainer)
marker = ''
while True:
lreq = Request.blank('i will be overridden by env', environ=env)
# Don't quote PATH_INFO, by WSGI spec
lreq.environ['PATH_INFO'] = \
'/%s/%s' % (self.account_name, lcontainer)
lreq.environ['REQUEST_METHOD'] = 'GET'
lreq.environ['QUERY_STRING'] = \
'format=json&prefix=%s&marker=%s' % (quote(lprefix),
quote(marker))
            lnodes = self.app.sort_nodes(lnodes)
lresp = self.GETorHEAD_base(
lreq, _('Container'), lpartition, lnodes, lreq.path_info,
len(lnodes))
if 'swift.authorize' in env:
lreq.acl = lresp.headers.get('x-container-read')
aresp = env['swift.authorize'](lreq)
if aresp:
raise ListingIterNotAuthorized(aresp)
if lresp.status_int == HTTP_NOT_FOUND:
raise ListingIterNotFound()
elif not is_success(lresp.status_int):
raise ListingIterError()
if not lresp.body:
break
sublisting = json.loads(lresp.body)
if not sublisting:
break
marker = sublisting[-1]['name'].encode('utf-8')
yield sublisting
def _remaining_items(self, listing_iter):
"""
Returns an item-by-item iterator for a page-by-page iterator
of item listings.
Swallows listing-related errors; this iterator is only used
after we've already started streaming a response to the
client, and so if we start getting errors from the container
servers now, it's too late to send an error to the client, so
we just quit looking for segments.
"""
try:
for page in listing_iter:
for item in page:
yield item
except ListingIterNotFound:
pass
except ListingIterError:
pass
except ListingIterNotAuthorized:
pass
def is_good_source(self, src):
"""
Indicates whether or not the request made to the backend found
what it was looking for.
In the case of an object, a 416 indicates that we found a
backend with the object.
"""
return src.status == 416 or \
super(ObjectController, self).is_good_source(src)
def GETorHEAD(self, req):
"""Handle HTTP GET or HEAD requests."""
container_info = self.container_info(self.account_name,
self.container_name)
req.acl = container_info['read_acl']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
nodes = self.app.sort_nodes(nodes)
resp = self.GETorHEAD_base(
req, _('Object'), partition,
self.iter_nodes(partition, nodes, self.app.object_ring),
req.path_info, len(nodes))
if ';' in resp.headers.get('content-type', ''):
# strip off swift_bytes from content-type
content_type, check_extra_meta = \
resp.headers['content-type'].rsplit(';', 1)
if check_extra_meta.lstrip().startswith('swift_bytes='):
resp.content_type = content_type
large_object = None
if config_true_value(resp.headers.get('x-static-large-object')) and \
req.params.get('multipart-manifest') != 'get' and \
self.app.allow_static_large_object:
large_object = 'SLO'
listing_page1 = ()
listing = []
lcontainer = None # container name is included in listing
if resp.status_int == HTTP_OK and \
req.method == 'GET' and not req.range:
try:
listing = json.loads(resp.body)
except ValueError:
listing = []
else:
# need to make a second request to get whole manifest
new_req = req.copy_get()
new_req.method = 'GET'
new_req.range = None
nodes = self.app.sort_nodes(nodes)
new_resp = self.GETorHEAD_base(
new_req, _('Object'), partition,
self.iter_nodes(partition, nodes, self.app.object_ring),
req.path_info, len(nodes))
if new_resp.status_int // 100 == 2:
try:
listing = json.loads(new_resp.body)
except ValueError:
listing = []
else:
return HTTPServiceUnavailable(
"Unable to load SLO manifest", request=req)
if 'x-object-manifest' in resp.headers and \
req.params.get('multipart-manifest') != 'get':
large_object = 'DLO'
lcontainer, lprefix = \
resp.headers['x-object-manifest'].split('/', 1)
lcontainer = unquote(lcontainer)
lprefix = unquote(lprefix)
try:
pages_iter = iter(self._listing_pages_iter(lcontainer, lprefix,
req.environ))
listing_page1 = pages_iter.next()
listing = itertools.chain(listing_page1,
self._remaining_items(pages_iter))
except ListingIterNotFound:
return HTTPNotFound(request=req)
except ListingIterNotAuthorized, err:
return err.aresp
except ListingIterError:
return HTTPServerError(request=req)
except StopIteration:
listing_page1 = listing = ()
if large_object:
if len(listing_page1) >= CONTAINER_LISTING_LIMIT:
resp = Response(headers=resp.headers, request=req,
conditional_response=True)
if req.method == 'HEAD':
# These shenanigans are because swob translates the HEAD
# request into a swob EmptyResponse for the body, which
# has a len, which eventlet translates as needing a
# content-length header added. So we call the original
# swob resp for the headers but return an empty iterator
# for the body.
def head_response(environ, start_response):
resp(environ, start_response)
return iter([])
head_response.status_int = resp.status_int
return head_response
else:
resp.app_iter = SegmentedIterable(
self, lcontainer, listing, resp,
is_slo=(large_object == 'SLO'))
else:
# For objects with a reasonable number of segments, we'll serve
# them with a set content-length and computed etag.
if listing:
listing = list(listing)
try:
content_length = sum(o['bytes'] for o in listing)
last_modified = \
max(o['last_modified'] for o in listing)
last_modified = datetime(*map(int, re.split('[^\d]',
last_modified)[:-1]))
etag = md5(
''.join(o['hash'] for o in listing)).hexdigest()
except KeyError:
return HTTPServerError('Invalid Manifest File',
request=req)
else:
content_length = 0
last_modified = resp.last_modified
etag = md5().hexdigest()
resp = Response(headers=resp.headers, request=req,
conditional_response=True)
resp.app_iter = SegmentedIterable(
self, lcontainer, listing, resp,
is_slo=(large_object == 'SLO'))
resp.content_length = content_length
resp.last_modified = last_modified
resp.etag = etag
resp.headers['accept-ranges'] = 'bytes'
# In case of a manifest file of nonzero length, the
# backend may have sent back a Content-Range header for
# the manifest. It's wrong for the client, though.
resp.content_range = None
return resp
@public
@cors_validation
@delay_denial
def GET(self, req):
"""Handler for HTTP GET requests."""
return self.GETorHEAD(req)
@public
@cors_validation
@delay_denial
def HEAD(self, req):
"""Handler for HTTP HEAD requests."""
return self.GETorHEAD(req)
@public
@cors_validation
@delay_denial
def POST(self, req):
"""HTTP POST request handler."""
if 'x-delete-after' in req.headers:
try:
x_delete_after = int(req.headers['x-delete-after'])
except ValueError:
return HTTPBadRequest(request=req,
content_type='text/plain',
body='Non-integer X-Delete-After')
req.headers['x-delete-at'] = '%d' % (time.time() + x_delete_after)
if self.app.object_post_as_copy:
req.method = 'PUT'
req.path_info = '/%s/%s/%s' % (
self.account_name, self.container_name, self.object_name)
req.headers['Content-Length'] = 0
req.headers['X-Copy-From'] = quote('/%s/%s' % (self.container_name,
self.object_name))
req.headers['X-Fresh-Metadata'] = 'true'
req.environ['swift_versioned_copy'] = True
if req.environ.get('QUERY_STRING'):
req.environ['QUERY_STRING'] += '&multipart-manifest=get'
else:
req.environ['QUERY_STRING'] = 'multipart-manifest=get'
resp = self.PUT(req)
# Older editions returned 202 Accepted on object POSTs, so we'll
# convert any 201 Created responses to that for compatibility with
# picky clients.
if resp.status_int != HTTP_CREATED:
return resp
return HTTPAccepted(request=req)
else:
error_response = check_metadata(req, 'object')
if error_response:
return error_response
container_info = self.container_info(
self.account_name, self.container_name,
account_autocreate=self.app.account_autocreate)
container_partition = container_info['partition']
containers = container_info['nodes']
req.acl = container_info['write_acl']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not containers:
return HTTPNotFound(request=req)
if 'x-delete-at' in req.headers:
try:
x_delete_at = int(req.headers['x-delete-at'])
if x_delete_at < time.time():
return HTTPBadRequest(
body='X-Delete-At in past', request=req,
content_type='text/plain')
except ValueError:
return HTTPBadRequest(request=req,
content_type='text/plain',
body='Non-integer X-Delete-At')
delete_at_container = str(
x_delete_at /
self.app.expiring_objects_container_divisor *
self.app.expiring_objects_container_divisor)
delete_at_part, delete_at_nodes = \
self.app.container_ring.get_nodes(
self.app.expiring_objects_account, delete_at_container)
else:
delete_at_part = delete_at_nodes = None
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
headers = self._backend_requests(
req, len(nodes), container_partition, containers,
delete_at_part, delete_at_nodes)
resp = self.make_requests(req, self.app.object_ring, partition,
'POST', req.path_info, headers)
return resp
def _backend_requests(self, req, n_outgoing,
container_partition, containers,
delete_at_partition=None, delete_at_nodes=None):
headers = [dict(req.headers.iteritems())
for _junk in range(n_outgoing)]
for header in headers:
header['Connection'] = 'close'
for i, container in enumerate(containers):
i = i % len(headers)
headers[i]['X-Container-Partition'] = container_partition
headers[i]['X-Container-Host'] = csv_append(
headers[i].get('X-Container-Host'),
'%(ip)s:%(port)s' % container)
headers[i]['X-Container-Device'] = csv_append(
headers[i].get('X-Container-Device'),
container['device'])
for i, node in enumerate(delete_at_nodes or []):
i = i % len(headers)
headers[i]['X-Delete-At-Partition'] = delete_at_partition
headers[i]['X-Delete-At-Host'] = csv_append(
headers[i].get('X-Delete-At-Host'),
'%(ip)s:%(port)s' % node)
headers[i]['X-Delete-At-Device'] = csv_append(
headers[i].get('X-Delete-At-Device'),
node['device'])
return headers
def _send_file(self, conn, path):
"""Method for a file PUT coro"""
while True:
chunk = conn.queue.get()
if not conn.failed:
try:
with ChunkWriteTimeout(self.app.node_timeout):
conn.send(chunk)
except (Exception, ChunkWriteTimeout):
conn.failed = True
self.exception_occurred(conn.node, _('Object'),
_('Trying to write to %s') % path)
conn.queue.task_done()
def _connect_put_node(self, nodes, part, path, headers,
logger_thread_locals):
"""Method for a file PUT connect"""
self.app.logger.thread_locals = logger_thread_locals
for node in nodes:
try:
start_time = time.time()
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(
node['ip'], node['port'], node['device'], part, 'PUT',
path, headers)
self.app.set_node_timing(node, time.time() - start_time)
with Timeout(self.app.node_timeout):
resp = conn.getexpect()
if resp.status == HTTP_CONTINUE:
conn.resp = None
conn.node = node
return conn
elif is_success(resp.status):
conn.resp = resp
conn.node = node
return conn
elif resp.status == HTTP_INSUFFICIENT_STORAGE:
self.error_limit(node)
except:
self.exception_occurred(node, _('Object'),
_('Expect: 100-continue on %s') % path)
@public
@cors_validation
@delay_denial
def PUT(self, req):
"""HTTP PUT request handler."""
container_info = self.container_info(
self.account_name, self.container_name,
account_autocreate=self.app.account_autocreate)
container_partition = container_info['partition']
containers = container_info['nodes']
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
object_versions = container_info['versions']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not containers:
return HTTPNotFound(request=req)
if 'x-delete-after' in req.headers:
try:
x_delete_after = int(req.headers['x-delete-after'])
except ValueError:
return HTTPBadRequest(request=req,
content_type='text/plain',
body='Non-integer X-Delete-After')
req.headers['x-delete-at'] = '%d' % (time.time() + x_delete_after)
if 'x-delete-at' in req.headers:
try:
x_delete_at = int(req.headers['x-delete-at'])
if x_delete_at < time.time():
return HTTPBadRequest(
body='X-Delete-At in past', request=req,
content_type='text/plain')
except ValueError:
return HTTPBadRequest(request=req, content_type='text/plain',
body='Non-integer X-Delete-At')
delete_at_container = str(
x_delete_at /
self.app.expiring_objects_container_divisor *
self.app.expiring_objects_container_divisor)
delete_at_part, delete_at_nodes = \
self.app.container_ring.get_nodes(
self.app.expiring_objects_account, delete_at_container)
else:
delete_at_part = delete_at_nodes = None
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
# do a HEAD request for container sync and checking object versions
if 'x-timestamp' in req.headers or \
(object_versions and not
req.environ.get('swift_versioned_copy')):
hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
environ={'REQUEST_METHOD': 'HEAD'})
hresp = self.GETorHEAD_base(hreq, _('Object'), partition, nodes,
hreq.path_info, len(nodes))
# Used by container sync feature
if 'x-timestamp' in req.headers:
try:
req.headers['X-Timestamp'] = \
normalize_timestamp(float(req.headers['x-timestamp']))
if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
float(hresp.environ['swift_x_timestamp']) >= \
float(req.headers['x-timestamp']):
return HTTPAccepted(request=req)
except ValueError:
return HTTPBadRequest(
request=req, content_type='text/plain',
body='X-Timestamp should be a UNIX timestamp float value; '
'was %r' % req.headers['x-timestamp'])
else:
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
# Sometimes the 'content-type' header exists, but is set to None.
content_type_manually_set = True
if not req.headers.get('content-type'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
req.headers['Content-Type'] = guessed_type or \
'application/octet-stream'
content_type_manually_set = False
error_response = check_object_creation(req, self.object_name) or \
check_content_type(req)
if error_response:
return error_response
if object_versions and not req.environ.get('swift_versioned_copy'):
is_manifest = 'x-object-manifest' in req.headers or \
'x-object-manifest' in hresp.headers
if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
# This is a version manifest and needs to be handled
# differently. First copy the existing data to a new object,
# then write the data from this request to the version manifest
# object.
lcontainer = object_versions.split('/')[0]
prefix_len = '%03x' % len(self.object_name)
lprefix = prefix_len + self.object_name + '/'
ts_source = hresp.environ.get('swift_x_timestamp')
if ts_source is None:
ts_source = time.mktime(time.strptime(
hresp.headers['last-modified'],
'%a, %d %b %Y %H:%M:%S GMT'))
new_ts = normalize_timestamp(ts_source)
vers_obj_name = lprefix + new_ts
copy_headers = {
'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
copy_environ = {'REQUEST_METHOD': 'COPY',
'swift_versioned_copy': True
}
copy_req = Request.blank(req.path_info, headers=copy_headers,
environ=copy_environ)
copy_resp = self.COPY(copy_req)
if is_client_error(copy_resp.status_int):
# missing container or bad permissions
return HTTPPreconditionFailed(request=req)
elif not is_success(copy_resp.status_int):
# could not copy the data, bail
return HTTPServiceUnavailable(request=req)
reader = req.environ['wsgi.input'].read
data_source = iter(lambda: reader(self.app.client_chunk_size), '')
source_header = req.headers.get('X-Copy-From')
source_resp = None
if source_header:
source_header = unquote(source_header)
acct = req.path_info.split('/', 2)[1]
if isinstance(acct, unicode):
acct = acct.encode('utf-8')
if not source_header.startswith('/'):
source_header = '/' + source_header
source_header = '/' + acct + source_header
try:
src_container_name, src_obj_name = \
source_header.split('/', 3)[2:]
except ValueError:
return HTTPPreconditionFailed(
request=req,
                body='X-Copy-From header must be of the form '
'<container name>/<object name>')
source_req = req.copy_get()
source_req.path_info = source_header
source_req.headers['X-Newest'] = 'true'
orig_obj_name = self.object_name
orig_container_name = self.container_name
self.object_name = src_obj_name
self.container_name = src_container_name
source_resp = self.GET(source_req)
if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
return source_resp
self.object_name = orig_obj_name
self.container_name = orig_container_name
new_req = Request.blank(req.path_info,
environ=req.environ, headers=req.headers)
data_source = source_resp.app_iter
new_req.content_length = source_resp.content_length
if new_req.content_length is None:
# This indicates a transfer-encoding: chunked source object,
# which currently only happens because there are more than
# CONTAINER_LISTING_LIMIT segments in a segmented object. In
# this case, we're going to refuse to do the server-side copy.
return HTTPRequestEntityTooLarge(request=req)
new_req.etag = source_resp.etag
# we no longer need the X-Copy-From header
del new_req.headers['X-Copy-From']
if not content_type_manually_set:
new_req.headers['Content-Type'] = \
source_resp.headers['Content-Type']
if not config_true_value(
new_req.headers.get('x-fresh-metadata', 'false')):
copy_headers_into(source_resp, new_req)
copy_headers_into(req, new_req)
# copy over x-static-large-object for POSTs and manifest copies
if 'X-Static-Large-Object' in source_resp.headers and \
req.params.get('multipart-manifest') == 'get':
new_req.headers['X-Static-Large-Object'] = \
source_resp.headers['X-Static-Large-Object']
req = new_req
node_iter = self.iter_nodes(partition, nodes, self.app.object_ring)
pile = GreenPile(len(nodes))
chunked = req.headers.get('transfer-encoding')
outgoing_headers = self._backend_requests(
req, len(nodes), container_partition, containers,
delete_at_part, delete_at_nodes)
for nheaders in outgoing_headers:
# RFC2616:8.2.3 disallows 100-continue without a body
if (req.content_length > 0) or chunked:
nheaders['Expect'] = '100-continue'
pile.spawn(self._connect_put_node, node_iter, partition,
req.path_info, nheaders, self.app.logger.thread_locals)
conns = [conn for conn in pile if conn]
if len(conns) <= len(nodes) / 2:
self.app.logger.error(
_('Object PUT returning 503, %(conns)s/%(nodes)s '
'required connections'),
{'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
return HTTPServiceUnavailable(request=req)
bytes_transferred = 0
try:
with ContextPool(len(nodes)) as pool:
for conn in conns:
conn.failed = False
conn.queue = Queue(self.app.put_queue_depth)
pool.spawn(self._send_file, conn, req.path)
while True:
with ChunkReadTimeout(self.app.client_timeout):
try:
chunk = next(data_source)
except StopIteration:
if chunked:
[conn.queue.put('0\r\n\r\n') for conn in conns]
break
bytes_transferred += len(chunk)
if bytes_transferred > MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(request=req)
for conn in list(conns):
if not conn.failed:
conn.queue.put(
'%x\r\n%s\r\n' % (len(chunk), chunk)
if chunked else chunk)
else:
conns.remove(conn)
if len(conns) <= len(nodes) / 2:
self.app.logger.error(_(
'Object PUT exceptions during'
' send, %(conns)s/%(nodes)s required connections'),
{'conns': len(conns), 'nodes': len(nodes) / 2 + 1})
return HTTPServiceUnavailable(request=req)
for conn in conns:
if conn.queue.unfinished_tasks:
conn.queue.join()
conns = [conn for conn in conns if not conn.failed]
except ChunkReadTimeout, err:
self.app.logger.warn(
_('ERROR Client read timeout (%ss)'), err.seconds)
self.app.logger.increment('client_timeouts')
return HTTPRequestTimeout(request=req)
except (Exception, Timeout):
self.app.logger.exception(
_('ERROR Exception causing client disconnect'))
return HTTPClientDisconnect(request=req)
if req.content_length and bytes_transferred < req.content_length:
req.client_disconnect = True
self.app.logger.warn(
_('Client disconnected without sending enough data'))
self.app.logger.increment('client_disconnects')
return HTTPClientDisconnect(request=req)
statuses = []
reasons = []
bodies = []
etags = set()
for conn in conns:
try:
with Timeout(self.app.node_timeout):
if conn.resp:
response = conn.resp
else:
response = conn.getresponse()
statuses.append(response.status)
reasons.append(response.reason)
bodies.append(response.read())
if response.status >= HTTP_INTERNAL_SERVER_ERROR:
self.error_occurred(
conn.node,
_('ERROR %(status)d %(body)s From Object Server '
're: %(path)s') %
{'status': response.status,
'body': bodies[-1][:1024], 'path': req.path})
elif is_success(response.status):
etags.add(response.getheader('etag').strip('"'))
except (Exception, Timeout):
self.exception_occurred(
conn.node, _('Object'),
_('Trying to get final status of PUT to %s') % req.path)
if len(etags) > 1:
self.app.logger.error(
_('Object servers returned %s mismatched etags'), len(etags))
return HTTPServerError(request=req)
etag = len(etags) and etags.pop() or None
while len(statuses) < len(nodes):
statuses.append(HTTP_SERVICE_UNAVAILABLE)
reasons.append('')
bodies.append('')
resp = self.best_response(req, statuses, reasons, bodies,
_('Object PUT'), etag=etag)
if source_header:
resp.headers['X-Copied-From'] = quote(
source_header.split('/', 2)[2])
if 'last-modified' in source_resp.headers:
resp.headers['X-Copied-From-Last-Modified'] = \
source_resp.headers['last-modified']
copy_headers_into(req, resp)
resp.last_modified = float(req.headers['X-Timestamp'])
return resp
@public
@cors_validation
@delay_denial
def DELETE(self, req):
"""HTTP DELETE request handler."""
container_info = self.container_info(self.account_name,
self.container_name)
container_partition = container_info['partition']
containers = container_info['nodes']
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
object_versions = container_info['versions']
if object_versions:
# this is a version manifest and needs to be handled differently
lcontainer = object_versions.split('/')[0]
prefix_len = '%03x' % len(self.object_name)
lprefix = prefix_len + self.object_name + '/'
last_item = None
try:
for last_item in self._listing_iter(lcontainer, lprefix,
req.environ):
pass
except ListingIterNotFound:
# no worries, last_item is None
pass
except ListingIterNotAuthorized, err:
return err.aresp
except ListingIterError:
return HTTPServerError(request=req)
if last_item:
# there are older versions so copy the previous version to the
# current object and delete the previous version
orig_container = self.container_name
orig_obj = self.object_name
self.container_name = lcontainer
self.object_name = last_item['name']
copy_path = '/' + self.account_name + '/' + \
self.container_name + '/' + self.object_name
copy_headers = {'X-Newest': 'True',
'Destination': orig_container + '/' + orig_obj
}
copy_environ = {'REQUEST_METHOD': 'COPY',
'swift_versioned_copy': True
}
creq = Request.blank(copy_path, headers=copy_headers,
environ=copy_environ)
copy_resp = self.COPY(creq)
if is_client_error(copy_resp.status_int):
# some user error, maybe permissions
return HTTPPreconditionFailed(request=req)
elif not is_success(copy_resp.status_int):
# could not copy the data, bail
return HTTPServiceUnavailable(request=req)
# reset these because the COPY changed them
self.container_name = lcontainer
self.object_name = last_item['name']
new_del_req = Request.blank(copy_path, environ=req.environ)
container_info = self.container_info(self.account_name,
self.container_name)
container_partition = container_info['partition']
containers = container_info['nodes']
new_del_req.acl = container_info['write_acl']
new_del_req.path_info = copy_path
req = new_del_req
# remove 'X-If-Delete-At', since it is not for the older copy
if 'X-If-Delete-At' in req.headers:
del req.headers['X-If-Delete-At']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not containers:
return HTTPNotFound(request=req)
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
# Used by container sync feature
if 'x-timestamp' in req.headers:
try:
req.headers['X-Timestamp'] = \
normalize_timestamp(float(req.headers['x-timestamp']))
except ValueError:
return HTTPBadRequest(
request=req, content_type='text/plain',
body='X-Timestamp should be a UNIX timestamp float value; '
'was %r' % req.headers['x-timestamp'])
else:
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
headers = self._backend_requests(
req, len(nodes), container_partition, containers)
resp = self.make_requests(req, self.app.object_ring,
partition, 'DELETE', req.path_info, headers)
return resp
@public
@cors_validation
@delay_denial
def COPY(self, req):
"""HTTP COPY request handler."""
dest = req.headers.get('Destination')
if not dest:
return HTTPPreconditionFailed(request=req,
body='Destination header required')
dest = unquote(dest)
if not dest.startswith('/'):
dest = '/' + dest
try:
_junk, dest_container, dest_object = dest.split('/', 2)
except ValueError:
return HTTPPreconditionFailed(
request=req,
body='Destination header must be of the form '
'<container name>/<object name>')
source = '/' + self.container_name + '/' + self.object_name
self.container_name = dest_container
self.object_name = dest_object
# re-write the existing request as a PUT instead of creating a new one
# since this one is already attached to the posthooklogger
req.method = 'PUT'
req.path_info = '/' + self.account_name + dest
req.headers['Content-Length'] = 0
req.headers['X-Copy-From'] = quote(source)
del req.headers['Destination']
return self.PUT(req)
| Triv90/SwiftUml | swift/proxy/controllers/obj.py | Python | apache-2.0 | 50,946 |
import recordings
import numpy as np
import operator
import math
samplerate=44100.0
from scipy.signal import argrelmax
from essentia.standard import *
def silenceGaps(frames,order=20):
    """Locate silence gaps in a binary frame sequence (1 = silent frame).
    Run-lengths of consecutive silent frames are computed and their local
    maxima (scipy.signal.argrelmax, imported above) mark gaps; returns a
    list of [start, end] frame index pairs."""
consecutiveNumbers = [0]
for i in range(0,len(frames)-2):
counter = 0
j = i
while 1:
if j < len(frames)-1:
if frames[j+1]==1:
counter+=1
j+=1
else:
break
else:
break
consecutiveNumbers.append(counter)
featureArray = np.asarray(consecutiveNumbers)
print order
if order<1:
order = 1
maxima = argrelmax(featureArray, order=order)
featureMaxima = maxima[0] -1
silenceGaps = []
for maximum in featureMaxima:
silenceGaps.append([maximum,maximum+consecutiveNumbers[maximum+1]])
return silenceGaps
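# Illustrative sketch (not part of the original module): typical use of the
# two functions in this file. The filename is a made-up assumption, and
# groupBySilence() is defined just below.
def example_group_recording(filename='recording.wav'):
    # MonoLoader comes from the essentia.standard star-import above
    audio = MonoLoader(filename=filename, sampleRate=samplerate)()
    return groupBySilence(audio)  # list of [start_sample, end_sample] pairs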
def groupBySilence(audio,hopSize=1024,t_silence=0.04,plot=0,orderDivisor=15,minGapSize=8):
    """Split an audio signal into non-silent groups.
    Frames whose Loudness falls below t_silence are marked silent, long
    silence gaps are located with silenceGaps(), and the remaining frames
    are merged into groups returned as [start_sample, end_sample] pairs."""
timestamps = len(audio)
loudness = essentia.standard.Loudness()
energy = Energy()
silenceFrames = []
for frame in FrameGenerator(audio, frameSize = hopSize*2, hopSize = hopSize):
if loudness(frame) >= t_silence:
silenceFrames.append(0)
else:
silenceFrames.append(1)
gaps = silenceGaps(silenceFrames,int(len(audio)/samplerate/orderDivisor))
gapFrames = []
for gap in gaps:
if (gap[1]-gap[0])>minGapSize:#10
gapFrames.extend(range(gap[0],gap[1]))
audioFrames = range(0,len(audio)/hopSize)
groupFrames = [x for x in audioFrames if x not in gapFrames]
from numpy import array, diff, where, split
result= split(groupFrames, where(diff(groupFrames)>2)[0]+1)
splitGroupFrames = map(list, result)
groups = []
for group in splitGroupFrames:
if len(group) > 4:
groups.append([group[0]*hopSize,((group[-1]+1)*hopSize)])
    return groups
| bastustrump/genimpro | grouping.py | Python | mit | 2122
from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-4)
probs = data_layer(name='probs', size=100)
outputs(
sampling_id_layer(input=probs), # It seems not support training
# It seems this layer is not correct, and should be rewrite.
# block_expand_layer(input=probs, channel=1, block_x=1, block_y=3),
)
| helinwang/Paddle | python/paddle/trainer_config_helpers/tests/configs/unused_layers.py | Python | apache-2.0 | 353 |
#!/usr/bin/env python
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from pkg_resources import parse_version
def check_dependencies():
'''
setuptools causes problems for installing packages (especially
statsmodels). Use this function to abort installation instead.
'''
try:
import cython
except ImportError:
raise ImportError("Install cython before installing TurbuStat.")
try:
import matplotlib
mpl_version = matplotlib.__version__
if parse_version(mpl_version) < parse_version('1.2'):
print("***Before installing, upgrade matplotlib to 1.2***")
raise ImportError
except:
raise ImportError(
"Install or upgrade matplotlib before installing TurbuStat.")
try:
from numpy.version import version as np_version
if parse_version(np_version) < parse_version('1.6'):
print("***Before installing, upgrade numpy to 1.6***")
raise ImportError
except:
raise ImportError(
"Install or upgrade numpy before installing TurbuStat.")
try:
from scipy.version import version as sc_version
if parse_version(sc_version) < parse_version('0.12'):
print("***Before installing, upgrade scipy to 0.12***")
raise ImportError
except:
raise ImportError(
"Install or upgrade scipy before installing TurbuStat.")
try:
from pandas.version import version as pa_version
if parse_version(pa_version) < parse_version('0.13'):
print("***Before installing, upgrade pandas to 0.13***")
raise ImportError
except:
raise ImportError(
"Install or upgrade pandas before installing TurbuStat.")
try:
from statsmodels.version import version as sm_version
if parse_version(sm_version) < parse_version('0.4.0'):
print("***Before installing, upgrade statsmodels to 0.4.0***")
raise ImportError
except:
raise ImportError(
"Install or upgrade statsmodels before installing TurbuStat.")
try:
import sklearn
skl_version = sklearn.__version__
if parse_version(skl_version) < parse_version('0.13.0'):
print("***Before installing, upgrade sklearn to 0.13.0***")
raise ImportError
except:
raise ImportError(
"Install or upgrade sklearn before installing TurbuStat.")
try:
from astropy.version import version as ast_version
if parse_version(ast_version[:3]) < parse_version('0.4'):
print(("""***Before installing, upgrade astropy to 0.4.
NOTE: This is the dev version as of 17/06/14.***"""))
raise ImportError("")
except:
raise ImportError(
"Install or upgrade astropy before installing TurbuStat.")
try:
import astrodendro
except:
raise ImportError(("""Install or upgrade astrodendro before installing
TurbuStat. ***NOTE: Need dev version as
of 17/06/14.***"""))
if __name__ == "__main__":
check_dependencies()
setup(name='turbustat',
version='0.0',
description='Distance metrics for comparing spectral line data cubes.',
author='Eric Koch, Caleb Ward, Jason Loeppky and Erik Rosolowsky',
author_email='koch.eric.w@gmail.com',
url='http://github.com/Astroua/TurbuStat',
scripts=[],
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
)
| keflavich/TurbuStat | setup.py | Python | mit | 3,706 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
# Install prereqs here and now if we can.
from setuptools import setup
kw = { 'install_requires': [
'pygit2>=0.16.1',
'json_diff>=1.2.9'
] }
except ImportError:
from distutils.core import setup
print 'No setuptools. Do\n\n $ pip install pygit2\n $ pip install json_diff\n\nto install dependencies.'
kw = {}
execfile('jsongit/version.py')
packages = ['jsongit']
setup(
name='jsongit',
version=__version__,
description='Git for JSON',
long_description=open('README.rst').read(),
author='John Krauss',
author_email='john@accursedware.com',
url='http://github.com/talos/jsongit',
packages=packages,
license='BSD',
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
),
**kw
)
| talos/jsongit | setup.py | Python | bsd-3-clause | 1,102 |
"""MySensors platform that offers a Climate (MySensors-HVAC) component."""
from homeassistant.components import mysensors
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW, DOMAIN, HVAC_MODE_AUTO,
HVAC_MODE_COOL, HVAC_MODE_HEAT, SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
HVAC_MODE_OFF)
from homeassistant.const import (
ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT)
DICT_HA_TO_MYS = {
HVAC_MODE_AUTO: 'AutoChangeOver',
HVAC_MODE_COOL: 'CoolOn',
HVAC_MODE_HEAT: 'HeatOn',
HVAC_MODE_OFF: 'Off',
}
DICT_MYS_TO_HA = {
'AutoChangeOver': HVAC_MODE_AUTO,
'CoolOn': HVAC_MODE_COOL,
'HeatOn': HVAC_MODE_HEAT,
'Off': HVAC_MODE_OFF,
}
FAN_LIST = ['Auto', 'Min', 'Normal', 'Max']
OPERATION_LIST = [HVAC_MODE_OFF, HVAC_MODE_AUTO, HVAC_MODE_COOL,
HVAC_MODE_HEAT]
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the mysensors climate."""
mysensors.setup_mysensors_platform(
hass, DOMAIN, discovery_info, MySensorsHVAC,
async_add_entities=async_add_entities)
class MySensorsHVAC(mysensors.device.MySensorsEntity, ClimateDevice):
"""Representation of a MySensors HVAC."""
@property
def supported_features(self):
"""Return the list of supported features."""
features = 0
set_req = self.gateway.const.SetReq
if set_req.V_HVAC_SPEED in self._values:
features = features | SUPPORT_FAN_MODE
if (set_req.V_HVAC_SETPOINT_COOL in self._values and
set_req.V_HVAC_SETPOINT_HEAT in self._values):
features = (
features | SUPPORT_TARGET_TEMPERATURE_RANGE)
else:
features = features | SUPPORT_TARGET_TEMPERATURE
return features
@property
def assumed_state(self):
"""Return True if unable to access real state of entity."""
return self.gateway.optimistic
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS if self.gateway.metric else TEMP_FAHRENHEIT
@property
def current_temperature(self):
"""Return the current temperature."""
value = self._values.get(self.gateway.const.SetReq.V_TEMP)
if value is not None:
value = float(value)
return value
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
set_req = self.gateway.const.SetReq
if set_req.V_HVAC_SETPOINT_COOL in self._values and \
set_req.V_HVAC_SETPOINT_HEAT in self._values:
return None
temp = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
if temp is None:
temp = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
return float(temp) if temp is not None else None
@property
def target_temperature_high(self):
"""Return the highbound target temperature we try to reach."""
set_req = self.gateway.const.SetReq
if set_req.V_HVAC_SETPOINT_HEAT in self._values:
temp = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
return float(temp) if temp is not None else None
@property
def target_temperature_low(self):
"""Return the lowbound target temperature we try to reach."""
set_req = self.gateway.const.SetReq
if set_req.V_HVAC_SETPOINT_COOL in self._values:
temp = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
return float(temp) if temp is not None else None
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
return self._values.get(self.value_type)
@property
def hvac_modes(self):
"""List of available operation modes."""
return OPERATION_LIST
@property
def fan_mode(self):
"""Return the fan setting."""
return self._values.get(self.gateway.const.SetReq.V_HVAC_SPEED)
@property
def fan_modes(self):
"""List of available fan modes."""
return FAN_LIST
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
set_req = self.gateway.const.SetReq
temp = kwargs.get(ATTR_TEMPERATURE)
low = kwargs.get(ATTR_TARGET_TEMP_LOW)
high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
heat = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
cool = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
updates = []
if temp is not None:
if heat is not None:
# Set HEAT Target temperature
value_type = set_req.V_HVAC_SETPOINT_HEAT
elif cool is not None:
# Set COOL Target temperature
value_type = set_req.V_HVAC_SETPOINT_COOL
if heat is not None or cool is not None:
updates = [(value_type, temp)]
elif all(val is not None for val in (low, high, heat, cool)):
updates = [
(set_req.V_HVAC_SETPOINT_HEAT, low),
(set_req.V_HVAC_SETPOINT_COOL, high)]
for value_type, value in updates:
self.gateway.set_child_value(
self.node_id, self.child_id, value_type, value)
if self.gateway.optimistic:
# Optimistically assume that device has changed state
self._values[value_type] = value
self.async_schedule_update_ha_state()
    async def async_set_fan_mode(self, fan_mode):
        """Set new target fan mode."""
set_req = self.gateway.const.SetReq
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_HVAC_SPEED, fan_mode)
if self.gateway.optimistic:
# Optimistically assume that device has changed state
self._values[set_req.V_HVAC_SPEED] = fan_mode
self.async_schedule_update_ha_state()
    async def async_set_hvac_mode(self, hvac_mode):
        """Set new target hvac mode."""
self.gateway.set_child_value(
self.node_id, self.child_id, self.value_type,
DICT_HA_TO_MYS[hvac_mode])
if self.gateway.optimistic:
# Optimistically assume that device has changed state
self._values[self.value_type] = hvac_mode
self.async_schedule_update_ha_state()
async def async_update(self):
"""Update the controller with the latest value from a sensor."""
await super().async_update()
self._values[self.value_type] = DICT_MYS_TO_HA[
self._values[self.value_type]]
| jabesq/home-assistant | homeassistant/components/mysensors/climate.py | Python | apache-2.0 | 6,774 |
import numpy as np
from ..weight import RankingBasedSelection
class NESSelection(RankingBasedSelection):
"""
This selection scheme is Non-increasing transformation as NES weight. See also,
[Wierstra et. al., 2014]<http://jmlr.org/papers/v15/wierstra14a.html>
"""
def transform(self, rank_based_vals, xp=np):
lam = len(rank_based_vals)
weight = xp.maximum(0, xp.log((lam / 2) + 1) - xp.log(rank_based_vals))
weight /= weight.sum()
return weight - 1. / lam
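# Illustrative sketch (not part of the original module): for a population of
# lam = 4 with ranks 1 (best) to 4 (worst), the transform reproduces the usual
# non-increasing NES utilities, which sum to zero:
#
#   import numpy as np
#   ranks = np.array([1., 2., 3., 4.])
#   w = np.maximum(0, np.log(4 / 2 + 1) - np.log(ranks))
#   w /= w.sum()
#   print(w - 1. / 4)   # ~ [ 0.48,  0.02, -0.25, -0.25 ]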
| satuma777/evoltier | evoltier/selection/nes_selection.py | Python | gpl-3.0 | 510 |
'''
This code is forked from https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
and modified to use as MXNet-Keras integration testing for functionality and sanity performance
benchmarking.
Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from os import environ
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
# Imports for benchmarking
from profiler import profile
from model_util import make_model
# Imports for assertions
from assertion_util import assert_results
# Other environment variables
MACHINE_TYPE = environ['MXNET_KERAS_TEST_MACHINE']
IS_GPU = (environ['MXNET_KERAS_TEST_MACHINE'] == 'GPU')
MACHINE_TYPE = 'GPU' if IS_GPU else 'CPU'
GPU_NUM = int(environ['GPU_NUM']) if IS_GPU else 0
# Expected Benchmark Numbers
CPU_BENCHMARK_RESULTS = {'TRAINING_TIME':550.0, 'MEM_CONSUMPTION':400.0, 'TRAIN_ACCURACY': 0.85, 'TEST_ACCURACY':0.85}
GPU_1_BENCHMARK_RESULTS = {'TRAINING_TIME':40.0, 'MEM_CONSUMPTION':200, 'TRAIN_ACCURACY': 0.85, 'TEST_ACCURACY':0.85}
# TODO: Fix Train and Test accuracy numbers in multiple gpu mode. Setting it to 0 for now to get whole integration set up done
GPU_2_BENCHMARK_RESULTS = {'TRAINING_TIME':45.0, 'MEM_CONSUMPTION':375, 'TRAIN_ACCURACY': 0.0, 'TEST_ACCURACY':0.0}
GPU_4_BENCHMARK_RESULTS = {'TRAINING_TIME':55.0, 'MEM_CONSUMPTION':750.0, 'TRAIN_ACCURACY': 0.0, 'TEST_ACCURACY':0.0}
GPU_8_BENCHMARK_RESULTS = {'TRAINING_TIME':100.0, 'MEM_CONSUMPTION':1800.0, 'TRAIN_ACCURACY': 0.0, 'TEST_ACCURACY':0.0}
# Dictionary to store profiling output
profile_output = {}
batch_size = 128
nb_classes = 10
nb_epoch = 20
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
make_model(model, loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy'])
def train_model():
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
profile_output['TRAIN_ACCURACY'] = history.history['acc'][-1]
def test_run():
# Calling training and profile memory usage
profile_output["MODEL"] = "MNIST MLP"
run_time, memory_usage = profile(train_model)
profile_output['TRAINING_TIME'] = float(run_time)
profile_output['MEM_CONSUMPTION'] = float(memory_usage)
score = model.evaluate(X_test, Y_test, verbose=0)
profile_output["TEST_ACCURACY"] = score[1]
assert_results(MACHINE_TYPE, IS_GPU, GPU_NUM, profile_output, CPU_BENCHMARK_RESULTS, GPU_1_BENCHMARK_RESULTS, GPU_2_BENCHMARK_RESULTS, GPU_4_BENCHMARK_RESULTS, GPU_8_BENCHMARK_RESULTS)
| likelyzhao/mxnet | tests/nightly/mxnet_keras_integration_tests/test_mnist_mlp.py | Python | apache-2.0 | 3,578 |
import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras.utils.test_utils import layer_test, keras_test
from keras.utils.np_utils import conv_input_length
from keras import backend as K
from keras.layers import convolutional, pooling
@keras_test
def test_convolution_1d():
nb_samples = 2
nb_steps = 8
input_dim = 2
filter_length = 3
nb_filter = 3
for border_mode in ['valid', 'same']:
for subsample_length in [1]:
if border_mode == 'same' and subsample_length != 1:
continue
layer_test(convolutional.Convolution1D,
kwargs={'nb_filter': nb_filter,
'filter_length': filter_length,
'border_mode': border_mode,
'subsample_length': subsample_length},
input_shape=(nb_samples, nb_steps, input_dim))
layer_test(convolutional.Convolution1D,
kwargs={'nb_filter': nb_filter,
'filter_length': filter_length,
'border_mode': border_mode,
'W_regularizer': 'l2',
'b_regularizer': 'l2',
'activity_regularizer': 'activity_l2',
'subsample_length': subsample_length},
input_shape=(nb_samples, nb_steps, input_dim))
@keras_test
def test_maxpooling_1d():
for stride in [1, 2]:
layer_test(convolutional.MaxPooling1D,
kwargs={'stride': stride,
'border_mode': 'valid'},
input_shape=(3, 5, 4))
@keras_test
def test_averagepooling_1d():
for stride in [1, 2]:
layer_test(convolutional.AveragePooling1D,
kwargs={'stride': stride,
'border_mode': 'valid'},
input_shape=(3, 5, 4))
@keras_test
def test_convolution_2d():
nb_samples = 2
nb_filter = 2
stack_size = 3
nb_row = 10
nb_col = 6
for border_mode in ['valid', 'same']:
for subsample in [(1, 1), (2, 2)]:
if border_mode == 'same' and subsample != (1, 1):
continue
layer_test(convolutional.Convolution2D,
kwargs={'nb_filter': nb_filter,
'nb_row': 3,
'nb_col': 3,
'border_mode': border_mode,
'subsample': subsample},
input_shape=(nb_samples, stack_size, nb_row, nb_col))
layer_test(convolutional.Convolution2D,
kwargs={'nb_filter': nb_filter,
'nb_row': 3,
'nb_col': 3,
'border_mode': border_mode,
'W_regularizer': 'l2',
'b_regularizer': 'l2',
'activity_regularizer': 'activity_l2',
'subsample': subsample},
input_shape=(nb_samples, stack_size, nb_row, nb_col))
@keras_test
def test_deconvolution_2d():
nb_samples = 2
nb_filter = 2
stack_size = 3
nb_row = 10
nb_col = 6
for border_mode in ['valid', 'same']:
for subsample in [(1, 1), (2, 2)]:
if border_mode == 'same' and subsample != (1, 1):
continue
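            # conv_input_length() back-computes the spatial size that a forward
            # convolution with this kernel, border mode and stride would reduce
            # to nb_row/nb_col, i.e. the size the transposed convolution has to
            # produce; it is passed to the layer as output_shape below.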
rows = conv_input_length(nb_row, 3, border_mode, subsample[0])
cols = conv_input_length(nb_col, 3, border_mode, subsample[1])
layer_test(convolutional.Deconvolution2D,
kwargs={'nb_filter': nb_filter,
'nb_row': 3,
'nb_col': 3,
'output_shape': (nb_samples, nb_filter, rows, cols),
'border_mode': border_mode,
'subsample': subsample},
input_shape=(nb_samples, stack_size, nb_row, nb_col),
fixed_batch_size=True)
layer_test(convolutional.Deconvolution2D,
kwargs={'nb_filter': nb_filter,
'nb_row': 3,
'nb_col': 3,
'output_shape': (nb_samples, nb_filter, rows, cols),
'border_mode': border_mode,
'W_regularizer': 'l2',
'b_regularizer': 'l2',
'activity_regularizer': 'activity_l2',
'subsample': subsample},
input_shape=(nb_samples, stack_size, nb_row, nb_col),
fixed_batch_size=True)
@keras_test
def test_atrous_conv_2d():
nb_samples = 2
nb_filter = 2
stack_size = 3
nb_row = 10
nb_col = 6
for border_mode in ['valid', 'same']:
for subsample in [(1, 1), (2, 2)]:
for atrous_rate in [(1, 1), (2, 2)]:
if border_mode == 'same' and subsample != (1, 1):
continue
if subsample != (1, 1) and atrous_rate != (1, 1):
continue
layer_test(convolutional.AtrousConv2D,
kwargs={'nb_filter': nb_filter,
'nb_row': 3,
'nb_col': 3,
'border_mode': border_mode,
'subsample': subsample,
'atrous_rate': atrous_rate},
input_shape=(nb_samples, stack_size, nb_row, nb_col))
layer_test(convolutional.AtrousConv2D,
kwargs={'nb_filter': nb_filter,
'nb_row': 3,
'nb_col': 3,
'border_mode': border_mode,
'W_regularizer': 'l2',
'b_regularizer': 'l2',
'activity_regularizer': 'activity_l2',
'subsample': subsample,
'atrous_rate': atrous_rate},
input_shape=(nb_samples, stack_size, nb_row, nb_col))
@pytest.mark.skipif(K._BACKEND != 'tensorflow', reason="Requires TF backend")
@keras_test
def test_separable_conv_2d():
nb_samples = 2
nb_filter = 6
stack_size = 3
nb_row = 10
nb_col = 6
for border_mode in ['valid', 'same']:
for subsample in [(1, 1), (2, 2)]:
for multiplier in [1, 2]:
if border_mode == 'same' and subsample != (1, 1):
continue
layer_test(convolutional.SeparableConv2D,
kwargs={'nb_filter': nb_filter,
'nb_row': 3,
'nb_col': 3,
'border_mode': border_mode,
'subsample': subsample,
'depth_multiplier': multiplier},
input_shape=(nb_samples, stack_size, nb_row, nb_col))
layer_test(convolutional.SeparableConv2D,
kwargs={'nb_filter': nb_filter,
'nb_row': 3,
'nb_col': 3,
'border_mode': border_mode,
'depthwise_regularizer': 'l2',
'pointwise_regularizer': 'l2',
'b_regularizer': 'l2',
'activity_regularizer': 'activity_l2',
'pointwise_constraint': 'unitnorm',
'depthwise_constraint': 'unitnorm',
'subsample': subsample,
'depth_multiplier': multiplier},
input_shape=(nb_samples, stack_size, nb_row, nb_col))
@keras_test
def test_globalpooling_1d():
layer_test(pooling.GlobalMaxPooling1D,
input_shape=(3, 4, 5))
layer_test(pooling.GlobalAveragePooling1D,
input_shape=(3, 4, 5))
@keras_test
def test_globalpooling_2d():
layer_test(pooling.GlobalMaxPooling2D,
kwargs={'dim_ordering': 'th'},
input_shape=(3, 4, 5, 6))
layer_test(pooling.GlobalMaxPooling2D,
kwargs={'dim_ordering': 'tf'},
input_shape=(3, 5, 6, 4))
layer_test(pooling.GlobalAveragePooling2D,
kwargs={'dim_ordering': 'th'},
input_shape=(3, 4, 5, 6))
layer_test(pooling.GlobalAveragePooling2D,
kwargs={'dim_ordering': 'tf'},
input_shape=(3, 5, 6, 4))
@keras_test
def test_maxpooling_2d():
pool_size = (3, 3)
for strides in [(1, 1), (2, 2)]:
layer_test(convolutional.MaxPooling2D,
kwargs={'strides': strides,
'border_mode': 'valid',
'pool_size': pool_size},
input_shape=(3, 4, 11, 12))
@keras_test
def test_averagepooling_2d():
pool_size = (3, 3)
for border_mode in ['valid', 'same']:
for pool_size in [(2, 2), (3, 3), (4, 4), (5, 5)]:
for strides in [(1, 1), (2, 2)]:
                layer_test(convolutional.AveragePooling2D,
kwargs={'strides': strides,
'border_mode': border_mode,
'pool_size': pool_size},
input_shape=(3, 4, 11, 12))
@keras_test
def test_convolution_3d():
nb_samples = 2
nb_filter = 2
stack_size = 3
kernel_dim1 = 2
kernel_dim2 = 3
kernel_dim3 = 1
input_len_dim1 = 10
input_len_dim2 = 11
input_len_dim3 = 12
for border_mode in ['same', 'valid']:
for subsample in [(1, 1, 1), (2, 2, 2)]:
if border_mode == 'same' and subsample != (1, 1, 1):
continue
layer_test(convolutional.Convolution3D,
kwargs={'nb_filter': nb_filter,
'kernel_dim1': kernel_dim1,
'kernel_dim2': kernel_dim2,
'kernel_dim3': kernel_dim3,
'border_mode': border_mode,
'subsample': subsample},
input_shape=(nb_samples, stack_size,
input_len_dim1, input_len_dim2, input_len_dim3))
layer_test(convolutional.Convolution3D,
kwargs={'nb_filter': nb_filter,
'kernel_dim1': kernel_dim1,
'kernel_dim2': kernel_dim2,
'kernel_dim3': kernel_dim3,
'border_mode': border_mode,
'W_regularizer': 'l2',
'b_regularizer': 'l2',
'activity_regularizer': 'activity_l2',
'subsample': subsample},
input_shape=(nb_samples, stack_size,
input_len_dim1, input_len_dim2, input_len_dim3))
@keras_test
def test_maxpooling_3d():
pool_size = (3, 3, 3)
for strides in [(1, 1, 1), (2, 2, 2)]:
layer_test(convolutional.MaxPooling3D,
kwargs={'strides': strides,
'border_mode': 'valid',
'pool_size': pool_size},
input_shape=(3, 4, 11, 12, 10))
@keras_test
def test_averagepooling_3d():
pool_size = (3, 3, 3)
for strides in [(1, 1, 1), (2, 2, 2)]:
layer_test(convolutional.AveragePooling3D,
kwargs={'strides': strides,
'border_mode': 'valid',
'pool_size': pool_size},
input_shape=(3, 4, 11, 12, 10))
@keras_test
def test_zero_padding_2d():
nb_samples = 2
stack_size = 2
input_nb_row = 11
input_nb_col = 12
input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
# basic test
layer_test(convolutional.ZeroPadding2D,
kwargs={'padding': (2, 2)},
input_shape=input.shape)
# correctness test
layer = convolutional.ZeroPadding2D(padding=(2, 2))
layer.set_input(K.variable(input), shape=input.shape)
out = K.eval(layer.output)
for offset in [0, 1, -1, -2]:
assert_allclose(out[:, :, offset, :], 0.)
assert_allclose(out[:, :, :, offset], 0.)
assert_allclose(out[:, :, 2:-2, 2:-2], 1.)
layer.get_config()
def test_zero_padding_3d():
nb_samples = 2
stack_size = 2
input_len_dim1 = 10
input_len_dim2 = 11
input_len_dim3 = 12
input = np.ones((nb_samples, stack_size, input_len_dim1,
input_len_dim2, input_len_dim3))
# basic test
layer_test(convolutional.ZeroPadding3D,
kwargs={'padding': (2, 2, 2)},
input_shape=input.shape)
# correctness test
layer = convolutional.ZeroPadding3D(padding=(2, 2, 2))
layer.set_input(K.variable(input), shape=input.shape)
out = K.eval(layer.output)
for offset in [0, 1, -1, -2]:
assert_allclose(out[:, :, offset, :, :], 0.)
assert_allclose(out[:, :, :, offset, :], 0.)
assert_allclose(out[:, :, :, :, offset], 0.)
assert_allclose(out[:, :, 2:-2, 2:-2, 2:-2], 1.)
layer.get_config()
@keras_test
def test_upsampling_1d():
layer_test(convolutional.UpSampling1D,
kwargs={'length': 2},
input_shape=(3, 5, 4))
@keras_test
def test_upsampling_2d():
nb_samples = 2
stack_size = 2
input_nb_row = 11
input_nb_col = 12
for dim_ordering in ['th', 'tf']:
if dim_ordering == 'th':
input = np.random.rand(nb_samples, stack_size, input_nb_row,
input_nb_col)
else: # tf
input = np.random.rand(nb_samples, input_nb_row, input_nb_col,
stack_size)
for length_row in [2, 3, 9]:
for length_col in [2, 3, 9]:
layer = convolutional.UpSampling2D(
size=(length_row, length_col),
dim_ordering=dim_ordering)
layer.set_input(K.variable(input), shape=input.shape)
out = K.eval(layer.output)
if dim_ordering == 'th':
assert out.shape[2] == length_row * input_nb_row
assert out.shape[3] == length_col * input_nb_col
else: # tf
assert out.shape[1] == length_row * input_nb_row
assert out.shape[2] == length_col * input_nb_col
# compare with numpy
if dim_ordering == 'th':
expected_out = np.repeat(input, length_row, axis=2)
expected_out = np.repeat(expected_out, length_col, axis=3)
else: # tf
expected_out = np.repeat(input, length_row, axis=1)
expected_out = np.repeat(expected_out, length_col, axis=2)
assert_allclose(out, expected_out)
def test_upsampling_3d():
nb_samples = 2
stack_size = 2
input_len_dim1 = 10
input_len_dim2 = 11
input_len_dim3 = 12
for dim_ordering in ['th', 'tf']:
if dim_ordering == 'th':
input = np.random.rand(nb_samples, stack_size, input_len_dim1, input_len_dim2,
input_len_dim3)
else: # tf
input = np.random.rand(nb_samples, input_len_dim1, input_len_dim2, input_len_dim3,
stack_size)
for length_dim1 in [2, 3, 9]:
for length_dim2 in [2, 3, 9]:
for length_dim3 in [2, 3, 9]:
layer = convolutional.UpSampling3D(
size=(length_dim1, length_dim2, length_dim3),
dim_ordering=dim_ordering)
layer.set_input(K.variable(input), shape=input.shape)
out = K.eval(layer.output)
if dim_ordering == 'th':
assert out.shape[2] == length_dim1 * input_len_dim1
assert out.shape[3] == length_dim2 * input_len_dim2
assert out.shape[4] == length_dim3 * input_len_dim3
else: # tf
assert out.shape[1] == length_dim1 * input_len_dim1
assert out.shape[2] == length_dim2 * input_len_dim2
assert out.shape[3] == length_dim3 * input_len_dim3
# compare with numpy
if dim_ordering == 'th':
expected_out = np.repeat(input, length_dim1, axis=2)
expected_out = np.repeat(expected_out, length_dim2, axis=3)
expected_out = np.repeat(expected_out, length_dim3, axis=4)
else: # tf
expected_out = np.repeat(input, length_dim1, axis=1)
expected_out = np.repeat(expected_out, length_dim2, axis=2)
expected_out = np.repeat(expected_out, length_dim3, axis=3)
assert_allclose(out, expected_out)
@keras_test
def test_cropping_1d():
nb_samples = 2
time_length = 10
input_len_dim1 = 2
input = np.random.rand(nb_samples, time_length, input_len_dim1)
layer_test(convolutional.Cropping1D,
kwargs={'cropping': (2, 2)},
input_shape=input.shape)
def test_cropping_2d():
nb_samples = 2
stack_size = 2
input_len_dim1 = 8
input_len_dim2 = 8
cropping = ((2, 2), (3, 3))
dim_ordering = K.image_dim_ordering()
if dim_ordering == 'th':
input = np.random.rand(nb_samples, stack_size, input_len_dim1, input_len_dim2)
else:
input = np.random.rand(nb_samples, input_len_dim1, input_len_dim2, stack_size)
# basic test
layer_test(convolutional.Cropping2D,
kwargs={'cropping': cropping,
'dim_ordering': dim_ordering},
input_shape=input.shape)
# correctness test
layer = convolutional.Cropping2D(cropping=cropping, dim_ordering=dim_ordering)
layer.set_input(K.variable(input), shape=input.shape)
out = K.eval(layer.output)
# compare with numpy
if dim_ordering == 'th':
expected_out = input[:,
:,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1]]
else:
expected_out = input[:,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
:]
assert_allclose(out, expected_out)
def test_cropping_3d():
nb_samples = 2
stack_size = 2
input_len_dim1 = 8
input_len_dim2 = 8
input_len_dim3 = 8
cropping = ((2, 2), (3, 3), (2, 3))
dim_ordering = K.image_dim_ordering()
if dim_ordering == 'th':
input = np.random.rand(nb_samples, stack_size, input_len_dim1, input_len_dim2, input_len_dim3)
else:
input = np.random.rand(nb_samples, input_len_dim1, input_len_dim2, input_len_dim3, stack_size)
# basic test
layer_test(convolutional.Cropping3D,
kwargs={'cropping': cropping,
'dim_ordering': dim_ordering},
input_shape=input.shape)
# correctness test
layer = convolutional.Cropping3D(cropping=cropping, dim_ordering=dim_ordering)
layer.set_input(K.variable(input), shape=input.shape)
out = K.eval(layer.output)
# compare with numpy
if dim_ordering == 'th':
expected_out = input[:,
:,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1]]
else:
expected_out = input[:,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1],
:]
assert_allclose(out, expected_out)
if __name__ == '__main__':
pytest.main([__file__])
| nebw/keras | tests/keras/layers/test_convolutional.py | Python | mit | 20,892 |
#!/usr/bin/env python
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Jesse Zaneveld", "Rob Knight"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Jesse Zaneveld"
__email__ = "zaneveld@gmail.com"
__status__ = "Development"
from sys import argv
from string import strip
from os import listdir,path
from optparse import OptionParser
from datetime import datetime
def parse_kegg_taxonomy(lines):
"""Returns successive taxonomy entries from lines.
Format of return value is four levels of taxonomy (sometimes empty),
unique id, three-letter kegg code, abbreviated name, full name,
genus, species, and common name if present.
Taxonomic level information is implicit in the number of hashes read
at the beginning of the last line with hashes. Need to keep track of
the last level read.
Each hash line has a number of hashes indicating the level, and a name
for that taxonomic level. Note that this is not as detailed as the real
taxonomy in the genome file!
Maximum taxonomic level as of this writing is 4: exclude any levels more
detailed than this.
Each non-taxon line is tab-delimited: has a unique id of some kind, then
the three-letter KEGG code, then the short name (should be the same as
the names of the individual species files for genes, etc.), then the
genus and species names which may have a common name in parentheses
afterwards.
"""
max_taxonomy_length = 4
taxonomy_stack = []
for line in lines:
#bail out if it's a blank line
line = line.rstrip()
if not line:
continue
if line.startswith('#'): #line defining taxonomic level
hashes, name = line.split(None, 1)
name = name.strip()
level = len(hashes)
if level == len(taxonomy_stack): #new entry at same level
taxonomy_stack[-1] = name
elif level > len(taxonomy_stack): #add level: assume sequential
taxonomy_stack.append(name)
else: #level must be less than stack length: truncate
del taxonomy_stack[level:]
taxonomy_stack[level-1] = name
else: #line defining an individual taxonomy entry
fields = map(strip, line.split('\t'))
#add genus, species, and common name as three additional fields
raw_species_name = fields[-1]
species_fields = raw_species_name.split()
            if not species_fields:
                print "ERROR"
                print line
                continue  # skip malformed entries instead of crashing on the indexing below
            genus_name = species_fields[0]
if len(species_fields) > 1:
species_name = species_fields[1]
else:
species_name = ''
#check for common name
if '(' in raw_species_name:
prefix, common_name = raw_species_name.split('(', 1)
common_name, ignored = common_name.split(')', 1)
else:
common_name = ''
output_taxon = taxonomy_stack + \
['']*(max_taxonomy_length-len(taxonomy_stack)) \
+ fields + [genus_name, species_name, common_name]
yield "\t".join(output_taxon) + "\n"
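# Illustrative sketch (not part of the original module, data made up): hash
# lines set the taxonomy levels, and each tab-delimited line then yields one
# flattened record.
#
#   lines = [
#       "# Eukaryotes",
#       "## Animals",
#       "T00001\thsa\thsa\tHomo sapiens (human)",
#   ]
#   for row in parse_kegg_taxonomy(lines):
#       print row.strip()
#   # -> "Eukaryotes", "Animals", two empty padding levels, the four input
#   # fields, then "Homo", "sapiens", "human", all joined by tabs.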
if __name__ == '__main__':
from sys import argv
filename = argv[1]
for result_line in parse_kegg_taxonomy(open(filename,"U")):
print result_line.strip()
| sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/parse/kegg_taxonomy.py | Python | mit | 3,470 |
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import random
import re
import tempfile
import flask
import numpy as np
import werkzeug.exceptions
from .forms import ImageClassificationModelForm
from .job import ImageClassificationModelJob
import digits
from digits import frameworks
from digits import utils
from digits.config import config_value
from digits.dataset import ImageClassificationDatasetJob
from digits.inference import ImageInferenceJob
from digits.status import Status
from digits.utils import filesystem as fs
from digits.utils.forms import fill_form_if_cloned, save_form_to_job
from digits.utils.routing import request_wants_json, job_from_request
from digits.webapp import app, scheduler
blueprint = flask.Blueprint(__name__, __name__)
"""
Read image list
"""
def read_image_list(image_list, image_folder, num_test_images):
paths = []
ground_truths = []
for line in image_list.readlines():
line = line.strip()
if not line:
continue
# might contain a numerical label at the end
match = re.match(r'(.*\S)\s+(\d+)$', line)
if match:
path = match.group(1)
ground_truth = int(match.group(2))
else:
path = line
ground_truth = None
if not utils.is_url(path) and image_folder and not os.path.isabs(path):
path = os.path.join(image_folder, path)
paths.append(path)
ground_truths.append(ground_truth)
if num_test_images is not None and len(paths) >= num_test_images:
break
return paths, ground_truths
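# Illustrative sketch (not part of the original module): each non-empty line of
# the uploaded list is either a bare path/URL or a path followed by a numeric
# ground-truth label; relative paths are resolved against image_folder. With a
# file-like object such as StringIO:
#
#   from StringIO import StringIO
#   paths, truths = read_image_list(
#       StringIO("cat.jpg 0\ndog.jpg 1\nhttp://example.com/bird.jpg\n"),
#       '/data/images', None)
#   # paths  -> ['/data/images/cat.jpg', '/data/images/dog.jpg',
#   #            'http://example.com/bird.jpg']
#   # truths -> [0, 1, None]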
@blueprint.route('/new', methods=['GET'])
@utils.auth.requires_login
def new():
"""
Return a form for a new ImageClassificationModelJob
"""
form = ImageClassificationModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = get_standard_networks()
form.standard_networks.default = get_default_standard_network()
form.previous_networks.choices = get_previous_networks()
prev_network_snapshots = get_previous_network_snapshots()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
return flask.render_template('models/images/classification/new.html',
form = form,
frameworks = frameworks.get_frameworks(),
previous_network_snapshots = prev_network_snapshots,
previous_networks_fullinfo = get_previous_networks_fulldetails(),
multi_gpu = config_value('caffe_root')['multi_gpu'],
)
@blueprint.route('.json', methods=['POST'])
@blueprint.route('', methods=['POST'], strict_slashes=False)
@utils.auth.requires_login(redirect=False)
def create():
"""
Create a new ImageClassificationModelJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = ImageClassificationModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = get_standard_networks()
form.standard_networks.default = get_default_standard_network()
form.previous_networks.choices = get_previous_networks()
prev_network_snapshots = get_previous_network_snapshots()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
if not form.validate_on_submit():
if request_wants_json():
return flask.jsonify({'errors': form.errors}), 400
else:
return flask.render_template('models/images/classification/new.html',
form = form,
frameworks = frameworks.get_frameworks(),
previous_network_snapshots = prev_network_snapshots,
previous_networks_fullinfo = get_previous_networks_fulldetails(),
multi_gpu = config_value('caffe_root')['multi_gpu'],
), 400
datasetJob = scheduler.get_job(form.dataset.data)
if not datasetJob:
raise werkzeug.exceptions.BadRequest(
'Unknown dataset job_id "%s"' % form.dataset.data)
job = None
try:
job = ImageClassificationModelJob(
username = utils.auth.get_username(),
name = form.model_name.data,
dataset_id = datasetJob.id(),
)
# get handle to framework object
fw = frameworks.get_framework_by_id(form.framework.data)
pretrained_model = None
if form.method.data == 'standard':
found = False
# can we find it in standard networks?
network_desc = fw.get_standard_network_desc(form.standard_networks.data)
if network_desc:
found = True
network = fw.get_network_from_desc(network_desc)
if not found:
raise werkzeug.exceptions.BadRequest(
'Unknown standard model "%s"' % form.standard_networks.data)
elif form.method.data == 'previous':
old_job = scheduler.get_job(form.previous_networks.data)
if not old_job:
raise werkzeug.exceptions.BadRequest(
'Job not found: %s' % form.previous_networks.data)
use_same_dataset = (old_job.dataset_id == job.dataset_id)
network = fw.get_network_from_previous(old_job.train_task().network, use_same_dataset)
for choice in form.previous_networks.choices:
if choice[0] == form.previous_networks.data:
epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
if epoch == 0:
pass
elif epoch == -1:
pretrained_model = old_job.train_task().pretrained_model
else:
for filename, e in old_job.train_task().snapshots:
if e == epoch:
pretrained_model = filename
break
if pretrained_model is None:
raise werkzeug.exceptions.BadRequest(
"For the job %s, selected pretrained_model for epoch %d is invalid!"
% (form.previous_networks.data, epoch))
if not (os.path.exists(pretrained_model)):
                            raise werkzeug.exceptions.BadRequest(
                                "Pretrained_model for the selected epoch doesn't exist. It may have been deleted by another user/process. Please restart the server to load the correct pretrained_model details")
break
elif form.method.data == 'custom':
network = fw.get_network_from_desc(form.custom_network.data)
pretrained_model = form.custom_network_snapshot.data.strip()
else:
raise werkzeug.exceptions.BadRequest(
'Unrecognized method: "%s"' % form.method.data)
policy = {'policy': form.lr_policy.data}
if form.lr_policy.data == 'fixed':
pass
elif form.lr_policy.data == 'step':
policy['stepsize'] = form.lr_step_size.data
policy['gamma'] = form.lr_step_gamma.data
elif form.lr_policy.data == 'multistep':
policy['stepvalue'] = form.lr_multistep_values.data
policy['gamma'] = form.lr_multistep_gamma.data
elif form.lr_policy.data == 'exp':
policy['gamma'] = form.lr_exp_gamma.data
elif form.lr_policy.data == 'inv':
policy['gamma'] = form.lr_inv_gamma.data
policy['power'] = form.lr_inv_power.data
elif form.lr_policy.data == 'poly':
policy['power'] = form.lr_poly_power.data
elif form.lr_policy.data == 'sigmoid':
policy['stepsize'] = form.lr_sigmoid_step.data
policy['gamma'] = form.lr_sigmoid_gamma.data
else:
raise werkzeug.exceptions.BadRequest(
'Invalid learning rate policy')
if config_value('caffe_root')['multi_gpu']:
if form.select_gpus.data:
selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
gpu_count = None
elif form.select_gpu_count.data:
gpu_count = form.select_gpu_count.data
selected_gpus = None
else:
gpu_count = 1
selected_gpus = None
else:
if form.select_gpu.data == 'next':
gpu_count = 1
selected_gpus = None
else:
selected_gpus = [str(form.select_gpu.data)]
gpu_count = None
# Python Layer File may be on the server or copied from the client.
fs.copy_python_layer_file(
bool(form.python_layer_from_client.data),
job.dir(),
(flask.request.files[form.python_layer_client_file.name]
if form.python_layer_client_file.name in flask.request.files
else ''), form.python_layer_server_file.data)
job.tasks.append(fw.create_train_task(
job_dir = job.dir(),
dataset = datasetJob,
train_epochs = form.train_epochs.data,
snapshot_interval = form.snapshot_interval.data,
learning_rate = form.learning_rate.data,
lr_policy = policy,
gpu_count = gpu_count,
selected_gpus = selected_gpus,
batch_size = form.batch_size.data,
val_interval = form.val_interval.data,
pretrained_model= pretrained_model,
crop_size = form.crop_size.data,
use_mean = form.use_mean.data,
network = network,
random_seed = form.random_seed.data,
solver_type = form.solver_type.data,
shuffle = form.shuffle.data,
)
)
## Save form data with the job so we can easily clone it later.
save_form_to_job(job, form)
scheduler.add_job(job)
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for('digits.model.views.show', job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
def show(job):
"""
Called from digits.model.views.models_show()
"""
return flask.render_template('models/images/classification/show.html', job=job, framework_ids = [fw.get_id() for fw in frameworks.get_frameworks()])
@blueprint.route('/large_graph', methods=['GET'])
def large_graph():
"""
Show the loss/accuracy graph, but bigger
"""
job = job_from_request()
return flask.render_template('models/images/classification/large_graph.html', job=job)
@blueprint.route('/classify_one.json', methods=['POST'])
@blueprint.route('/classify_one', methods=['POST', 'GET'])
def classify_one():
"""
Classify one image and return the top 5 classifications
Returns JSON when requested: {predictions: {category: confidence,...}}
"""
model_job = job_from_request()
remove_image_path = False
if 'image_path' in flask.request.form and flask.request.form['image_path']:
image_path = flask.request.form['image_path']
elif 'image_file' in flask.request.files and flask.request.files['image_file']:
outfile = tempfile.mkstemp(suffix='.png')
flask.request.files['image_file'].save(outfile[1])
image_path = outfile[1]
os.close(outfile[0])
remove_image_path = True
else:
raise werkzeug.exceptions.BadRequest('must provide image_path or image_file')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
layers = 'none'
if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
layers = 'all'
# create inference job
inference_job = ImageInferenceJob(
username = utils.auth.get_username(),
name = "Classify One Image",
model = model_job,
images = [image_path],
epoch = epoch,
layers = layers
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, visualizations = inference_job.get_data()
# delete job
scheduler.delete_job(inference_job)
if remove_image_path:
os.remove(image_path)
image = None
predictions = []
if inputs is not None and len(inputs['data']) == 1:
image = utils.image.embed_image_html(inputs['data'][0])
# convert to class probabilities for viewing
last_output_name, last_output_data = outputs.items()[-1]
if len(last_output_data) == 1:
scores = last_output_data[0].flatten()
indices = (-scores).argsort()
labels = model_job.train_task().get_labels()
predictions = []
for i in indices:
# ignore prediction if we don't have a label for the corresponding class
# the user might have set the final fully-connected layer's num_output to
# too high a value
if i < len(labels):
predictions.append( (labels[i], scores[i]) )
predictions = [(p[0], round(100.0*p[1],2)) for p in predictions[:5]]
if request_wants_json():
return flask.jsonify({'predictions': predictions})
else:
return flask.render_template('models/images/classification/classify_one.html',
model_job = model_job,
job = inference_job,
image_src = image,
predictions = predictions,
visualizations = visualizations,
total_parameters= sum(v['param_count'] for v in visualizations if v['vis_type'] == 'Weights'),
)
@blueprint.route('/classify_many.json', methods=['POST'])
@blueprint.route('/classify_many', methods=['POST', 'GET'])
def classify_many():
"""
Classify many images and return the top 5 classifications for each
Returns JSON when requested: {classifications: {filename: [[category,confidence],...],...}}
"""
model_job = job_from_request()
image_list = flask.request.files.get('image_list')
if not image_list:
raise werkzeug.exceptions.BadRequest('image_list is a required field')
if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
image_folder = flask.request.form['image_folder']
if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exist' % image_folder)
else:
image_folder = None
if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
num_test_images = int(flask.request.form['num_test_images'])
else:
num_test_images = None
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
paths, ground_truths = read_image_list(image_list, image_folder, num_test_images)
# create inference job
inference_job = ImageInferenceJob(
username = utils.auth.get_username(),
name = "Classify Many Images",
model = model_job,
images = paths,
epoch = epoch,
layers = 'none'
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, _ = inference_job.get_data()
# delete job
scheduler.delete_job(inference_job)
if outputs is not None and len(outputs) < 1:
# an error occurred
outputs = None
if inputs is not None:
# retrieve path and ground truth of images that were successfully processed
paths = [paths[idx] for idx in inputs['ids']]
ground_truths = [ground_truths[idx] for idx in inputs['ids']]
# defaults
classifications = None
show_ground_truth = None
top1_accuracy = None
top5_accuracy = None
confusion_matrix = None
per_class_accuracy = None
labels = None
if outputs is not None:
# convert to class probabilities for viewing
last_output_name, last_output_data = outputs.items()[-1]
if len(last_output_data) < 1:
raise werkzeug.exceptions.BadRequest(
'Unable to classify any image from the file')
scores = last_output_data
# take top 5
indices = (-scores).argsort()[:, :5]
labels = model_job.train_task().get_labels()
n_labels = len(labels)
# remove invalid ground truth
ground_truths = [x if x is not None and (0 <= x < n_labels) else None for x in ground_truths]
# how many pieces of ground truth to we have?
n_ground_truth = len([1 for x in ground_truths if x is not None])
show_ground_truth = n_ground_truth > 0
# compute classifications and statistics
classifications = []
n_top1_accurate = 0
n_top5_accurate = 0
confusion_matrix = np.zeros((n_labels,n_labels), dtype=np.dtype(int))
for image_index, index_list in enumerate(indices):
result = []
if ground_truths[image_index] is not None:
if ground_truths[image_index] == index_list[0]:
n_top1_accurate += 1
if ground_truths[image_index] in index_list:
n_top5_accurate += 1
if (0 <= ground_truths[image_index] < n_labels) and (0 <= index_list[0] < n_labels):
confusion_matrix[ground_truths[image_index], index_list[0]] += 1
for i in index_list:
# `i` is a category in labels and also an index into scores
# ignore prediction if we don't have a label for the corresponding class
# the user might have set the final fully-connected layer's num_output to
# too high a value
if i < len(labels):
result.append((labels[i], round(100.0*scores[image_index, i],2)))
classifications.append(result)
# accuracy
if show_ground_truth:
top1_accuracy = round(100.0 * n_top1_accurate / n_ground_truth, 2)
top5_accuracy = round(100.0 * n_top5_accurate / n_ground_truth, 2)
per_class_accuracy = []
for x in xrange(n_labels):
n_examples = sum(confusion_matrix[x])
per_class_accuracy.append(round(100.0 * confusion_matrix[x,x] / n_examples, 2) if n_examples > 0 else None)
else:
top1_accuracy = None
top5_accuracy = None
per_class_accuracy = None
# replace ground truth indices with labels
ground_truths = [labels[x] if x is not None and (0 <= x < n_labels ) else None for x in ground_truths]
if request_wants_json():
joined = dict(zip(paths, classifications))
return flask.jsonify({'classifications': joined})
else:
return flask.render_template('models/images/classification/classify_many.html',
model_job = model_job,
job = inference_job,
paths = paths,
classifications = classifications,
show_ground_truth = show_ground_truth,
ground_truths = ground_truths,
top1_accuracy = top1_accuracy,
top5_accuracy = top5_accuracy,
confusion_matrix = confusion_matrix,
per_class_accuracy = per_class_accuracy,
labels = labels,
)
@blueprint.route('/top_n', methods=['POST'])
def top_n():
"""
Classify many images and show the top N images per category by confidence
"""
model_job = job_from_request()
image_list = flask.request.files['image_list']
if not image_list:
raise werkzeug.exceptions.BadRequest('File upload not found')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
if 'top_n' in flask.request.form and flask.request.form['top_n'].strip():
top_n = int(flask.request.form['top_n'])
else:
top_n = 9
if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
image_folder = flask.request.form['image_folder']
if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exist' % image_folder)
else:
image_folder = None
if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
num_test_images = int(flask.request.form['num_test_images'])
else:
num_test_images = None
paths, _ = read_image_list(image_list, image_folder, num_test_images)
# create inference job
inference_job = ImageInferenceJob(
username = utils.auth.get_username(),
name = "TopN Image Classification",
model = model_job,
images = paths,
epoch = epoch,
layers = 'none'
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, _ = inference_job.get_data()
# delete job
scheduler.delete_job(inference_job)
results = None
if outputs is not None and len(outputs) > 0:
# convert to class probabilities for viewing
last_output_name, last_output_data = outputs.items()[-1]
scores = last_output_data
if scores is None:
            raise RuntimeError('An error occurred while processing the images')
labels = model_job.train_task().get_labels()
images = inputs['data']
indices = (-scores).argsort(axis=0)[:top_n]
results = []
# Can't have more images per category than the number of images
images_per_category = min(top_n, len(images))
# Can't have more categories than the number of labels or the number of outputs
n_categories = min(indices.shape[1], len(labels))
for i in xrange(n_categories):
result_images = []
for j in xrange(images_per_category):
result_images.append(images[indices[j][i]])
results.append((
labels[i],
utils.image.embed_image_html(
utils.image.vis_square(np.array(result_images),
colormap='white')
)
))
return flask.render_template('models/images/classification/top_n.html',
model_job = model_job,
job = inference_job,
results = results,
)
def get_datasets():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationDatasetJob) and (j.status.is_running() or j.status == Status.DONE)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_standard_networks():
return [
('lenet', 'LeNet'),
('alexnet', 'AlexNet'),
#('vgg-16', 'VGG (16-layer)'), #XXX model won't learn
('googlenet', 'GoogLeNet'),
]
def get_default_standard_network():
return 'alexnet'
def get_previous_networks():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationModelJob)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_previous_networks_fulldetails():
return [(j) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationModelJob)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_previous_network_snapshots():
prev_network_snapshots = []
for job_id, _ in get_previous_networks():
job = scheduler.get_job(job_id)
e = [(0, 'None')] + [(epoch, 'Epoch #%s' % epoch)
for _, epoch in reversed(job.train_task().snapshots)]
if job.train_task().pretrained_model:
e.insert(0, (-1, 'Previous pretrained model'))
prev_network_snapshots.append(e)
return prev_network_snapshots
| AgentVi/DIGITS | digits/model/images/classification/views.py | Python | bsd-3-clause | 25,236 |
"""
Admin site bindings for splash screen
"""
from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin
from splash.models import SplashConfig
admin.site.register(SplashConfig, ConfigurationModelAdmin)
| GbalsaC/bitnamiP | django-splash/splash/admin.py | Python | agpl-3.0 | 236 |
def main() -> None:
    # Encode the move string: 'R' -> +1, 'L' -> -1.
    S = [1 if s == "R" else -1 for s in input()]
    N = len(S)
    ans = [0] * N
    for i in range(N-1):
        if S[i] == 1 and S[i+1] == -1:
            # Squares i ('R') and i+1 ('L') form a boundary that everyone in the
            # adjacent runs eventually oscillates around. Since 10**100 is even,
            # each starting square contributes to whichever of the two boundary
            # squares lies an even number of steps away.
            left, right = i, i+1
            while left >= 0 and S[left] == 1:
                ans_idx = i if (i - left) % 2 == 0 else i+1
                ans[ans_idx] += 1
                left -= 1
            while right < N and S[right] == -1:
                ans_idx = i+1 if (right - i - 1) % 2 == 0 else i
                ans[ans_idx] += 1
                right += 1
    print(*ans)
if __name__ == '__main__':
main()
| knuu/competitive-programming | atcoder/abc/abc136_d.py | Python | mit | 595 |
# -*- coding: utf8 -*-
#
# Copyright (C) 2017 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import base64
from openerp import modules, models, fields, api, exceptions, _
from openerp.exceptions import ValidationError
class OdooOnlineDocumentation(models.Model):
_name = 'odoo.online.documentation'
name = fields.Char(string=u"Document's name", required=True)
path = fields.Char(string=u"Path", readonly=True)
file = fields.Binary(string=u"File", attachment=True)
doc_type_id = fields.Many2one('odoo.online.document.type', string=u"Document type")
nature = fields.Selection([('PJ', _(u"Attached document")), ('static_res', _(u"Static resource"))],
string=u"Nature", default='PJ', readonly=True)
seen_in_sales = fields.Boolean(string=u"Must be seen in sales", default=False)
seen_in_purchases = fields.Boolean(string=u"Must be seen in purchases", default=False)
seen_in_prod = fields.Boolean(string=u"Must be seen in manufacturing", default=False)
@api.multi
def remove_attachments(self):
self.env['ir.attachment'].search([('res_model', '=', self._name), ('res_id', 'in', self.ids)]).unlink()
@api.multi
def create_attachment(self, binary, name):
self.ensure_one()
if not binary:
return False
return self.env['ir.attachment'].create({
'type': 'binary',
'res_model': self._name,
'res_name': name,
'datas_fname': name,
'name': name,
'datas': binary,
'res_id': self.id,
})
@api.model
def _get_action_url(self, attachment):
url = "/web/binary/saveas?model=ir.attachment&field=datas&id=%s&filename_field=name" % attachment.id
return {
"type": "ir.actions.act_url",
"url": url,
"target": "self"
}
@api.multi
def _open_pj(self):
self.ensure_one()
attachment = self.create_attachment(self.file, self.name)
if not attachment:
raise exceptions.except_orm(_(u"Error!"), _(u"No file related to this documentation."))
return self._get_action_url(attachment)
@api.multi
def _open_static_resource(self):
path_splitted = self.path.split(os.sep)
module_path = modules.get_module_path(path_splitted[0])
file_path = module_path + os.sep + os.sep.join(path_splitted[1:])
with open(file_path) as file:
self.remove_attachments()
attachment = self.create_attachment(base64.encodestring(file.read()), path_splitted[len(path_splitted) - 1])
if not attachment:
raise exceptions.except_orm(_(u"Error!"), _(u"No file related to this documentation."))
return self._get_action_url(attachment)
@api.multi
def open_documentation(self):
"""
        Open an attached document, dispatching to other methods depending on the document's 'nature'.
"""
self.ensure_one()
self.remove_attachments()
if self.nature == u"PJ":
return self._open_pj()
elif self.nature == u"static_res":
return self._open_static_resource()
@api.model
def exist_path_doublons(self, path, id):
return self.env['odoo.online.documentation']\
.search_count([('path', '=', path),
('path', '!=', False),
('nature', '=', 'sharepoint'),
('id', '!=', id)]) > 0
@api.constrains('path', 'nature')
def _check_unique_path_for_sharepoints(self):
for rec in self:
if rec.nature == 'sharepoint':
if not rec.path:
raise ValidationError(
_(u"Path must be completed for document of type sharepoint %s" % rec.name))
if self.exist_path_doublons(rec.path, rec.id):
                    raise ValidationError(_(u"Different documents of type sharepoint cannot have the same path %s"
% rec.path))
class OdooOnlineDocumentType(models.Model):
_name = 'odoo.online.document.type'
name = fields.Char(string=u"Document type")
_sql_constraints = [('type_unique', 'UNIQUE(name)', _(u"The type must be unique."))]
| ndp-systemes/odoo-addons | odoo_online_documentation/odoo_online_documentation.py | Python | agpl-3.0 | 5,053 |
import inspect
import os
class Shim(object):
_instances = {}
def __init__(self, target, xtb):
self.target = target
self.xtb = xtb
def __repr__(self):
raise NotImplementedError()
@classmethod
def get_instance(cls, target, xtb):
oid = id(target)
if oid not in cls._instances:
cls._instances[oid] = cls(target, xtb)
return cls._instances[oid]
class ModuleShim(Shim):
def __init__(self, target, xtb):
super(ModuleShim, self).__init__(target, xtb)
self.package = False
try:
self.filename = inspect.getsourcefile(target)
except TypeError:
self.filename = None
if self.filename is not None:
if os.path.basename(self.filename) == "__init__.py":
self.package = True
self.filename = os.path.dirname(self.filename)
self.filename = self.xtb._format_filename(self.filename)
def __repr__(self):
if self.filename is None:
return repr(self.target)
return "<%s '%s' from=%r>" % (self.package and "package" or "module",
self.target.__name__,
self.filename)
| Hypernode/xtraceback | xtraceback/shim.py | Python | mit | 1,261 |
#!/usr/bin/python3
from amazonia.classes.asg_config import AsgConfig
from amazonia.classes.amz_autoscaling import AutoscalingLeaf
from amazonia.classes.block_devices_config import BlockDevicesConfig
from amazonia.classes.elb_config import ElbConfig, ElbListenersConfig
from troposphere import Template
def main():
userdata = """
#cloud-config
repo_update: true
repo_upgrade: all
packages:
- httpd
runcmd:
- service httpd start
"""
template = Template()
elb_listeners_config = [
ElbListenersConfig(
instance_port='80',
loadbalancer_port='80',
loadbalancer_protocol='HTTP',
instance_protocol='HTTP',
sticky_app_cookie=[]
),
ElbListenersConfig(
instance_port='8080',
loadbalancer_port='8080',
loadbalancer_protocol='HTTP',
instance_protocol='HTTP',
sticky_app_cookie='JSESSION'
)
]
elb_config = ElbConfig(
elb_health_check='TCP:80',
elb_log_bucket=None,
public_unit=False,
ssl_certificate_id=None,
healthy_threshold=10,
unhealthy_threshold=2,
interval=300,
timeout=30,
elb_listeners_config=elb_listeners_config
)
block_devices_config = [BlockDevicesConfig(device_name='/dev/xvda',
ebs_volume_size='15',
ebs_volume_type='gp2',
ebs_encrypted=False,
ebs_snapshot_id=None,
virtual_name=False)
]
asg_config = AsgConfig(
minsize=1,
maxsize=1,
health_check_grace_period=300,
health_check_type='ELB',
image_id='ami-dc361ebf',
instance_type='t2.nano',
userdata=userdata,
iam_instance_profile_arn=None,
block_devices_config=block_devices_config,
simple_scaling_policy_config=None
)
AutoscalingLeaf(
leaf_title='app1',
template=template,
dependencies=['MyDb:5432'],
elb_config=elb_config,
asg_config=asg_config,
availability_zones=['ap-southeast-2a', 'ap-southeast-2b', 'ap-southeast-2c'],
public_cidr={'name': 'PublicIp', 'cidr': '0.0.0.0/0'},
tree_name='tree',
cd_service_role_arn=None,
public_hosted_zone_name=None,
keypair='INSERT_YOUR_KEYPAIR_HERE'
)
print(template.to_json(indent=2, separators=(',', ': ')))
if __name__ == '__main__':
main()
| GeoscienceAustralia/Geodesy-Web-Services | aws/amazonia/test/sys_tests/test_sys_autoscaling_leaf.py | Python | bsd-3-clause | 2,660 |
print('Got this: "%s"' % input())
import sys
data = sys.stdin.readline()[:-1]
print('The meaning of life is', data, int(data) * 2)
| simontakite/sysadmin | pythonscripts/programmingpython/System/Streams/reader.py | Python | gpl-2.0 | 135 |
__version_info__ = (0, 3, 0)
__version__ = ".".join(str(x) for x in __version_info__)
| Pulgama/supriya | supriya/_version.py | Python | mit | 86 |
import os
import shutil
import logging
import json
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from colorz import order_by_hue
from django.core.files.storage import default_storage
from core.management.commands import CommunityCommand
from core.utils.configuration import DecodeConfigAction
from sources.models import ImageDownload
from future_fashion.colors import (extract_dominant_colors, get_vector_from_colors, get_colors_frame,
get_colors_individual)
log = logging.getLogger("datascope")
class Command(CommunityCommand):
"""
Example: ./manage.py match_image_colors ClothingDataCommunity -i ~/Downloads/fairy-tale.jpg -a tagged_kleding
"""
def add_arguments(self, parser):
parser.add_argument('community', type=str, nargs="?", default=self.community_model)
parser.add_argument('-a', '--args', type=str, nargs="*", default="")
parser.add_argument('-c', '--config', type=str, action=DecodeConfigAction, nargs="?", default={})
parser.add_argument('-i', '--image', type=str)
parser.add_argument('-n', '--number-colors', type=int, default=3)
parser.add_argument('-s', '--similarity', action='store_true')
def handle_inventory_matches(self, matches, destination):
for ix, match_info in enumerate(matches):
similarity, match = match_info
name, ext = os.path.splitext(match["path"])
shutil.copy2(
match["path"],
os.path.join(destination, str(ix) + "-" + str(round(similarity, ndigits=3)) + ext)
)
def handle_data_matches(self, matches, destination):
for ix, match_info in enumerate(matches):
similarity, match = match_info
uri = ImageDownload.uri_from_url(match["image"])
try:
download = ImageDownload.objects.get(uri=uri)
except ImageDownload.DoesNotExist:
continue
if not download.success:
continue
name, ext = os.path.splitext(download.body)
shutil.copy2(
os.path.join(default_storage.location, download.body),
os.path.join(destination, str(ix) + "-" + str(round(similarity, ndigits=3)) + ext)
)
def get_similarity_matches(self, colors, content, num_colors):
colors = order_by_hue(colors)
vector = get_vector_from_colors(colors)
colors_frame = get_colors_frame(content, num_colors=num_colors, by_hue=True)
log.info("Color frame shape: {}".format(colors_frame.shape))
similarity = cosine_similarity(colors_frame, np.array(vector).reshape(1, -1)).flatten()
# Find indices for ten most similar objects and sort by most similar
indices = np.argsort(similarity)[-10:]
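        # e.g. (illustrative values) np.argsort([0.1, 0.9, 0.5]) gives [0, 2, 1], so the
        # last entries index the most similar items; the reverse() below puts them first.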
matches = [(similarity[ix], content[ix],) for ix in indices]
matches.reverse()
return matches
def get_prominent_matches(self, colors, content, num_colors):
vector = get_vector_from_colors(colors)
colors_frame = get_colors_frame(content, num_colors=num_colors)
log.info("Color frame shape: {}".format(colors_frame.shape))
for num in range(0, num_colors):
color_vector = vector[num:num+3]
color_columns = colors_frame.columns[num:num+3]
color_similarity = cosine_similarity(colors_frame.loc[:,color_columns], np.array(color_vector).reshape(1, -1)).flatten()
indices = np.argsort(color_similarity)
cut_ix = next((num for num, ix in enumerate(indices[::-1]) if color_similarity[ix] < 0.99), None)
if cut_ix is None:
log.info("Terminating match at color: {}".format(num))
break
colors_frame = colors_frame.iloc[indices[-1 * cut_ix:]]
else:
log.info("Taking all {} colors into account".format(num_colors))
indices = list(colors_frame.index.values)
matches = [(prio, content[ix],) for prio, ix in enumerate(indices)]
matches.reverse()
return matches
def handle_community(self, community, *args, **options):
# Read from options
num_colors = options["number_colors"]
image = options["image"]
similarity = options["similarity"]
# Get colors from input file
main_colors, balance = extract_dominant_colors(image, num=num_colors)
# Get colors from community data
# This loads all data into memory
content = list(community.kernel.content)
if similarity:
matches = self.get_similarity_matches(main_colors, content, num_colors)
else:
matches = self.get_prominent_matches(main_colors, content, num_colors)
# Create directory for input and copy matches there
basename = os.path.basename(image)
name, ext = os.path.splitext(basename)
dest = os.path.join(default_storage.location, community.get_name(), "colorz", name)
if not os.path.exists(dest):
os.makedirs(dest, exist_ok=True)
shutil.copy2(image, os.path.join(dest, basename))
color_data = {
"input": {
"colors": [
"#{0:02x}{1:02x}{2:02x}".format(color[0], color[1], color[2])
for color in main_colors
],
"links": [
"http://www.color-hex.com/color/{0:02x}{1:02x}{2:02x}".format(color[0], color[1], color[2])
for color in main_colors
]
},
"output": [
{
"similarity": round(similarity, ndigits=3),
"colors": [
"#{0:02x}{1:02x}{2:02x}".format(color[0], color[1], color[2])
for color in get_colors_individual(match, num_colors=num_colors, space="rgb")
],
"links": [
"http://www.color-hex.com/color/{0:02x}{1:02x}{2:02x}".format(color[0], color[1], color[2])
for color in get_colors_individual(match, num_colors=num_colors, space="rgb")
]
}
for similarity, match in matches
]
}
with open(os.path.join(dest, "colors.js"), "w") as jf:
json.dump(color_data, jf, indent=4)
if community.get_name() == "fashion_data":
self.handle_data_matches(matches, dest)
else:
self.handle_inventory_matches(matches, dest)
| fako/datascope | src/future_fashion/management/commands/match_image_colors.py | Python | gpl-3.0 | 6,593 |
#!/usr/bin/env python
# encoding: utf-8
from dnslib import DNSHeader, DNSRecord, RR, A
from gevent.server import DatagramServer
import re
class DnsCache():
def __init__(self):
self.cache = dict()
def get(self, domain):
return self.cache.get(domain, None)
def set(self, domain, info):
self.cache[domain] = info
def delete(self, domain):
self.cache.pop(domain, None)
cache = DnsCache()
class DnsQueryHandle():
def __init__(self, data):
self.data = data
self.parse_request()
def parse_request(self):
self.query = DNSRecord.parse(self.data)
self.qid = self.query.header.id
self.qtype = self.query.q.qtype
self.qname = self.query.q.qname
def handle_response(self):
self.handle_request()
self.reply = DNSRecord(DNSHeader(id=self.qid, qr=1, aa=1, ra=1), q=self.query.q, a=RR(self.qname, rdata=A(self.iplist[0])))
return self.reply.pack()
def handle_request(self):
next_query = DNSRecord.question(self.qname)
next_reply = next_query.send("114.114.114.114")
iplist = re.findall('\xC0.\x00\x01\x00\x01.{6}(.{4})', next_reply)
self.iplist = ['.'.join(str(ord(x)) for x in s) for s in iplist]
class DnsServer(DatagramServer):
def handle(self, data, address):
query = DnsQueryHandle(data)
self.socket.sendto(query.handle_response(), address)
if __name__ == "__main__":
print "DnsServer running"
DnsServer("127.0.0.1:53").serve_forever() | ZenQ3/PyDNS | server.py | Python | mit | 1,588 |
# -*- coding: utf-8 -*-
#
# Converse.js documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 26 20:48:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Converse.js'
copyright = u'2014, JC Brand'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8.6'
# The full version, including alpha/beta/rc tags.
release = '0.8.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_bootstrap_theme
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).
# Path should be relative to the ``_static`` files directory.
html_logo = "_static/conversejs_small.png"
# Theme options are theme-specific and customize the look and feel of a
# theme further.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "Converse.js",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Table of Contents",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
("Homepage", "https://conversejs.org", True)
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': True,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Current Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "footer",
# Bootswatch (http://bootswatch.com/) theme.
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo".
# 'bootswatch_theme': "yeti",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Conversejsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Conversejs.tex', u'Converse.js Documentation',
u'JC Brand', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'conversejs', u'Converse.js Documentation',
[u'JC Brand'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Conversejs', u'Converse.js Documentation',
u'JC Brand', 'Conversejs', 'Open Source XMPP webchat',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| deependhulla/technomail-debian | files/html_oldx/groupoffice/modules/chat/converse.js-0.8.6/docs/source/conf.py | Python | gpl-3.0 | 9,866 |
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3'
}
}
INSTALLED_APPS = (
'memoize',
)
SECRET_KEY = "1234EFGH"
| vinu76jsr/django-memoize | test_settings.py | Python | bsd-3-clause | 148 |
import copy
import json
import re
from typing import Any, Dict, List, Mapping, Optional
import markdown
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from zerver.openapi.openapi import check_deprecated_consistency, get_openapi_return_values
from .api_arguments_table_generator import generate_data_type
REGEXP = re.compile(r"\{generate_return_values_table\|\s*(.+?)\s*\|\s*(.+)\s*\}")
class MarkdownReturnValuesTableGenerator(Extension):
def __init__(self, configs: Mapping[str, Any] = {}) -> None:
self.config: Dict[str, Any] = {}
def extendMarkdown(self, md: markdown.Markdown) -> None:
md.preprocessors.register(
APIReturnValuesTablePreprocessor(md, self.getConfigs()), "generate_return_values", 510
)
class APIReturnValuesTablePreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:
super().__init__(md)
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = REGEXP.search(line)
if not match:
continue
doc_name = match.group(2)
endpoint, method = doc_name.rsplit(":", 1)
return_values = get_openapi_return_values(endpoint, method)
if doc_name == "/events:get":
return_values = copy.deepcopy(return_values)
events = return_values["events"].pop("items", None)
text = self.render_table(return_values, 0)
# Another heading for the events documentation
text.append("\n\n## Events\n\n")
text += self.render_events(events)
else:
text = self.render_table(return_values, 0)
if len(text) > 0:
text = ["#### Return values"] + text
line_split = REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding, *text, following]
lines = lines[:loc] + text + lines[loc + 1 :]
break
else:
done = True
return lines
def render_desc(
self, description: str, spacing: int, data_type: str, return_value: Optional[str] = None
) -> str:
description = description.replace("\n", "\n" + ((spacing + 4) * " "))
if return_value is None:
# HACK: It's not clear how to use OpenAPI data to identify
# the `key` fields for objects where e.g. the keys are
# user/stream IDs being mapped to data associated with
# those IDs. We hackily describe those fields by
# requiring that the descriptions be written as `key_name:
# key_description` and parsing for that pattern; we need
# to be careful to skip cases where we'd have `Note: ...`
# on a later line.
#
# More correctly, we should be doing something that looks at the types;
            # print statements and test_api_doc_endpoint are useful for testing.
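            # For instance (illustrative description, not taken from the API data): a
            # description written as "stream_id: The ID of the stream." splits into the
            # key name "stream_id" and the key description "The ID of the stream.".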
arr = description.split(": ", 1)
if len(arr) == 1 or "\n" in arr[0]:
return (spacing * " ") + "* " + description
(key_name, key_description) = arr
return (
(spacing * " ")
+ "* "
+ key_name
+ ": "
+ '<span class="api-field-type">'
+ data_type
+ "</span> "
+ key_description
)
return (
(spacing * " ")
+ "* `"
+ return_value
+ "`: "
+ '<span class="api-field-type">'
+ data_type
+ "</span> "
+ description
)
def render_table(self, return_values: Dict[str, Any], spacing: int) -> List[str]:
IGNORE = ["result", "msg"]
ans = []
for return_value in return_values:
if return_value in IGNORE:
continue
if "oneOf" in return_values[return_value]:
# For elements using oneOf there are two descriptions. The first description
# should be at level with the oneOf and should contain the basic non-specific
# description of the endpoint. Then for each element of oneOf there is a
# specialized description for that particular case. The description used
# right below is the main description.
data_type = generate_data_type(return_values[return_value])
ans.append(
self.render_desc(
return_values[return_value]["description"], spacing, data_type, return_value
)
)
for element in return_values[return_value]["oneOf"]:
if "description" not in element:
continue
# Add the specialized description of the oneOf element.
data_type = generate_data_type(element)
ans.append(self.render_desc(element["description"], spacing + 4, data_type))
# If the oneOf element is an object schema then render the documentation
# of its keys.
if "properties" in element:
ans += self.render_table(element["properties"], spacing + 8)
continue
description = return_values[return_value]["description"]
data_type = generate_data_type(return_values[return_value])
check_deprecated_consistency(return_values[return_value], description)
ans.append(self.render_desc(description, spacing, data_type, return_value))
if "properties" in return_values[return_value]:
ans += self.render_table(return_values[return_value]["properties"], spacing + 4)
if return_values[return_value].get("additionalProperties", False):
data_type = generate_data_type(return_values[return_value]["additionalProperties"])
ans.append(
self.render_desc(
return_values[return_value]["additionalProperties"]["description"],
spacing + 4,
data_type,
)
)
if "properties" in return_values[return_value]["additionalProperties"]:
ans += self.render_table(
return_values[return_value]["additionalProperties"]["properties"],
spacing + 8,
)
if (
"items" in return_values[return_value]
and "properties" in return_values[return_value]["items"]
):
ans += self.render_table(
return_values[return_value]["items"]["properties"], spacing + 4
)
return ans
def render_events(self, events_dict: Dict[str, Any]) -> List[str]:
text: List[str] = []
# Use argument section design for better visuals
# Directly using `###` for subheading causes errors so use h3 with made up id.
argument_template = (
'<div class="api-argument"><p class="api-argument-name"><h3 id="{h3_id}">'
+ " {event_type} {op}</h3></p></div> \n{description}\n\n\n"
)
for events in events_dict["oneOf"]:
event_type: Dict[str, Any] = events["properties"]["type"]
event_type_str: str = event_type["enum"][0]
# Internal hyperlink name
h3_id: str = event_type_str
event_type_str = f'<span class="api-argument-required"> {event_type_str}</span>'
op: Optional[Dict[str, Any]] = events["properties"].pop("op", None)
op_str: str = ""
if op is not None:
op_str = op["enum"][0]
h3_id += "-" + op_str
op_str = f'<span class="api-argument-deprecated">op: {op_str}</span>'
description = events["description"]
text.append(
argument_template.format(
event_type=event_type_str, op=op_str, description=description, h3_id=h3_id
)
)
text += self.render_table(events["properties"], 0)
# This part is for adding examples of individual events
text.append("**Example**")
text.append("\n```json\n")
example = json.dumps(events["example"], indent=4, sort_keys=True)
text.append(example)
text.append("```\n\n")
return text
def makeExtension(*args: Any, **kwargs: str) -> MarkdownReturnValuesTableGenerator:
return MarkdownReturnValuesTableGenerator(kwargs)
| punchagan/zulip | zerver/lib/markdown/api_return_values_table_generator.py | Python | apache-2.0 | 9,063 |
#
# Exercises from http://learnpythonthehardway.org
#
print
# Exercise 1
print "Really? :)"
print "I'm not writing a hello world, am I?"
print 'No way!!! Where\'s the link to the next exercise?'
print 'Oh, right, "double quotes" in print; next, please'
print
# Exercise 2
print "I think my previous code shows that I know about comments"
print
# Exercise 3
print "I have no chicken, I'll count something else"
print "Number of visible stars", 272 * 11 + 8
print "Yup, python math is the same as pretty much all other math"
print
# Exercise 4
visible_stars = 6000
hemispheres = 2
visible_rural = 2000
visible_urban = 20
print "There are about", visible_stars, "stars visible to the unaided eye"
print "Only half of those, ", visible_stars / hemispheres, "are visible from a single point at any time"
print visible_rural, "will be visible at sea level in rural areas,"
print "and you might be lucky and see", visible_urban, "in an urban area"
print
# Exercise 5
milky_stars = 10e11
galaxies = 10e11
print "There are about %d in our galaxy, the milky way," % milky_stars
print "and an estimated %d galaxies in the universe" % galaxies
print "This means somewhere arround", milky_stars * galaxies, "stars in the universe"
print "(Yes, I have known about %s for %d years, but I don't like\nthe %s inplementation)" % ("format strings", 20, "python")
# sidenote: while searching for the "answers" to "how many stars", google suggested the question
# "how many stars are there in our solar system"...
# *sigh*
print
# Exercise 6
| CodeCatz/litterbox | Goranche/goranche.py | Python | mit | 1,530 |
import logging
from bearing.constants import BearingSource
from bearing.parsers.fractional import FractionalBearingParser
from bearing.parsers.rolling import RollingBearingParser
from bearing.parsers.swivel import SwivelBearingParser
logger = logging.getLogger()
class BearingParser(object):
map = {
BearingSource.ROLLING.value: RollingBearingParser,
BearingSource.FRACTIONAL.value: FractionalBearingParser,
BearingSource.SWIVEL.value: SwivelBearingParser,
}
@staticmethod
def identify(name):
source = BearingSource.ROLLING.value
if '/' in name:
source = BearingSource.FRACTIONAL.value
elif 'ш' in name.lower():
source = BearingSource.SWIVEL.value
return source
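    # Illustrative behaviour of identify() (made-up designations): "180206" -> ROLLING,
    # "1/2" -> FRACTIONAL, "ШС20" -> SWIVEL (the Cyrillic "ш" marks swivel bearings).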
@classmethod
def parse(cls, name, source=None):
source = source if source is not None else cls.identify(name)
if source not in cls.map.keys():
raise NotImplementedError('Parser for {} not implemented'.format(source))
return cls.map[source](name)
| manti-by/POD | app/bearing/parser.py | Python | bsd-3-clause | 1,056 |
"""
Rivers of Blood
(Formely code named: Ocean World Ransom)
"""
#core
import pygame
from pygame.locals import *
import time
import sys
import owr_log as log
from owr_log import Log
import owr_image
import owr_input
import yaml
import owr_game
import owr_timer
from owr_util import YamlOpen
import owr_screenplay_input
# Ocean World Ransom MEGA DATA FILE
#TODO(g): Break this bitch up!
GAME_DATA = 'data/sceneplay_000.yaml'
#GAME_DATA = 'data/defense.yaml'
class Core:
"""The core. All non-game related functions wrapped here."""
def __init__(self, title, size, data):
"""Initialize the core information."""
self.title = title
self.size = size
self.data = data
self.game = None
Log('Creating core')
# Initialize the Graphics stuff
#NOTE(g): This creates self.screen
owr_image.InitializeGraphics(self)
# Create the background surface
self.background = pygame.Surface(size)
self.background = self.background.convert()
# Create Input Handler
self.input = owr_input.InputHandler()
self.input_text = '' #TODO(g): Remove when full input mapper is finished...
def Update(self, ticks=None):
"""Update everything"""
self.game.Update(ticks=ticks)
def HandleInput(self, game):
"""Handle input"""
# Save the mouse position
game.mouse.SetPos(pygame.mouse.get_pos())
# Save the mouse button state (used for draw actions, use events for button
# down events (single fire))
game.mouse.SetButtons(pygame.mouse.get_pressed())
# Handle events through the Input Handler
self.input.Update()
#if self.input.GetAutoString():
# log.Log('Auto string: %s' % self.input.GetAutoString())
entered_string = self.input.GetNewEnteredString()
if entered_string:
log.Log('Entered string: %s' % entered_string)
#TODO(g): Create named input maps, which make the right function calls off
# of inputs. Then we can switch which maps we're using as the game state
# changes, so for menus or playing or combat, or whatever.
pass
# Handle viewport selection UI
#NOTE(g): Viewport selection comes in front of UI Popup because I made it so
# UI popup will set Viewport Select data to gagther that kind of data
if getattr(game, 'ui_select_viewport', None) != None:
owr_screenplay_input.HandleScreenplayViewportSelectInput(self, game)
# Handle popup UI for Screenplay
elif getattr(game, 'ui_popup_data', None) != None:
owr_screenplay_input.HandleScreenplayPopupInput(self, game)
# Else, handle normal input
else:
owr_screenplay_input.HandleScreenplayInput(self, game)
# # Else, there is combat going on
# elif game.combat:
# game.combat.HandleInput()
# # Else, there is dialogue going on, handle that
# elif game.dialogue:
# game.dialogue.HandleInput(self.input)
# If they are closing the window
if self.input.winClose:
game.quitting = True
def Render(self, game):
"""Handle input"""
# Clear the screen
self.background.fill((250, 250, 250))
# Render the background
game.Render(self.background)
# Blit the background
self.screen.blit(self.background, (0, 0))
# Make background visible
pygame.display.flip()
def SetGame(self, game):
self.game = game
def main(args=None):
if not args:
args = []
global GAME_DATA
data = yaml.load(YamlOpen(GAME_DATA))
# Create the Data Core
core = Core(data['window']['title'], data['window']['size'], data)
# Create the game
game = owr_game.Game(core, GAME_DATA, args=args)
Log('Starting game...')
while not game.quitting:
owr_timer.LockFrameRate(60)
core.Update()
core.HandleInput(game)
core.Render(game)
Log('Quitting')
if __name__ == '__main__':
main(sys.argv[1:])
| ghowland/sceneplay | owr.py | Python | mit | 3,918 |
#*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
import collections
from . import _jclass
class _WrappedIterator(object):
"""
Wraps a Java iterator to respect the Python 3 iterator API
"""
def __init__(self, iterator):
self.iterator = iterator
def __iter__(self):
return self.iterator
def __next__(self):
return next(self.iterator)
# Compatibility name
next = __next__
def _initialize():
_jclass.registerClassCustomizer(CollectionCustomizer())
_jclass.registerClassCustomizer(ListCustomizer())
_jclass.registerClassCustomizer(MapCustomizer())
_jclass.registerClassCustomizer(IteratorCustomizer())
_jclass.registerClassCustomizer(EnumerationCustomizer())
def isPythonSequence(v):
if isinstance(v, collections.Sequence):
if not hasattr(v.__class__, '__metaclass__') \
or v.__class__.__metaclass__ is _jclass._JavaClass:
return True
return False
def _colLength(self):
return self.size()
def _colIter(self):
return _WrappedIterator(self.iterator())
def _colDelItem(self, i):
return self.remove(i)
def _colAddAll(self, v):
if isPythonSequence(v):
r = False
for i in v:
r = self.add(i) or r
return r
else:
return self._addAll(v)
def _colRemoveAll(self, v):
if isPythonSequence(v):
r = False
for i in v:
r = self.remove(i) or r
return r
else:
return self._removeAll(v)
def _colRetainAll(self, v):
if isPythonSequence(v):
r = _jclass.JClass("java.util.ArrayList")(len(v))
for i in v:
r.add(i)
else:
r = v
return self._retainAll(r)
class CollectionCustomizer(object):
_METHODS = {
'__len__': _colLength,
'__iter__': _colIter,
'__delitem__': _colDelItem,
}
def canCustomize(self, name, jc):
if name == 'java.util.Collection':
return True
return jc.isSubclass('java.util.Collection')
def customize(self, name, jc, bases, members):
if name == 'java.util.Collection':
members.update(CollectionCustomizer._METHODS)
else:
# AddAll is handled by List
if (not jc.isSubclass("java.util.List")) and 'addAll' in members:
members['_addAll'] = members['addAll']
members['addAll'] = _colAddAll
if 'removeAll' in members:
members['_removeAll'] = members['removeAll']
members['removeAll'] = _colRemoveAll
if 'retainAll' in members:
members['_retainAll'] = members['retainAll']
members['retainAll'] = _colRetainAll
def _listGetItem(self, ndx):
if isinstance(ndx, slice):
start = ndx.start
stop = ndx.stop
if start < 0:
start = self.size() + start
if stop < 0:
stop = self.size() + stop
return self.subList(start, stop)
else:
if ndx < 0:
ndx = self.size() + ndx
return self.get(ndx)
def _listSetItem(self, ndx, v):
if isinstance(ndx, slice):
start = ndx.start
stop = ndx.stop
if start < 0:
start = self.size() + start
if stop < 0:
stop = self.size() + stop
for i in range(start, stop):
self.remove(start)
if isinstance(v, collections.Sequence):
ndx = start
for i in v:
self.add(ndx, i)
ndx += 1
else:
if ndx < 0:
ndx = self.size() + ndx
self.set(ndx, v)
def _listAddAll(self, v, v2=None):
if isPythonSequence(v):
r = False
if v2 is not None: # assume form (int, values)
for i in range(len(v2)):
r = r or self.add(v + i, v2[i])
else:
for i in v:
r = self.add(i) or r
return r
else:
return self._addAll(v)
class ListCustomizer(object):
_METHODS = {
'__setitem__': _listSetItem,
'__getitem__': _listGetItem,
}
def canCustomize(self, name, jc):
if name == 'java.util.List':
return True
return jc.isSubclass('java.util.List')
def customize(self, name, jc, bases, members):
if name == 'java.util.List':
members.update(ListCustomizer._METHODS)
else:
if 'addAll' in members:
members['_addAll'] = members['addAll']
members['addAll'] = _listAddAll
def isPythonMapping(v):
if isinstance(v, collections.Mapping):
if not hasattr(v.__class__, '__metaclass__') or \
v.__class__.__metaclass__ is _jclass._JavaClass:
return True
return False
def _mapLength(self):
return self.size()
def _mapIter(self):
return _WrappedIterator(self.keySet().iterator())
def _mapDelItem(self, i):
return self.remove(i)
def _mapGetItem(self, ndx):
return self.get(ndx)
def _mapSetItem(self, ndx, v):
self.put(ndx, v)
def _mapPutAll(self, v):
if isPythonMapping(v):
for i in v:
self.put(i, v[i])
else:
# do the regular method ...
self._putAll(v)
class MapCustomizer(object):
_METHODS = {
'__len__': _mapLength,
'__iter__': _mapIter,
'__delitem__': _mapDelItem,
'__getitem__': _mapGetItem,
'__setitem__': _mapSetItem,
}
def canCustomize(self, name, jc):
if name == 'java.util.Map':
return True
return jc.isSubclass('java.util.Map')
def customize(self, name, jc, bases, members):
if name == 'java.util.Map':
members.update(MapCustomizer._METHODS)
else:
if "putAll" in members:
members["_putAll"] = members["putAll"]
members["putAll"] = _mapPutAll
def _iterCustomNext(self):
if self.hasNext():
return self._next()
raise StopIteration
def _iterIteratorNext(self):
if self.hasNext():
return next(self)
raise StopIteration
def _iterIter(self):
return self
class IteratorCustomizer(object):
_METHODS = {
'__iter__': _iterIter,
'__next__': _iterCustomNext,
}
def canCustomize(self, name, jc):
if name == 'java.util.Iterator':
return True
return jc.isSubclass('java.util.Iterator')
def customize(self, name, jc, bases, members):
if name == 'java.util.Iterator':
members.update(IteratorCustomizer._METHODS)
elif jc.isSubclass('java.util.Iterator'):
__next__ = 'next' if 'next' in members else '__next__'
members['_next'] = members[__next__]
members[__next__] = _iterCustomNext
def _enumNext(self):
if self.hasMoreElements():
return self.nextElement()
raise StopIteration
def _enumIter(self):
return self
class EnumerationCustomizer(object):
_METHODS = {
'next': _enumNext,
'__next__': _enumNext,
'__iter__': _enumIter,
}
def canCustomize(self, name, jc):
return name == 'java.util.Enumeration'
def customize(self, name, jc, bases, members):
members.update(EnumerationCustomizer._METHODS)
| ktan2020/jpype | jpype/_jcollection.py | Python | apache-2.0 | 8,284 |
#!/usr/bin/env python
import sys, os
import subprocess
import struct
import hashlib
RRPACK_MAGIC = "PANDA_RR"
# PANDA Packed RR file format (all integers are little-endian):
# 0x00: magic "PANDA_RR"
# 0x08: uint64_t num_instructions
# 0x10: MD5 (16 bytes) of remaining data
# 0x20: archive data in .tar.xz format
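# As a sketch only (not used by this tool), a matching 0x20-byte header could be
# built with the same struct layout that is unpacked below:
#   header = struct.pack("<8sQ16s", RRPACK_MAGIC, num_instructions, md5_digest)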
if len(sys.argv) != 2:
print >>sys.stderr, "usage: %s <filename.rr>" % sys.argv[0]
sys.exit(1)
infname = sys.argv[1]
# Get file info
try:
with open(infname, 'rb') as f:
magic, num_guest_insns, file_digest = struct.unpack("<8sQ16s", f.read(0x20))
if magic != RRPACK_MAGIC:
print >>sys.stderr, infname, "is not in PANDA Record/Replay format"
sys.exit(1)
print "Verifying checksum...",
m = hashlib.md5()
while True:
data = f.read(4096)
if not data: break
m.update(data)
digest = m.digest()
if digest != file_digest:
print "FAILED. Aborting."
sys.exit(1)
else:
print "Success."
f.seek(0x20)
print "Unacking RR log %s with %d instructions..." % (infname, num_guest_insns),
subprocess.check_call(['tar', 'xJvf', '-'], stdin=f)
print "Done."
except EnvironmentError:
print >>sys.stderr, "Failed to open", infname
sys.exit(1)
| KernelAnalysisPlatform/kvalgrind | scripts/rrunpack.py | Python | gpl-3.0 | 1,349 |
# Generated by Django 2.2.13 on 2020-06-22 14:01
import django.contrib.postgres.fields.jsonb
import django.core.serializers.json
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sigad', '0051_auto_20200520_2037'),
]
operations = [
migrations.AddField(
model_name='urlshortener',
name='link_absoluto',
field=models.BooleanField(choices=[(True, 'Sim'), (False, 'Não')], default=False, verbose_name='Link Absoluto'),
),
migrations.CreateModel(
name='ShortRedirect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('metadata', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=None, encoder=django.core.serializers.json.DjangoJSONEncoder, null=True, verbose_name='Metadados')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('url', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='short_set', to='sigad.UrlShortener', verbose_name='UrlShortner')),
],
options={
'ordering': ('-created',),
},
),
]
| cmjatai/cmj | cmj/sigad/migrations/0052_auto_20200622_1101.py | Python | gpl-3.0 | 1,351 |
#!/usr/bin/env python
"""Generates 4-in-a-row board positions, in integer format.
These are positions which are one piece away from winning.
"""
print """#ifndef FOUR_IN_A_ROW_BITSETS_H
#define FOUR_IN_A_ROW_BITSETS_H
/* Automatically generated by gen-four-in-a-row-bitsets.py */
const long four_in_a_row_bitsets[] = {"""
# The magic numbers were derived for gen-winning-bitsets.py and later copied
print "\t/* rows */"
for i in xrange(0, 5):
for x in xrange(0, 5):
print "\t%d," % ((0x1f & ~(0x1 << x)) << (5 * i))
print "\t/* columns */"
for i in xrange(0, 5):
for x in xrange(0, 5):
print "\t%d," % ((0x108421 << i) & ~(0x1 << (5 * x + i)))
print "\t/* diagonals */"
for i in xrange(0, 5):
print "\t%d," % (0x1041041 & ~(0x1 << (6 * i)))
print "\t%d," % (0x111110 & ~(0x1 << (4 * (i + 1))))
print """};
const int n_four_in_a_row_bitsets
= sizeof(four_in_a_row_bitsets) / sizeof(four_in_a_row_bitsets[0]);
#endif /* FOUR_IN_A_ROW_BITSETS_H */"""
| adamhooper/code-from-adamhoopers-school-days | mcgill-se/ECSE426/ass1/helpers/gen-four-in-a-row-bitsets.py | Python | unlicense | 969 |
# -*- encoding:utf-8
from django.shortcuts import render,redirect
from django.http import HttpResponse,JsonResponse,HttpResponseBadRequest
from .models import Record
from .forms import RecordDate,CreateRecord
from datetime import datetime
from django.db.models import Sum
import json
import urllib
#from seconds to "hours:minutes"
# (convert a count of seconds into hours and minutes)
def sec2times(second):
hours = second / 3600
minutes = (second % 3600) / 60
return "{0:0>2}".format(hours)+":"+"{0:0>2}".format(minutes)
def sec2days(second):
days = second / (60*60*24)
hours = (second % (60*60*24) ) / 3600
minutes = ( (second % (60*60*24) ) % 3600 ) / 60
return str(days)+ \
"day {0:0>2}hour".format(hours)+ \
" {0:0>2}minute".format(minutes)
def sec2hours(second):
return "%.1f" % (second/3600.0)
# index page
def index(request):
    # accessed via POST
if request.method == 'POST':
form = RecordDate(request.POST)
        # the form input is valid
if form.is_valid():
            # pull the date out of the form
date=form.cleaned_data['date']
            # format the date according to the format string
strdate=date.strftime('%Y/%m/%d');
            # look up the records for that date
recordModelList=Record.objects.filter(date=date)
            # holds the records to display on the page
recordlist=list()
            # build the records to display while computing the total
total_time=0
for l in recordModelList:
recordlist.append({'id':l.id,'time':sec2times(l.second)})
total_time = total_time + l.second
form2 = CreateRecord()
return render(request,'counter/index.html',{'form':RecordDate(),'form2':form2,'record_date':strdate,'total_time':'total '+sec2times(total_time),'records':recordlist})
    # accessed via GET
else:
form = RecordDate()
d = {'form':form,'records':[]}
return render(request, 'counter/index.html', d)
# add a record
def add(request):
if request.method == 'POST':
form = CreateRecord(request.POST)
if form.is_valid():
date=form.cleaned_data['date2']
seconds=form.cleaned_data['seconds']
            # compute the total already recorded for this date
strdate=date.strftime('%Y/%m/%d');
recordModelList=Record.objects.filter(date=date)
if len(recordModelList)==0:
second_sum=0
else:
second_sum=recordModelList.aggregate(Sum('second'))['second__sum']
            # check that the amount to add plus the existing total does not exceed one day
if (second_sum+seconds) < 24*60*60 :
Record.objects.create(date=date,second=seconds)
return JsonResponse({'form_err':''})
else:
return JsonResponse({'form_err':'TOTAL_IS_OVER'})
return JsonResponse({'form_err':str(form.errors)})
# edit a record
def edit(request, editing_id):
return render(request, 'counter/edit.html', {})
# delete a record
def delete(request):
if request.method == 'DELETE':
print (str(request.body)).split("&")[0].split("=")[1]
rid=int( (str(request.body)).split("&")[0].split("=")[1] )
Record.objects.get(id=rid).delete()
return HttpResponse('Delete')
# show all records
def exportRecords(request):
string=""
for i in Record.objects.all().order_by("date","id"):
string =string +"%s#%d," % (str(i.date),i.second)
return JsonResponse({'data':string[:-1]})
# import all records
def importRecords(request):
    # decode the URL encoding
data=urllib.unquote(request.body)
# 先頭が"data="と一致する
try:
if data[0:5] == "data=":
datalist=list()
checklist=dict()
strdatalist=data[5:].split(',')
for i in strdatalist:
d=i.split('#')
tdate = datetime.strptime(d[0],'%Y-%m-%d')
seconds = int(d[1])
if seconds < 60 or seconds >= 24*60*60:
raise ValueError
datalist.append( (tdate,seconds))
if tdate in checklist:
checklist[tdate]=checklist[tdate]+seconds
else:
checklist[tdate]=seconds
            # check that no single day's total exceeds 24 hours
for c in checklist.values():
if c >= 24*60*60:
raise ValueError
else:
raise ValueError
except ValueError:
return HttpResponseBadRequest()
    # add the records one by one
for d in datalist:
Record.objects.create(date=d[0],second=d[1])
return HttpResponse("OK")
# delete all records
def clearAllRecords(request):
Record.objects.all().delete()
#204 No Content
return HttpResponse(status=204)
def total(request):
total=Record.objects.aggregate(second_sum=Sum('second'))
sec=int(total['second_sum'])
return HttpResponse( sec2days( sec ) +"\n<br />("+ sec2hours( sec ) + "hour)" )
| y3kd4ad/counter | counter/views.py | Python | mit | 5,310 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
from frappe import _, bold
from frappe.utils import cint
from frappe.model.naming import validate_name
from frappe.model.dynamic_links import get_dynamic_link_map
from frappe.utils.password import rename_password
from frappe.model.utils.user_settings import sync_user_settings, update_user_settings_data
@frappe.whitelist()
def update_document_title(doctype, docname, title_field=None, old_title=None, new_title=None, new_name=None, merge=False):
"""
Update title from header in form view
"""
if docname and new_name and not docname == new_name:
docname = rename_doc(doctype=doctype, old=docname, new=new_name, merge=merge)
if old_title and new_title and not old_title == new_title:
frappe.db.set_value(doctype, docname, title_field, new_title)
frappe.msgprint(_('Saved'), alert=True, indicator='green')
return docname
def rename_doc(doctype, old, new, force=False, merge=False, ignore_permissions=False, ignore_if_exists=False, show_alert=True):
"""
Renames a doc(dt, old) to doc(dt, new) and
updates all linked fields of type "Link"
"""
if not frappe.db.exists(doctype, old):
return
if ignore_if_exists and frappe.db.exists(doctype, new):
return
if old==new:
frappe.msgprint(_('Please select a new name to rename'))
return
force = cint(force)
merge = cint(merge)
meta = frappe.get_meta(doctype)
# call before_rename
old_doc = frappe.get_doc(doctype, old)
out = old_doc.run_method("before_rename", old, new, merge) or {}
new = (out.get("new") or new) if isinstance(out, dict) else (out or new)
if doctype != "DocType":
new = validate_rename(doctype, new, meta, merge, force, ignore_permissions)
if not merge:
rename_parent_and_child(doctype, old, new, meta)
else:
update_assignments(old, new, doctype)
# update link fields' values
link_fields = get_link_fields(doctype)
update_link_field_values(link_fields, old, new, doctype)
rename_dynamic_links(doctype, old, new)
# save the user settings in the db
update_user_settings(old, new, link_fields)
if doctype=='DocType':
rename_doctype(doctype, old, new, force)
update_attachments(doctype, old, new)
rename_versions(doctype, old, new)
# call after_rename
new_doc = frappe.get_doc(doctype, new)
# copy any flags if required
new_doc._local = getattr(old_doc, "_local", None)
new_doc.run_method("after_rename", old, new, merge)
if not merge:
rename_password(doctype, old, new)
# update user_permissions
frappe.db.sql("""UPDATE `tabDefaultValue` SET `defvalue`=%s WHERE `parenttype`='User Permission'
AND `defkey`=%s AND `defvalue`=%s""", (new, doctype, old))
if merge:
new_doc.add_comment('Edit', _("merged {0} into {1}").format(frappe.bold(old), frappe.bold(new)))
else:
new_doc.add_comment('Edit', _("renamed from {0} to {1}").format(frappe.bold(old), frappe.bold(new)))
if merge:
frappe.delete_doc(doctype, old)
frappe.clear_cache()
frappe.enqueue('frappe.utils.global_search.rebuild_for_doctype', doctype=doctype)
if show_alert:
frappe.msgprint(_('Document renamed from {0} to {1}').format(bold(old), bold(new)), alert=True, indicator='green')
return new
def update_assignments(old, new, doctype):
old_assignments = frappe.parse_json(frappe.db.get_value(doctype, old, '_assign')) or []
new_assignments = frappe.parse_json(frappe.db.get_value(doctype, new, '_assign')) or []
common_assignments = list(set(old_assignments).intersection(new_assignments))
for user in common_assignments:
# delete todos linked to old doc
todos = frappe.db.get_all('ToDo',
{
'owner': user,
'reference_type': doctype,
'reference_name': old,
},
['name', 'description']
)
for todo in todos:
frappe.delete_doc('ToDo', todo.name)
unique_assignments = list(set(old_assignments + new_assignments))
frappe.db.set_value(doctype, new, '_assign', frappe.as_json(unique_assignments, indent=0))
def update_user_settings(old, new, link_fields):
'''
Update the user settings of all the linked doctypes while renaming.
'''
	# store the user settings data from redis to the db
sync_user_settings()
if not link_fields: return
# find the user settings for the linked doctypes
linked_doctypes = set([d.parent for d in link_fields if not d.issingle])
user_settings_details = frappe.db.sql('''SELECT `user`, `doctype`, `data`
FROM `__UserSettings`
WHERE `data` like %s
AND `doctype` IN ('{doctypes}')'''.format(doctypes="', '".join(linked_doctypes)), (old), as_dict=1)
# create the dict using the doctype name as key and values as list of the user settings
from collections import defaultdict
user_settings_dict = defaultdict(list)
for user_setting in user_settings_details:
user_settings_dict[user_setting.doctype].append(user_setting)
# update the name in linked doctype whose user settings exists
for fields in link_fields:
user_settings = user_settings_dict.get(fields.parent)
if user_settings:
for user_setting in user_settings:
update_user_settings_data(user_setting, "value", old, new, "docfield", fields.fieldname)
else:
continue
def update_attachments(doctype, old, new):
try:
if old != "File Data" and doctype != "DocType":
frappe.db.sql("""update `tabFile` set attached_to_name=%s
where attached_to_name=%s and attached_to_doctype=%s""", (new, old, doctype))
except frappe.db.ProgrammingError as e:
if not frappe.db.is_column_missing(e):
raise
def rename_versions(doctype, old, new):
frappe.db.sql("""UPDATE `tabVersion` SET `docname`=%s WHERE `ref_doctype`=%s AND `docname`=%s""",
(new, doctype, old))
def rename_parent_and_child(doctype, old, new, meta):
# rename the doc
frappe.db.sql("UPDATE `tab{0}` SET `name`={1} WHERE `name`={1}".format(doctype, '%s'), (new, old))
update_autoname_field(doctype, new, meta)
update_child_docs(old, new, meta)
def update_autoname_field(doctype, new, meta):
# update the value of the autoname field on rename of the docname
if meta.get('autoname'):
field = meta.get('autoname').split(':')
if field and field[0] == "field":
frappe.db.sql("UPDATE `tab{0}` SET `{1}`={2} WHERE `name`={2}".format(doctype, field[1], '%s'), (new, new))
def validate_rename(doctype, new, meta, merge, force, ignore_permissions):
# using for update so that it gets locked and someone else cannot edit it while this rename is going on!
exists = frappe.db.sql("select name from `tab{doctype}` where name=%s for update".format(doctype=doctype), new)
exists = exists[0][0] if exists else None
if merge and not exists:
frappe.msgprint(_("{0} {1} does not exist, select a new target to merge").format(doctype, new), raise_exception=1)
if exists and exists != new:
# for fixing case, accents
exists = None
if (not merge) and exists:
frappe.msgprint(_("Another {0} with name {1} exists, select another name").format(doctype, new), raise_exception=1)
if not (ignore_permissions or frappe.permissions.has_permission(doctype, "write", raise_exception=False)):
frappe.msgprint(_("You need write permission to rename"), raise_exception=1)
if not (force or ignore_permissions) and not meta.allow_rename:
frappe.msgprint(_("{0} not allowed to be renamed").format(_(doctype)), raise_exception=1)
# validate naming like it's done in doc.py
new = validate_name(doctype, new, merge=merge)
return new
def rename_doctype(doctype, old, new, force=False):
# change options for fieldtype Table, Table MultiSelect and Link
fields_with_options = ("Link",) + frappe.model.table_fields
for fieldtype in fields_with_options:
update_options_for_fieldtype(fieldtype, old, new)
# change options where select options are hardcoded i.e. listed
select_fields = get_select_fields(old, new)
update_link_field_values(select_fields, old, new, doctype)
update_select_field_values(old, new)
# change parenttype for fieldtype Table
update_parenttype_values(old, new)
def update_child_docs(old, new, meta):
# update "parent"
for df in meta.get_table_fields():
frappe.db.sql("update `tab%s` set parent=%s where parent=%s" \
% (df.options, '%s', '%s'), (new, old))
def update_link_field_values(link_fields, old, new, doctype):
for field in link_fields:
if field['issingle']:
try:
single_doc = frappe.get_doc(field['parent'])
if single_doc.get(field['fieldname'])==old:
single_doc.set(field['fieldname'], new)
# update single docs using ORM rather then query
# as single docs also sometimes sets defaults!
single_doc.flags.ignore_mandatory = True
single_doc.save(ignore_permissions=True)
except ImportError:
# fails in patches where the doctype has been renamed
# or no longer exists
pass
else:
# because the table hasn't been renamed yet!
parent = field['parent'] if field['parent']!=new else old
frappe.db.sql("""
update `tab{table_name}` set `{fieldname}`=%s
where `{fieldname}`=%s""".format(
table_name=parent,
fieldname=field['fieldname']), (new, old))
# update cached link_fields as per new
if doctype=='DocType' and field['parent'] == old:
field['parent'] = new
def get_link_fields(doctype):
# get link fields from tabDocField
if not frappe.flags.link_fields:
frappe.flags.link_fields = {}
if not doctype in frappe.flags.link_fields:
link_fields = frappe.db.sql("""\
select parent, fieldname,
(select issingle from tabDocType dt
where dt.name = df.parent) as issingle
from tabDocField df
where
df.options=%s and df.fieldtype='Link'""", (doctype,), as_dict=1)
# get link fields from tabCustom Field
custom_link_fields = frappe.db.sql("""\
select dt as parent, fieldname,
(select issingle from tabDocType dt
where dt.name = df.dt) as issingle
from `tabCustom Field` df
where
df.options=%s and df.fieldtype='Link'""", (doctype,), as_dict=1)
# add custom link fields list to link fields list
link_fields += custom_link_fields
# remove fields whose options have been changed using property setter
property_setter_link_fields = frappe.db.sql("""\
select ps.doc_type as parent, ps.field_name as fieldname,
(select issingle from tabDocType dt
where dt.name = ps.doc_type) as issingle
from `tabProperty Setter` ps
where
ps.property_type='options' and
ps.field_name is not null and
ps.value=%s""", (doctype,), as_dict=1)
link_fields += property_setter_link_fields
frappe.flags.link_fields[doctype] = link_fields
return frappe.flags.link_fields[doctype]
def update_options_for_fieldtype(fieldtype, old, new):
if frappe.conf.developer_mode:
for name in frappe.db.sql_list("""select parent from
tabDocField where options=%s""", old):
doctype = frappe.get_doc("DocType", name)
save = False
for f in doctype.fields:
if f.options == old:
f.options = new
save = True
if save:
doctype.save()
else:
frappe.db.sql("""update `tabDocField` set options=%s
where fieldtype=%s and options=%s""", (new, fieldtype, old))
frappe.db.sql("""update `tabCustom Field` set options=%s
where fieldtype=%s and options=%s""", (new, fieldtype, old))
frappe.db.sql("""update `tabProperty Setter` set value=%s
where property='options' and value=%s""", (new, old))
def get_select_fields(old, new):
"""
get select type fields where doctype's name is hardcoded as
new line separated list
"""
# get link fields from tabDocField
select_fields = frappe.db.sql("""
select parent, fieldname,
(select issingle from tabDocType dt
where dt.name = df.parent) as issingle
from tabDocField df
where
df.parent != %s and df.fieldtype = 'Select' and
df.options like {0} """.format(frappe.db.escape('%' + old + '%')), (new,), as_dict=1)
# get link fields from tabCustom Field
custom_select_fields = frappe.db.sql("""
select dt as parent, fieldname,
(select issingle from tabDocType dt
where dt.name = df.dt) as issingle
from `tabCustom Field` df
where
df.dt != %s and df.fieldtype = 'Select' and
df.options like {0} """ .format(frappe.db.escape('%' + old + '%')), (new,), as_dict=1)
# add custom link fields list to link fields list
select_fields += custom_select_fields
# remove fields whose options have been changed using property setter
property_setter_select_fields = frappe.db.sql("""
select ps.doc_type as parent, ps.field_name as fieldname,
(select issingle from tabDocType dt
where dt.name = ps.doc_type) as issingle
from `tabProperty Setter` ps
where
ps.doc_type != %s and
ps.property_type='options' and
ps.field_name is not null and
ps.value like {0} """.format(frappe.db.escape('%' + old + '%')), (new,), as_dict=1)
select_fields += property_setter_select_fields
return select_fields
def update_select_field_values(old, new):
frappe.db.sql("""
update `tabDocField` set options=replace(options, %s, %s)
where
parent != %s and fieldtype = 'Select' and
(options like {0} or options like {1})"""
.format(frappe.db.escape('%' + '\n' + old + '%'), frappe.db.escape('%' + old + '\n' + '%')), (old, new, new))
frappe.db.sql("""
update `tabCustom Field` set options=replace(options, %s, %s)
where
dt != %s and fieldtype = 'Select' and
(options like {0} or options like {1})"""
.format(frappe.db.escape('%' + '\n' + old + '%'), frappe.db.escape('%' + old + '\n' + '%')), (old, new, new))
frappe.db.sql("""
update `tabProperty Setter` set value=replace(value, %s, %s)
where
doc_type != %s and field_name is not null and
property='options' and
(value like {0} or value like {1})"""
.format(frappe.db.escape('%' + '\n' + old + '%'), frappe.db.escape('%' + old + '\n' + '%')), (old, new, new))
def update_parenttype_values(old, new):
child_doctypes = frappe.db.get_all('DocField',
fields=['options', 'fieldname'],
filters={
'parent': new,
'fieldtype': ['in', frappe.model.table_fields]
}
)
custom_child_doctypes = frappe.db.get_all('Custom Field',
fields=['options', 'fieldname'],
filters={
'dt': new,
'fieldtype': ['in', frappe.model.table_fields]
}
)
child_doctypes += custom_child_doctypes
fields = [d['fieldname'] for d in child_doctypes]
property_setter_child_doctypes = frappe.db.sql("""\
select value as options from `tabProperty Setter`
where doc_type=%s and property='options' and
field_name in ("%s")""" % ('%s', '", "'.join(fields)),
		(new,), as_dict=1)
child_doctypes += property_setter_child_doctypes
child_doctypes = (d['options'] for d in child_doctypes)
for doctype in child_doctypes:
frappe.db.sql("""\
update `tab%s` set parenttype=%s
where parenttype=%s""" % (doctype, '%s', '%s'),
(new, old))
def rename_dynamic_links(doctype, old, new):
for df in get_dynamic_link_map().get(doctype, []):
# dynamic link in single, just one value to check
if frappe.get_meta(df.parent).issingle:
refdoc = frappe.db.get_singles_dict(df.parent)
if refdoc.get(df.options)==doctype and refdoc.get(df.fieldname)==old:
frappe.db.sql("""update tabSingles set value=%s where
field=%s and value=%s and doctype=%s""", (new, df.fieldname, old, df.parent))
else:
# because the table hasn't been renamed yet!
parent = df.parent if df.parent != new else old
frappe.db.sql("""update `tab{parent}` set {fieldname}=%s
where {options}=%s and {fieldname}=%s""".format(parent = parent,
fieldname=df.fieldname, options=df.options), (new, doctype, old))
def bulk_rename(doctype, rows=None, via_console = False):
"""Bulk rename documents
:param doctype: DocType to be renamed
:param rows: list of documents as `((oldname, newname), ..)`"""
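	# Usage sketch (hypothetical doctype and names, not taken from this codebase):
	# bulk_rename("Item", rows=[("OLD-ITEM-0001", "NEW-ITEM-0001")], via_console=True)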
if not rows:
frappe.throw(_("Please select a valid csv file with data"))
if not via_console:
max_rows = 500
if len(rows) > max_rows:
frappe.throw(_("Maximum {0} rows allowed").format(max_rows))
rename_log = []
for row in rows:
# if row has some content
if len(row) > 1 and row[0] and row[1]:
try:
if rename_doc(doctype, row[0], row[1]):
msg = _("Successful: {0} to {1}").format(row[0], row[1])
frappe.db.commit()
else:
msg = _("Ignored: {0} to {1}").format(row[0], row[1])
except Exception as e:
msg = _("** Failed: {0} to {1}: {2}").format(row[0], row[1], repr(e))
frappe.db.rollback()
if via_console:
print(msg)
else:
rename_log.append(msg)
frappe.enqueue('frappe.utils.global_search.rebuild_for_doctype', doctype=doctype)
if not via_console:
return rename_log
def update_linked_doctypes(doctype, docname, linked_to, value, ignore_doctypes=None):
"""
	linked_doctype_info_list = list formed by the get_fetch_fields() function
	docname = name of the Master DocType document in which modifications are made
	value = value of the field that is set in other DocTypes by fetching from the Master DocType
"""
linked_doctype_info_list = get_fetch_fields(doctype, linked_to, ignore_doctypes)
for d in linked_doctype_info_list:
frappe.db.sql("""
update
`tab{doctype}`
set
{linked_to_fieldname} = "{value}"
where
{master_fieldname} = {docname}
and {linked_to_fieldname} != "{value}"
""".format(
doctype = d['doctype'],
linked_to_fieldname = d['linked_to_fieldname'],
value = value,
master_fieldname = d['master_fieldname'],
docname = frappe.db.escape(docname)
))
def get_fetch_fields(doctype, linked_to, ignore_doctypes=None):
"""
doctype = Master DocType in which the changes are being made
	linked_to = DocType name of the field that is being updated in Master
	This function fetches a list of all DocTypes where both doctype and linked_to are found
	as link fields.
	Forms a list of dicts in the form -
	[{doctype: , master_fieldname: , linked_to_fieldname: }]
where
doctype = DocType where changes need to be made
master_fieldname = Fieldname where options = doctype
linked_to_fieldname = Fieldname where options = linked_to
"""
master_list = get_link_fields(doctype)
linked_to_list = get_link_fields(linked_to)
out = []
from itertools import product
product_list = product(master_list, linked_to_list)
for d in product_list:
linked_doctype_info = frappe._dict()
if d[0]['parent'] == d[1]['parent'] \
and (not ignore_doctypes or d[0]['parent'] not in ignore_doctypes) \
and not d[1]['issingle']:
linked_doctype_info['doctype'] = d[0]['parent']
linked_doctype_info['master_fieldname'] = d[0]['fieldname']
linked_doctype_info['linked_to_fieldname'] = d[1]['fieldname']
out.append(linked_doctype_info)
return out
| adityahase/frappe | frappe/model/rename_doc.py | Python | mit | 18,550 |
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from ..utils import check_solr
def setup():
check_solr()
| ruimashita/django-haystack | test_haystack/spatial/__init__.py | Python | bsd-3-clause | 165 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djignsdk.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| drayanaindra/django-ignsdk | djignsdk/manage.py | Python | bsd-3-clause | 251 |
"""
This module defines the SwitchCtl class, used for controlling switches
Copyright 2011 Red Hat, Inc.
Licensed under the GNU General Public License, version 2 as
published by the Free Software Foundation; see COPYING for details.
"""
__author__ = """
jpirko@redhat.com (Jiri Pirko)
"""
import logging
import copy
import imp
from SwitchConfigParse import SwitchConfigParse
class SwitchCtl:
def __init__(self, config_xml):
parse = SwitchConfigParse()
self._config = parse.parse_switch_config(config_xml)
def dump_config(self):
return copy.deepcopy(self._config)
def _set_driver(self):
driver_name = self._config["info"]["driver"]
path = "Switch/Drivers/%s" % driver_name
fp, pathname, description = imp.find_module(path)
module = imp.load_module(path, fp, pathname, description)
driver_class = getattr(module, driver_name)
self._driver = driver_class(self._config)
def init(self):
self._set_driver()
self._driver.init()
def configure(self):
self._driver.configure()
def cleanup(self):
self._driver.cleanup()
| jiriprochazka/lnst | obsolete/Switch/SwitchCtl.py | Python | gpl-2.0 | 1,140 |
import _plotly_utils.basevalidators
class Tick0Validator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self, plotly_name="tick0", parent_name="layout.ternary.caxis", **kwargs
):
super(Tick0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
implied_edits=kwargs.pop("implied_edits", {"tickmode": "linear"}),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/layout/ternary/caxis/_tick0.py | Python | mit | 493 |
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the BackendViewSet
"""
from urllib2 import HTTPError, URLError
from backend.serializers.serializers import FullSerializer
from rest_framework import status, viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.exceptions import NotAcceptable
from ovs.dal.lists.backendlist import BackendList
from ovs.dal.lists.backendtypelist import BackendTypeList
from ovs.dal.hybrids.backend import Backend
from backend.decorators import return_object, return_list, load, required_roles, log
from ovs.extensions.api.client import OVSClient
class BackendViewSet(viewsets.ViewSet):
"""
Information about backends
"""
permission_classes = (IsAuthenticated,)
prefix = r'backends'
base_name = 'backends'
@log()
@required_roles(['read'])
@return_list(Backend)
@load()
def list(self, backend_type=None, ip=None, port=None, client_id=None, client_secret=None, contents=None):
"""
Overview of all backends (from a certain type, if given) on the local node (or a remote one)
"""
if ip is None:
if backend_type is None:
return BackendList.get_backends()
return BackendTypeList.get_backend_type_by_code(backend_type).backends
client = OVSClient(ip, port, client_id, client_secret)
try:
remote_backends = client.get('/backends/', params={'backend_type': backend_type,
'contents': '' if contents is None else contents})
except (HTTPError, URLError):
raise NotAcceptable('Could not load remote backends')
backend_list = []
for entry in remote_backends['data']:
backend = type('Backend', (), entry)()
backend_list.append(backend)
return backend_list
@log()
@required_roles(['read'])
@return_object(Backend)
@load(Backend)
def retrieve(self, backend):
"""
Load information about a given backend
"""
return backend
@log()
@required_roles(['read', 'write', 'manage'])
@load()
def create(self, request):
"""
Creates a Backend
"""
serializer = FullSerializer(Backend, instance=Backend(), data=request.DATA)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| sql-analytics/openvstorage | webapps/api/backend/views/backends.py | Python | apache-2.0 | 3,144 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""A filter for signals which either filters or passes them."""
import functools
from PyQt5.QtCore import QObject
from qutebrowser.utils import debug, log, objreg
class SignalFilter(QObject):
"""A filter for signals.
Signals are only passed to the parent TabbedBrowser if they originated in
the currently shown widget.
Attributes:
_win_id: The window ID this SignalFilter is associated with.
Class attributes:
BLACKLIST: List of signal names which should not be logged.
"""
BLACKLIST = ['cur_scroll_perc_changed', 'cur_progress',
'cur_statusbar_message', 'cur_link_hovered']
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._win_id = win_id
def create(self, signal, tab):
"""Factory for partial _filter_signals functions.
Args:
signal: The pyqtSignal to filter.
tab: The WebView to create filters for.
Return:
            A partial function calling _filter_signals with a signal.
"""
return functools.partial(self._filter_signals, signal, tab)
def _filter_signals(self, signal, tab, *args):
"""Filter signals and trigger TabbedBrowser signals if needed.
Triggers signal if the original signal was sent from the _current_ tab
and not from any other one.
The original signal does not matter, since we get the new signal and
all args.
Args:
signal: The signal to emit if the sender was the current widget.
tab: The WebView which the filter belongs to.
*args: The args to pass to the signal.
"""
log_signal = debug.signal_name(signal) not in self.BLACKLIST
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
try:
tabidx = tabbed_browser.indexOf(tab)
except RuntimeError:
# The tab has been deleted already
return
if tabidx == tabbed_browser.currentIndex():
if log_signal:
log.signals.debug("emitting: {} (tab {})".format(
debug.dbg_signal(signal, args), tabidx))
signal.emit(*args)
else:
if log_signal:
log.signals.debug("ignoring: {} (tab {})".format(
debug.dbg_signal(signal, args), tabidx))
| Kingdread/qutebrowser | qutebrowser/browser/signalfilter.py | Python | gpl-3.0 | 3,233 |
from __future__ import print_function
import random
import tweepy
from spacenamer.generator import spacename
from words import WORDS
def generate_status(word=None):
at_reply = False
if word is None:
word = random.choice(WORDS)
if word[0] == '@':
word = word[1:]
at_reply = True
word = word.upper()
budget = 140 - len(word) - len(': ') - ((len(word) - 1))
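    # Worked example (illustrative): for word "MOON" the budget is
    # 140 - 4 - 2 - 3 = 131; the (len(word) - 1) term reserves the spaces
    # that join the backronym words together.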
if at_reply:
budget -= 2 # ".@SOMEFOLLOWER"
sn = ' '.join(spacename(word, budget=budget))
if at_reply:
return '.@{0}: {1}'.format(word, sn)
else:
return '{0}: {1}'.format(word, sn)
def authenticate(api_key, api_secret, access_key, access_secret):
auth = tweepy.OAuthHandler(api_key, api_secret)
auth.set_access_token(access_key, access_secret)
return tweepy.API(auth)
def post(twitter, status, dryrun=False):
"""Post status to Twitter."""
if dryrun is False:
twitter.update_status(status=status)
else:
print('{} ({})'.format(status, len(status)))
| ddbeck/SpaceNamer | spacenamer/publisher.py | Python | mit | 1,043 |
import os
import time
import gp
import sys
import util
import tempfile
import numpy as np
import math
import numpy.random as npr
import scipy.linalg as spla
import scipy.stats as sps
import scipy.optimize as spo
import cPickle
import matplotlib.pyplot as plt
from Locker import *
def init(expt_dir, arg_string):
args = util.unpack_args(arg_string)
return GPEIConstrainedChooser(expt_dir, **args)
"""
Chooser module for the Gaussian process expected improvement per
second (EI) acquisition function. Candidates are sampled densely in
the unit hypercube and then a subset of the most promising points are
optimized to maximize EI per second over hyperparameter samples.
Slice sampling is used to sample Gaussian process hyperparameters for
two GPs, one over the objective function and the other over the
running time of the algorithm.
"""
class GPEIConstrainedChooser:
def __init__(self, expt_dir, covar="Matern52", mcmc_iters=10,
pending_samples=100, noiseless=False, burnin=100,
grid_subset=20, constraint_violating_value=-1):
self.cov_func = getattr(gp, covar)
self.locker = Locker()
self.state_pkl = os.path.join(expt_dir, self.__module__ + ".pkl")
self.stats_file = os.path.join(expt_dir,
self.__module__ + "_hyperparameters.txt")
self.mcmc_iters = int(mcmc_iters)
self.burnin = int(burnin)
self.needs_burnin = True
self.pending_samples = pending_samples
self.D = -1
self.hyper_iters = 1
# Number of points to optimize EI over
self.grid_subset = int(grid_subset)
self.noiseless = bool(int(noiseless))
self.hyper_samples = []
self.constraint_hyper_samples = []
self.ff = None
self.ff_samples = []
self.noise_scale = 0.1 # horseshoe prior
self.amp2_scale = 1 # zero-mean log normal prior
self.max_ls = 2 # top-hat prior on length scales
self.constraint_noise_scale = 0.1 # horseshoe prior
        self.constraint_amp2_scale = 1 # zero-mean log normal prior
self.constraint_gain = 1 # top-hat prior on length scales
self.constraint_max_ls = 2 # top-hat prior on length scales
self.bad_value = float(constraint_violating_value)
# A simple function to dump out hyperparameters to allow for a hot start
# if the optimization is restarted.
def dump_hypers(self):
sys.stderr.write("Waiting to lock hyperparameter pickle...")
self.locker.lock_wait(self.state_pkl)
sys.stderr.write("...acquired\n")
# Write the hyperparameters out to a Pickle.
fh = tempfile.NamedTemporaryFile(mode='w', delete=False)
cPickle.dump({ 'dims' : self.D,
'ls' : self.ls,
'amp2' : self.amp2,
'noise' : self.noise,
'mean' : self.mean,
'constraint_ls' : self.constraint_ls,
'constraint_amp2' : self.constraint_amp2,
'constraint_noise' : self.constraint_noise,
'constraint_mean' : self.constraint_mean },
fh)
fh.close()
# Use an atomic move for better NFS happiness.
cmd = 'mv "%s" "%s"' % (fh.name, self.state_pkl)
os.system(cmd) # TODO: Should check system-dependent return status.
self.locker.unlock(self.state_pkl)
def _real_init(self, dims, values, durations):
sys.stderr.write("Waiting to lock hyperparameter pickle...")
self.locker.lock_wait(self.state_pkl)
sys.stderr.write("...acquired\n")
if os.path.exists(self.state_pkl):
fh = open(self.state_pkl, 'r')
state = cPickle.load(fh)
fh.close()
self.D = state['dims']
self.ls = state['ls']
self.amp2 = state['amp2']
self.noise = state['noise']
self.mean = state['mean']
self.constraint_ls = state['constraint_ls']
self.constraint_amp2 = state['constraint_amp2']
self.constraint_noise = state['constraint_noise']
self.constraint_mean = state['constraint_mean']
self.constraint_gain = state['constraint_mean']
self.needs_burnin = False
else:
# Identify constraint violations
goodvals = np.nonzero(values != self.bad_value)[0]
# Input dimensionality.
self.D = dims
# Initial length scales.
self.ls = np.ones(self.D)
self.constraint_ls = np.ones(self.D)
# Initial amplitude.
self.amp2 = np.std(values[goodvals])
self.constraint_amp2 = 1#np.std(durations)
# Initial observation noise.
self.noise = 1e-3
self.constraint_noise = 1e-3
self.constraint_gain = 1
# Initial mean.
self.mean = np.mean(values[goodvals])
self.constraint_mean = 0.5
self.locker.unlock(self.state_pkl)
def cov(self, amp2, ls, x1, x2=None):
if x2 is None:
return amp2 * (self.cov_func(ls, x1, None)
+ 1e-6*np.eye(x1.shape[0]))
else:
return amp2 * self.cov_func(ls, x1, x2)
# Given a set of completed 'experiments' in the unit hypercube with
# corresponding objective 'values', pick from the next experiment to
# run according to the acquisition function.
def next(self, grid, values, durations,
candidates, pending, complete):
# Don't bother using fancy GP stuff at first.
if complete.shape[0] < 2:
return int(candidates[0])
# Perform the real initialization.
if self.D == -1:
self._real_init(grid.shape[1], values[complete],
durations[complete])
# Grab out the relevant sets.
comp = grid[complete,:]
cand = grid[candidates,:]
pend = grid[pending,:]
vals = values[complete]
# Find which completed jobs violated constraints
badvals = np.nonzero(vals == self.bad_value)[0]
goodvals = np.nonzero(vals != self.bad_value)[0]
print 'Found %d constraint violating jobs' % (badvals.shape[0])
labels = np.zeros(vals.shape[0])
labels[goodvals] = 1
if comp.shape[0] < 2:
return int(candidates[0])
# Spray a set of candidates around the min so far
numcand = cand.shape[0]
best_comp = np.argmin(vals)
cand2 = np.vstack((np.random.randn(10,comp.shape[1])*0.001 +
comp[best_comp,:], cand))
if self.mcmc_iters > 0:
# Possibly burn in.
if self.needs_burnin:
for mcmc_iter in xrange(self.burnin):
self.sample_constraint_hypers(comp, labels)
self.sample_hypers(comp[goodvals,:], vals[goodvals])
sys.stderr.write("BURN %d/%d] mean: %.2f amp: %.2f "
"noise: %.4f min_ls: %.4f max_ls: %.4f\n"
% (mcmc_iter+1, self.burnin, self.mean,
np.sqrt(self.amp2), self.noise,
np.min(self.ls), np.max(self.ls)))
self.needs_burnin = False
# Sample from hyperparameters.
# Adjust the candidates to hit ei/sec peaks
self.hyper_samples = []
for mcmc_iter in xrange(self.mcmc_iters):
self.sample_constraint_hypers(comp, labels)
self.sample_hypers(comp[goodvals,:], vals[goodvals])
sys.stderr.write("%d/%d] mean: %.2f amp: %.2f noise: %.4f "
"min_ls: %.4f max_ls: %.4f\n"
% (mcmc_iter+1, self.mcmc_iters, self.mean,
np.sqrt(self.amp2), self.noise,
np.min(self.ls), np.max(self.ls)))
sys.stderr.write("%d/%d] constraint_mean: %.2f "
"constraint_amp: %.2f constraint_gain: %.4f "
"constraint_min_ls: %.4f constraint_max_ls: "
"%.4f\n"
% (mcmc_iter+1, self.mcmc_iters,
self.constraint_mean,
np.sqrt(self.constraint_amp2),
self.constraint_gain,
np.min(self.constraint_ls),
np.max(self.constraint_ls)))
self.dump_hypers()
comp_preds = np.zeros(labels.shape[0]).flatten()
preds = self.pred_constraint_voilation(cand, comp, labels).flatten()
for ii in xrange(self.mcmc_iters):
constraint_hyper = self.constraint_hyper_samples[ii]
self.ff = self.ff_samples[ii]
self.constraint_mean = constraint_hyper[0]
self.constraint_gain = constraint_hyper[1]
self.constraint_amp2 = constraint_hyper[2]
self.constraint_ls = constraint_hyper[3]
comp_preds += self.pred_constraint_voilation(comp, comp,
labels).flatten()
comp_preds = comp_preds / float(self.mcmc_iters)
print 'Prediction %f percent violations (%d/%d): ' % (
np.mean(preds < 0.5), np.sum(preds < 0.5), preds.shape[0])
print 'Prediction %f percent train accuracy (%d/%d): ' % (
np.mean((comp_preds > 0.5) == labels), np.sum((comp_preds > 0.5)
== labels), comp_preds.shape[0])
if False:
delta = 0.025
x = np.arange(0, 1.0, delta)
y = np.arange(0, 1.0, delta)
X, Y = np.meshgrid(x, y)
cpreds = np.zeros((X.shape[0], X.shape[1]))
predei = np.zeros((X.shape[0], X.shape[1]))
predei2 = np.zeros((X.shape[0], X.shape[1]))
for ii in xrange(self.mcmc_iters):
constraint_hyper = self.constraint_hyper_samples[ii]
self.ff = self.ff_samples[ii]
self.constraint_mean = constraint_hyper[0]
self.constraint_gain = constraint_hyper[1]
self.constraint_amp2 = constraint_hyper[2]
self.constraint_ls = constraint_hyper[3]
cpred = self.pred_constraint_voilation(np.hstack((X.flatten()[:,np.newaxis], Y.flatten()[:,np.newaxis])), comp, labels)
pei = self.compute_ei_per_s(comp, pend, np.hstack((X.flatten()[:,np.newaxis], Y.flatten()[:,np.newaxis])), vals, labels)
pei2 = self.compute_ei(comp, pend, np.hstack((X.flatten()[:,np.newaxis], Y.flatten()[:,np.newaxis])), vals, labels)
cpreds += np.reshape(cpred, (X.shape[0], X.shape[1]))
predei += np.reshape(pei, (X.shape[0], X.shape[1]))
predei2 += np.reshape(pei2, (X.shape[0], X.shape[1]))
plt.figure(1)
cpreds = cpreds/float(self.mcmc_iters)
CS = plt.contour(X,Y,cpreds)
plt.clabel(CS, inline=1, fontsize=10)
plt.plot(comp[labels == 0,0], comp[labels == 0,1], 'rx')
plt.plot(comp[labels == 1,0], comp[labels == 1,1], 'bx')
plt.title('Contours of Classification GP (Prob of not being a constraint violation)')
plt.legend(('Constraint Violations', 'Good points'),'lower left')
plt.savefig('constrained_ei_chooser_class_contour.pdf')
plt.figure(2)
predei = predei/float(self.mcmc_iters)
CS = plt.contour(X,Y,predei)
plt.clabel(CS, inline=1, fontsize=10)
plt.plot(comp[labels == 0,0], comp[labels == 0,1], 'rx')
plt.plot(comp[labels == 1,0], comp[labels == 1,1], 'bx')
plt.title('Contours of EI*P(not violating constraint)')
plt.legend(('Constraint Violations', 'Good points'),'lower left')
plt.savefig('constrained_ei_chooser_eitimesprob_contour.pdf')
plt.figure(3)
predei2 = predei2/float(self.mcmc_iters)
CS = plt.contour(X,Y,predei2)
plt.clabel(CS, inline=1, fontsize=10)
plt.plot(comp[labels == 0,0], comp[labels == 0,1], 'rx')
plt.plot(comp[labels == 1,0], comp[labels == 1,1], 'bx')
plt.title('Contours of EI')
plt.legend(('Constraint Violations', 'Good points'),'lower left')
plt.savefig('constrained_ei_chooser_ei_contour.pdf')
plt.show()
# Pick the top candidates to optimize over
overall_ei = self.ei_over_hypers(comp,pend,cand2,vals,labels)
inds = np.argsort(np.mean(overall_ei, axis=1))[-self.grid_subset:]
cand2 = cand2[inds,:]
# Adjust the candidates to hit ei peaks
b = []# optimization bounds
for i in xrange(0, cand.shape[1]):
b.append((0, 1))
for i in xrange(0, cand2.shape[0]):
sys.stderr.write("Optimizing candidate %d/%d\n" %
(i+1, cand2.shape[0]))
self.check_grad_ei_per(cand2[i,:], comp, vals, labels)
ret = spo.fmin_l_bfgs_b(self.grad_optimize_ei_over_hypers,
cand2[i,:].flatten(),
args=(comp,vals,labels,True),
bounds=b, disp=0)
cand2[i,:] = ret[0]
cand = np.vstack((cand, cand2))
overall_ei = self.ei_over_hypers(comp,pend,cand,vals,labels)
best_cand = np.argmax(np.mean(overall_ei, axis=1))
self.dump_hypers()
if (best_cand >= numcand):
return (int(numcand), cand[best_cand,:])
return int(candidates[best_cand])
else:
# Optimize hyperparameters
self.optimize_hypers(comp, vals, labels)
sys.stderr.write("mean: %f amp: %f noise: %f "
"min_ls: %f max_ls: %f\n"
% (self.mean, np.sqrt(self.amp2),
self.noise, np.min(self.ls), np.max(self.ls)))
# Pick the top candidates to optimize over
            ei = self.compute_ei_per_s(comp, pend, cand2, vals, labels)
            inds = np.argsort(ei)[-self.grid_subset:]
cand2 = cand2[inds,:]
# Adjust the candidates to hit ei peaks
b = []# optimization bounds
for i in xrange(0, cand.shape[1]):
b.append((0, 1))
for i in xrange(0, cand2.shape[0]):
sys.stderr.write("Optimizing candidate %d/%d\n" %
(i+1, cand2.shape[0]))
ret = spo.fmin_l_bfgs_b(self.grad_optimize_ei,
cand2[i,:].flatten(),
args=(comp,vals,labels,True),
bounds=b, disp=0)
cand2[i,:] = ret[0]
cand = np.vstack((cand, cand2))
ei = self.compute_ei_per_s(comp, pend, cand, vals, labels)
best_cand = np.argmax(ei)
self.dump_hypers()
if (best_cand >= numcand):
return (int(numcand), cand[best_cand,:])
return int(candidates[best_cand])
    # Predict constraint violating points
def pred_constraint_voilation(self, cand, comp, vals):
# The primary covariances for prediction.
comp_cov = self.cov(self.constraint_amp2, self.constraint_ls, comp)
cand_cross = self.cov(self.constraint_amp2, self.constraint_ls, comp, cand)
# Compute the required Cholesky.
obsv_cov = comp_cov + self.constraint_noise*np.eye(comp.shape[0])
obsv_chol = spla.cholesky(obsv_cov, lower=True)
cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
cand_cross_grad = cov_grad_func(self.constraint_ls, comp, cand)
# Predictive things.
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), self.ff)# - self.constraint_mean)
beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha)# + self.constraint_mean
func_m = 1./(1 + np.exp(-self.constraint_gain*func_m))
return func_m
# Compute EI over hyperparameter samples
def ei_over_hypers(self,comp,pend,cand,vals,labels):
overall_ei = np.zeros((cand.shape[0], self.mcmc_iters))
for mcmc_iter in xrange(self.mcmc_iters):
hyper = self.hyper_samples[mcmc_iter]
constraint_hyper = self.constraint_hyper_samples[mcmc_iter]
self.mean = hyper[0]
self.noise = hyper[1]
self.amp2 = hyper[2]
self.ls = hyper[3]
self.constraint_mean = constraint_hyper[0]
self.constraint_gain = constraint_hyper[1]
self.constraint_amp2 = constraint_hyper[2]
self.constraint_ls = constraint_hyper[3]
overall_ei[:,mcmc_iter] = self.compute_ei_per_s(comp, pend, cand,
vals, labels)
return overall_ei
# Adjust points by optimizing EI over a set of hyperparameter samples
def grad_optimize_ei_over_hypers(self, cand, comp, vals, labels, compute_grad=True):
summed_ei = 0
summed_grad_ei = np.zeros(cand.shape).flatten()
for mcmc_iter in xrange(self.mcmc_iters):
hyper = self.hyper_samples[mcmc_iter]
constraint_hyper = self.constraint_hyper_samples[mcmc_iter]
self.mean = hyper[0]
self.noise = hyper[1]
self.amp2 = hyper[2]
self.ls = hyper[3]
self.constraint_mean = constraint_hyper[0]
self.constraint_gain = constraint_hyper[1]
self.constraint_amp2 = constraint_hyper[2]
self.constraint_ls = constraint_hyper[3]
if compute_grad:
(ei,g_ei) = self.grad_optimize_ei(cand,comp,vals,labels,compute_grad)
summed_grad_ei = summed_grad_ei + g_ei
else:
ei = self.grad_optimize_ei(cand,comp,vals,labels,compute_grad)
summed_ei += ei
if compute_grad:
return (summed_ei, summed_grad_ei)
else:
return summed_ei
def check_grad_ei_per(self, cand, comp, vals, labels):
(ei,dx1) = self.grad_optimize_ei_over_hypers(cand, comp, vals, labels)
dx2 = dx1*0
idx = np.zeros(cand.shape[0])
for i in xrange(0, cand.shape[0]):
idx[i] = 1e-6
(ei1,tmp) = self.grad_optimize_ei_over_hypers(cand + idx, comp, vals, labels)
(ei2,tmp) = self.grad_optimize_ei_over_hypers(cand - idx, comp, vals, labels)
            dx2[i] = (ei1 - ei2)/(2*1e-6)
idx[i] = 0
print 'computed grads', dx1
print 'finite diffs', dx2
print (dx1/dx2)
print np.sum((dx1 - dx2)**2)
time.sleep(2)
def grad_optimize_ei(self, cand, comp, vals, labels, compute_grad=True):
# Here we have to compute the gradients for constrained ei
# This means deriving through the two kernels, the one for predicting
# constraint violations and the one predicting ei
# First pull out violating points
compfull = comp.copy()
comp = comp[labels > 0, :]
vals = vals[labels > 0]
best = np.min(vals)
cand = np.reshape(cand, (-1, comp.shape[1]))
# First we make predictions for the durations
# Compute covariances
comp_constraint_cov = self.cov(self.constraint_amp2, self.constraint_ls,
compfull)
cand_constraint_cross = self.cov(self.constraint_amp2, self.constraint_ls,
compfull,cand)
# Cholesky decompositions
obsv_constraint_cov = comp_constraint_cov + self.constraint_noise*np.eye(
compfull.shape[0])
obsv_constraint_chol = spla.cholesky( obsv_constraint_cov, lower=True)
# Linear systems
t_alpha = spla.cho_solve((obsv_constraint_chol, True),
self.ff)# - self.constraint_mean)
# Predict marginal mean times and (possibly) variances
func_constraint_m = np.dot(cand_constraint_cross.T, t_alpha)
# Squash through logistic to get probabilities
func_constraint_m = 1./(1+np.exp(-self.constraint_gain*func_constraint_m))
# Apply covariance function
cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
cand_cross_grad = cov_grad_func(self.constraint_ls, compfull, cand)
grad_cross_t = np.squeeze(cand_cross_grad)
# Now compute the gradients w.r.t. ei
# The primary covariances for prediction.
comp_cov = self.cov(self.amp2, self.ls, comp)
cand_cross = self.cov(self.amp2, self.ls, comp, cand)
comp_cov_full = self.cov(self.amp2, self.ls, compfull)
cand_cross_full = self.cov(self.amp2, self.ls, compfull, cand)
# Compute the required Cholesky.
obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
obsv_chol = spla.cholesky( obsv_cov, lower=True )
obsv_cov_full = comp_cov_full + self.noise*np.eye(compfull.shape[0])
obsv_chol_full = spla.cholesky( obsv_cov_full, lower=True)
# Predictive things.
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
#beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)
beta = spla.solve_triangular(obsv_chol_full, cand_cross_full, lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v)
u = (best - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*(u*ncdf + npdf)
ei_per_s = -np.sum(ei*func_constraint_m)
if not compute_grad:
return ei_per_s
grad_constraint_xp_m = np.dot(t_alpha.transpose(),grad_cross_t)
# Gradients of ei w.r.t. mean and variance
g_ei_m = -ncdf
g_ei_s2 = 0.5*npdf / func_s
# Apply covariance function
cand_cross_grad = cov_grad_func(self.ls, comp, cand)
grad_cross = np.squeeze(cand_cross_grad)
cand_cross_grad_full = cov_grad_func(self.ls, compfull, cand)
grad_cross_full = np.squeeze(cand_cross_grad_full)
grad_xp_m = np.dot(alpha.transpose(),grad_cross)
#grad_xp_v = np.dot(-2*spla.cho_solve((obsv_chol, True),
# cand_cross).transpose(),grad_cross)
grad_xp_v = np.dot(-2*spla.cho_solve((obsv_chol_full, True),
cand_cross_full).transpose(),grad_cross_full)
grad_xp = 0.5*self.amp2*(grad_xp_m*g_ei_m + grad_xp_v*g_ei_s2)
grad_constraint_xp_m = 0.5*self.constraint_amp2*self.constraint_gain*grad_constraint_xp_m*func_constraint_m*(1-func_constraint_m)
grad_xp = (func_constraint_m*grad_xp + ei*grad_constraint_xp_m)
return ei_per_s, grad_xp.flatten()
def compute_ei_per_s(self, comp, pend, cand, vals, labels):
# First we make predictions for the durations as that
# doesn't depend on pending experiments
# First pull out violating points
compfull = comp.copy()
comp = comp[labels > 0, :]
vals = vals[labels > 0]
# Compute covariances
comp_constraint_cov = self.cov(self.constraint_amp2, self.constraint_ls,
compfull)
cand_constraint_cross = self.cov(self.constraint_amp2, self.constraint_ls,
compfull,cand)
# Cholesky decompositions
obsv_constraint_cov = comp_constraint_cov + self.constraint_noise*np.eye(
compfull.shape[0])
obsv_constraint_chol = spla.cholesky( obsv_constraint_cov, lower=True )
# Linear systems
t_alpha = spla.cho_solve((obsv_constraint_chol, True), self.ff)# - self.constraint_mean)
#t_beta = spla.solve_triangular(obsv_constraint_chol, cand_constraint_cross, lower=True)
# Predict marginal mean times and (possibly) variances
func_constraint_m = (np.dot(cand_constraint_cross.T, t_alpha))# + self.constraint_mean)
# We don't really need the time variances now
#func_constraint_v = self.constraint_amp2*(1+1e-6) - np.sum(t_beta**2, axis=0)
# Squash through a logistic to get probability of not violating a constraint
func_constraint_m = 1./(1+np.exp(-self.constraint_gain*func_constraint_m))
if pend.shape[0] == 0:
# If there are no pending, don't do anything fancy.
# Current best.
best = np.min(vals)
# The primary covariances for prediction.
comp_cov = self.cov(self.amp2, self.ls, comp)
comp_cov_full = self.cov(self.amp2, self.ls, compfull)
cand_cross = self.cov(self.amp2, self.ls, comp, cand)
cand_cross_full = self.cov(self.amp2, self.ls, compfull, cand)
# Compute the required Cholesky.
obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
obsv_cov_full = comp_cov_full + self.noise*np.eye(compfull.shape[0])
obsv_chol = spla.cholesky( obsv_cov, lower=True )
obsv_chol_full = spla.cholesky( obsv_cov_full, lower=True )
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
#beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)
beta = spla.solve_triangular(obsv_chol_full, cand_cross_full,
lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v)
u = (best - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*( u*ncdf + npdf)
ei_per_s = ei*func_constraint_m
return ei_per_s
else:
# If there are pending experiments, fantasize their outcomes.
# Create a composite vector of complete and pending.
comp_pend = np.concatenate((comp, pend))
# Compute the covariance and Cholesky decomposition.
comp_pend_cov = self.cov(self.amp2, self.ls, comp_pend) + self.noise*np.eye(comp_pend.shape[0])
comp_pend_chol = spla.cholesky(comp_pend_cov, lower=True)
# Compute submatrices.
pend_cross = self.cov(self.amp2, self.ls, comp, pend)
pend_kappa = self.cov(self.amp2, self.ls, pend)
# Use the sub-Cholesky.
obsv_chol = comp_pend_chol[:comp.shape[0],:comp.shape[0]]
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.cho_solve((obsv_chol, True), pend_cross)
# Finding predictive means and variances.
pend_m = np.dot(pend_cross.T, alpha) + self.mean
pend_K = pend_kappa - np.dot(pend_cross.T, beta)
# Take the Cholesky of the predictive covariance.
pend_chol = spla.cholesky(pend_K, lower=True)
# Make predictions.
pend_fant = np.dot(pend_chol, npr.randn(pend.shape[0],self.pending_samples)) + self.mean
# Include the fantasies.
fant_vals = np.concatenate((np.tile(vals[:,np.newaxis],
(1,self.pending_samples)), pend_fant))
# Compute bests over the fantasies.
bests = np.min(fant_vals, axis=0)
# Now generalize from these fantasies.
cand_cross = self.cov(self.amp2, self.ls, comp_pend, cand)
# Solve the linear systems.
alpha = spla.cho_solve((comp_pend_chol, True), fant_vals - self.mean)
beta = spla.solve_triangular(comp_pend_chol, cand_cross, lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v[:,np.newaxis])
u = (bests[np.newaxis,:] - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*( u*ncdf + npdf)
return np.mean(ei, axis=1)*func_constraint_m
def compute_ei(self, comp, pend, cand, vals, labels):
# First we make predictions for the durations as that
# doesn't depend on pending experiments
# First pull out violating points
compfull = comp.copy()
comp = comp[labels > 0, :]
vals = vals[labels > 0]
# Compute covariances
comp_constraint_cov = self.cov(self.constraint_amp2, self.constraint_ls,
compfull)
cand_constraint_cross = self.cov(self.constraint_amp2, self.constraint_ls,
compfull,cand)
# Cholesky decompositions
obsv_constraint_cov = comp_constraint_cov + self.constraint_noise*np.eye(
compfull.shape[0])
obsv_constraint_chol = spla.cholesky( obsv_constraint_cov, lower=True )
# Linear systems
t_alpha = spla.cho_solve((obsv_constraint_chol, True), self.ff)# - self.constraint_mean)
#t_beta = spla.solve_triangular(obsv_constraint_chol, cand_constraint_cross, lower=True)
# Predict marginal mean times and (possibly) variances
func_constraint_m = (np.dot(cand_constraint_cross.T, t_alpha))# + self.constraint_mean)
# We don't really need the time variances now
#func_constraint_v = self.constraint_amp2*(1+1e-6) - np.sum(t_beta**2, axis=0)
# Squash through a logistic to get probability of not violating a constraint
func_constraint_m = 1./(1+np.exp(-self.constraint_gain*func_constraint_m))
if pend.shape[0] == 0:
# If there are no pending, don't do anything fancy.
# Current best.
best = np.min(vals)
# The primary covariances for prediction.
comp_cov = self.cov(self.amp2, self.ls, comp)
comp_cov_full = self.cov(self.amp2, self.ls, compfull)
cand_cross = self.cov(self.amp2, self.ls, comp, cand)
cand_cross_full = self.cov(self.amp2, self.ls, compfull, cand)
# Compute the required Cholesky.
obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
obsv_cov_full = comp_cov_full + self.noise*np.eye(compfull.shape[0])
obsv_chol = spla.cholesky( obsv_cov, lower=True )
obsv_chol_full = spla.cholesky( obsv_cov_full, lower=True )
# Solve the linear systems.
alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)
#beta = spla.solve_triangular(obsv_chol_full, cand_cross_full, lower=True)
# Predict the marginal means and variances at candidates.
func_m = np.dot(cand_cross.T, alpha) + self.mean
func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
# Expected improvement
func_s = np.sqrt(func_v)
u = (best - func_m) / func_s
ncdf = sps.norm.cdf(u)
npdf = sps.norm.pdf(u)
ei = func_s*( u*ncdf + npdf)
ei_per_s = ei
#ei_per_s = ei
return ei
else:
return 0
def sample_constraint_hypers(self, comp, labels):
# The latent GP projection
if self.ff is None:
comp_cov = self.cov(self.amp2, self.ls, comp)
obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
obsv_chol = spla.cholesky( obsv_cov, lower=True )
self.ff = np.dot(obsv_chol,npr.randn(obsv_chol.shape[0]))
self._sample_constraint_noisy(comp, labels)
self._sample_constraint_ls(comp, labels)
self.constraint_hyper_samples.append((self.constraint_mean, self.constraint_gain, self.constraint_amp2,
self.constraint_ls))
self.ff_samples.append(self.ff)
def sample_hypers(self, comp, vals):
if self.noiseless:
self.noise = 1e-3
self._sample_noiseless(comp, vals)
else:
self._sample_noisy(comp, vals)
self._sample_ls(comp, vals)
self.hyper_samples.append((self.mean, self.noise, self.amp2, self.ls))
def _sample_ls(self, comp, vals):
def logprob(ls):
if np.any(ls < 0) or np.any(ls > self.max_ls):
return -np.inf
cov = self.amp2 * (self.cov_func(ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + self.noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - self.mean)
lp = (-np.sum(np.log(np.diag(chol))) -
0.5*np.dot(vals-self.mean, solve))
return lp
self.ls = util.slice_sample(self.ls, logprob, compwise=True)
def _sample_constraint_ls(self, comp, vals):
def lpSigmoid(ff, gain=self.constraint_gain):
probs = 1./(1. + np.exp(-gain*ff));
probs[probs <= 0] = 1e-12
probs[probs >= 1] = 1-1e-12
llh = np.sum(vals*np.log(probs) + (1-vals)*np.log(1-probs));
return llh
def updateGain(gain):
if gain < 0.01 or gain > 10:
return -np.inf
cov = self.constraint_amp2 * (self.cov_func(self.constraint_ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + self.constraint_noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals)# - self.constraint_mean)
#lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(self.ff, solve)
lp = lpSigmoid(self.ff, gain)
return lp
def logprob(ls):
if np.any(ls < 0) or np.any(ls > self.constraint_max_ls):
return -np.inf
cov = self.constraint_amp2 * (self.cov_func(ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + self.constraint_noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), self.ff)# - self.constraint_mean)
#lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(self.ff, solve)
lp = lpSigmoid(self.ff)
return lp
#hypers = util.slice_sample(np.hstack((self.constraint_ls, self.ff)), logprob, compwise=True)
hypers = util.slice_sample(self.constraint_ls, logprob, compwise=True)
self.constraint_ls = hypers
cov = self.constraint_amp2 * (self.cov_func(self.constraint_ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + self.constraint_noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=False)
ff = self.ff
for jj in xrange(20):
(ff, lpell) = self.elliptical_slice(ff, chol, lpSigmoid);
self.ff = ff
# Update gain
hypers = util.slice_sample(np.array([self.constraint_gain]), updateGain, compwise=True)
self.constraint_gain = hypers
def _sample_noisy(self, comp, vals):
def logprob(hypers):
mean = hypers[0]
amp2 = hypers[1]
noise = hypers[2]
# This is pretty hacky, but keeps things sane.
if mean > np.max(vals) or mean < np.min(vals):
return -np.inf
if amp2 < 0 or noise < 0:
return -np.inf
cov = amp2 * (self.cov_func(self.ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - mean)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(vals-mean, solve)
# Roll in noise horseshoe prior.
lp += np.log(np.log(1 + (self.noise_scale/noise)**2))
#lp -= 0.5*(np.log(noise)/self.noise_scale)**2
# Roll in amplitude lognormal prior
lp -= 0.5*(np.log(amp2)/self.amp2_scale)**2
return lp
hypers = util.slice_sample(np.array([self.mean, self.amp2, self.noise]), logprob, compwise=False)
self.mean = hypers[0]
self.amp2 = hypers[1]
self.noise = hypers[2]
def _sample_constraint_noisy(self, comp, vals):
def lpSigmoid(ff,gain=self.constraint_gain):
probs = 1./(1. + np.exp(-gain*ff));
probs[probs <= 0] = 1e-12
probs[probs >= 1] = 1-1e-12
llh = np.sum(vals*np.log(probs) + (1-vals)*np.log(1-probs));
return llh
def logprob(hypers):
#mean = hypers[0]
amp2 = hypers[0]
#gain = hypers[2]
ff = hypers[1:]
# This is pretty hacky, but keeps things sane.
#if mean > np.max(vals) or mean < np.min(vals):
# return -np.inf
if amp2 < 0:
return -np.inf
noise = self.constraint_noise
cov = amp2 * (self.cov_func(self.constraint_ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), ff)# - mean)
#lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(ff-mean, solve)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(ff, solve)
# Roll in noise horseshoe prior.
#lp += np.log(np.log(1 + (self.constraint_noise_scale/noise)**2))
#lp -= 0.5*(np.log(noise)/self.constraint_noise_scale)**2
# Roll in amplitude lognormal prior
lp -= 0.5*(np.log(amp2)/self.constraint_amp2_scale)**2
#lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(self.ff, solve)
lp += lpSigmoid(ff,self.constraint_gain)
return lp
hypers = util.slice_sample(np.hstack((np.array([self.constraint_amp2]), self.ff)), logprob, compwise=False)
#self.constraint_mean = hypers[0]
self.constraint_amp2 = hypers[0]
#self.constraint_gain = hypers[2]
self.ff = hypers[1:]
cov = self.constraint_amp2 * (self.cov_func(self.constraint_ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + self.constraint_noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=False)
ff = self.ff
for jj in xrange(50):
(ff, lpell) = self.elliptical_slice(ff, chol, lpSigmoid);
self.ff = ff
def _sample_noiseless(self, comp, vals):
def logprob(hypers):
mean = hypers[0]
amp2 = hypers[1]
noise = 1e-3
# This is pretty hacky, but keeps things sane.
if mean > np.max(vals) or mean < np.min(vals):
return -np.inf
if amp2 < 0:
return -np.inf
cov = amp2 * (self.cov_func(self.ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - mean)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(vals-mean, solve)
# Roll in amplitude lognormal prior
lp -= 0.5*(np.log(amp2)/self.amp2_scale)**2
return lp
hypers = util.slice_sample(np.array([self.mean, self.amp2, self.noise]), logprob, compwise=False)
self.mean = hypers[0]
self.amp2 = hypers[1]
self.noise = 1e-3
def elliptical_slice(self, xx, chol_Sigma, log_like_fn, cur_log_like=None, angle_range=0):
D = xx.shape[0]
if cur_log_like is None:
cur_log_like = log_like_fn(xx)
nu = np.dot(chol_Sigma.T,np.random.randn(D, 1)).flatten()
hh = np.log(np.random.rand()) + cur_log_like
# Set up a bracket of angles and pick a first proposal.
# "phi = (theta'-theta)" is a change in angle.
if angle_range <= 0:
# Bracket whole ellipse with both edges at first proposed point
phi = np.random.rand()*2*math.pi;
phi_min = phi - 2*math.pi;
phi_max = phi;
else:
# Randomly center bracket on current point
phi_min = -angle_range*np.random.rand();
phi_max = phi_min + angle_range;
phi = np.random.rand()*(phi_max - phi_min) + phi_min;
# Slice sampling loop
while True:
# Compute xx for proposed angle difference and check if it's on the slice
xx_prop = xx*np.cos(phi) + nu*np.sin(phi);
cur_log_like = log_like_fn(xx_prop);
if cur_log_like > hh:
# New point is on slice, ** EXIT LOOP **
break;
# Shrink slice to rejected point
if phi > 0:
phi_max = phi;
elif phi < 0:
phi_min = phi;
else:
raise Exception('BUG DETECTED: Shrunk to current position and still not acceptable.');
# Propose new angle difference
phi = np.random.rand()*(phi_max - phi_min) + phi_min;
xx = xx_prop;
return (xx, cur_log_like)
def optimize_hypers(self, comp, vals, labels):
# First the GP to observations
mygp = gp.GP(self.cov_func.__name__)
mygp.real_init(comp.shape[1], vals)
mygp.optimize_hypers(comp,vals)
self.mean = mygp.mean
self.ls = mygp.ls
self.amp2 = mygp.amp2
self.noise = mygp.noise
# Now the GP to times
timegp = gp.GP(self.cov_func.__name__)
timegp.real_init(comp.shape[1], labels)
timegp.optimize_hypers(comp, labels)
self.constraint_mean = timegp.mean
self.constraint_amp2 = timegp.amp2
self.constraint_noise = timegp.noise
self.constraint_ls = timegp.ls
# Save hyperparameter samples
self.hyper_samples.append((self.mean, self.noise, self.amp2, self.ls))
self.constraint_hyper_samples.append((self.constraint_mean, self.constraint_noise, self.constraint_amp2,
self.constraint_ls))
self.dump_hypers()
| ninjin/spearmint-lite | GPEIConstrainedChooser.py | Python | gpl-3.0 | 44,400 |
import math
import mock
import time
import unittest2
from pykafka import KafkaClient
from pykafka.balancedconsumer import BalancedConsumer, OffsetType
from pykafka.test.utils import get_cluster, stop_cluster
from pykafka.utils.compat import range
def buildMockConsumer(num_partitions=10, num_participants=1, timeout=2000):
consumer_group = 'testgroup'
topic = mock.Mock()
topic.name = 'testtopic'
topic.partitions = {}
for k in range(num_partitions):
part = mock.Mock(name='part-{part}'.format(part=k))
part.id = k
part.topic = topic
part.leader = mock.Mock()
part.leader.id = k % num_participants
topic.partitions[k] = part
cluster = mock.MagicMock()
zk = mock.MagicMock()
return BalancedConsumer(topic, cluster, consumer_group,
zookeeper=zk, auto_start=False,
consumer_timeout_ms=timeout), topic
class TestBalancedConsumer(unittest2.TestCase):
@classmethod
def setUpClass(cls):
cls._consumer_timeout = 2000
cls._mock_consumer, _ = buildMockConsumer(timeout=cls._consumer_timeout)
def test_consume_returns(self):
"""Ensure that consume() returns in the amount of time it's supposed to
"""
self._mock_consumer._setup_internal_consumer(start=False)
start = time.time()
self._mock_consumer.consume()
self.assertEqual(int(time.time() - start), int(self._consumer_timeout / 1000))
def test_consume_graceful_stop(self):
"""Ensure that stopping a consumer while consuming from Kafka does not
end in an infinite loop when timeout is not used.
"""
consumer, _ = buildMockConsumer(timeout=-1)
consumer._setup_internal_consumer(start=False)
consumer.stop()
self.assertIsNone(consumer.consume())
def test_decide_partitions(self):
"""Test partition assignment for a number of partitions/consumers."""
# 100 test iterations
for i in range(100):
# Set up partitions, cluster, etc
num_participants = i + 1
num_partitions = 100 - i
participants = sorted(['test-debian:{p}'.format(p=p)
for p in range(num_participants)])
cns, topic = buildMockConsumer(num_partitions=num_partitions,
num_participants=num_participants)
# Simulate each participant to ensure they're correct
assigned_parts = []
for p_id in range(num_participants):
cns._consumer_id = participants[p_id] # override consumer id
# Decide partitions then validate
partitions = cns._decide_partitions(participants)
assigned_parts.extend(partitions)
remainder_ppc = num_partitions % num_participants
idx = participants.index(cns._consumer_id)
parts_per_consumer = num_partitions / num_participants
parts_per_consumer = math.floor(parts_per_consumer)
num_parts = parts_per_consumer + (0 if (idx + 1 > remainder_ppc) else 1)
self.assertEqual(len(partitions), int(num_parts))
# Validate all partitions were assigned once and only once
all_partitions = topic.partitions.values()
all_partitions = sorted(all_partitions, key=lambda x: x.id)
assigned_parts = sorted(assigned_parts, key=lambda x: x.id)
self.assertListEqual(assigned_parts, all_partitions)
class BalancedConsumerIntegrationTests(unittest2.TestCase):
maxDiff = None
@classmethod
def setUpClass(cls):
cls.kafka = get_cluster()
cls.topic_name = b'test-data'
cls.kafka.create_topic(cls.topic_name, 3, 2)
cls.client = KafkaClient(cls.kafka.brokers)
cls.prod = cls.client.topics[cls.topic_name].get_producer(
min_queued_messages=1
)
for i in range(1000):
cls.prod.produce('msg {num}'.format(num=i).encode())
@classmethod
def tearDownClass(cls):
stop_cluster(cls.kafka)
def test_consume_earliest(self):
try:
consumer_a = self.client.topics[self.topic_name].get_balanced_consumer(
b'test_consume_earliest', zookeeper_connect=self.kafka.zookeeper,
auto_offset_reset=OffsetType.EARLIEST
)
consumer_b = self.client.topics[self.topic_name].get_balanced_consumer(
b'test_consume_earliest', zookeeper_connect=self.kafka.zookeeper,
auto_offset_reset=OffsetType.EARLIEST
)
# Consume from both a few times
messages = [consumer_a.consume() for i in range(1)]
self.assertTrue(len(messages) == 1)
messages = [consumer_b.consume() for i in range(1)]
self.assertTrue(len(messages) == 1)
# Validate they aren't sharing partitions
self.assertSetEqual(
consumer_a._partitions & consumer_b._partitions,
set()
)
# Validate all partitions are here
self.assertSetEqual(
consumer_a._partitions | consumer_b._partitions,
set(self.client.topics[self.topic_name].partitions.values())
)
finally:
consumer_a.stop()
consumer_b.stop()
def test_consume_latest(self):
try:
consumer_a = self.client.topics[self.topic_name].get_balanced_consumer(
b'test_consume_latest', zookeeper_connect=self.kafka.zookeeper,
auto_offset_reset=OffsetType.LATEST
)
consumer_b = self.client.topics[self.topic_name].get_balanced_consumer(
b'test_consume_latest', zookeeper_connect=self.kafka.zookeeper,
auto_offset_reset=OffsetType.LATEST
)
# Since we are consuming from the latest offset,
# produce more messages to consume.
for i in range(10):
self.prod.produce('msg {num}'.format(num=i).encode())
# Consume from both a few times
messages = [consumer_a.consume() for i in range(1)]
self.assertTrue(len(messages) == 1)
messages = [consumer_b.consume() for i in range(1)]
self.assertTrue(len(messages) == 1)
# Validate they aren't sharing partitions
self.assertSetEqual(
consumer_a._partitions & consumer_b._partitions,
set()
)
# Validate all partitions are here
self.assertSetEqual(
consumer_a._partitions | consumer_b._partitions,
set(self.client.topics[self.topic_name].partitions.values())
)
finally:
consumer_a.stop()
consumer_b.stop()
if __name__ == "__main__":
unittest2.main()
| jofusa/pykafka | tests/pykafka/test_balancedconsumer.py | Python | apache-2.0 | 7,018 |
#!/usr/bin/env python
import os
from kunai.log import logger
# linux only, because of problems on other OSes :)
# Basic USER_HZ, typically 100 (meaning 100 ticks per second)
SC_CLK_TCK = os.sysconf_names['SC_CLK_TCK']
USER_HZ = os.sysconf(SC_CLK_TCK)
# For some CPU metrics we want a percentage, but the diff
# is an absolute value, in number of ticks (USER_HZ based)
def rate_cpu(old_v, new_v, diff):
return ((new_v - old_v) / float(diff)) / USER_HZ
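# Usage sketch (illustrative numbers): two cpuacct 'user' readings taken 10
# seconds apart. With USER_HZ == 100, 500 extra ticks over 10s means
# rate_cpu(1000, 1500, 10) == 0.5, i.e. half of one CPU over that window.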
CGROUP_METRICS = [
{
"cgroup": "memory",
"file": "memory.stat",
"cname": "memory",
"metrics": {
# Default metrics
"cache": ("docker.mem.cache", "gauge", None),
"rss": ("docker.mem.rss", "gauge", None),
"swap": ("docker.mem.swap", "gauge", None),
# Optional metrics
"active_anon": ("docker.mem.active_anon", "gauge", None),
"active_file": ("docker.mem.active_file", "gauge", None),
"inactive_anon": ("docker.mem.inactive_anon", "gauge", None),
"inactive_file": ("docker.mem.inactive_file", "gauge", None),
"mapped_file": ("docker.mem.mapped_file", "gauge", None),
"pgfault": ("docker.mem.pgfault", "rate", None),
"pgmajfault": ("docker.mem.pgmajfault", "rate", None),
"pgpgin": ("docker.mem.pgpgin", "rate", None),
"pgpgout": ("docker.mem.pgpgout", "rate", None),
"unevictable": ("docker.mem.unevictable", "gauge", None),
}
},
{
"cgroup": "cpuacct",
"file": "cpuacct.stat",
"cname": "cpu",
"metrics": {
"user": ("docker.cpu.user", "rate", rate_cpu),
"system": ("docker.cpu.system", "rate", rate_cpu),
},
},
]
class CgroupMgr(object):
def __init__(self):
# Locate cgroups directories
self._mountpoints = {}
self._cgroup_filename_pattern = None
for metric in CGROUP_METRICS:
self._mountpoints[metric["cgroup"]] = self._find_cgroup(metric["cgroup"])
# Cgroups
def _find_cgroup_filename_pattern(self):
if self._mountpoints:
# We try with different cgroups so that it works even if only one is properly working
for mountpoint in self._mountpoints.values():
stat_file_path_lxc = os.path.join(mountpoint, "lxc")
stat_file_path_docker = os.path.join(mountpoint, "docker")
stat_file_path_coreos = os.path.join(mountpoint, "system.slice")
if os.path.exists(stat_file_path_lxc):
return os.path.join('%(mountpoint)s/lxc/%(id)s/%(file)s')
elif os.path.exists(stat_file_path_docker):
return os.path.join('%(mountpoint)s/docker/%(id)s/%(file)s')
elif os.path.exists(stat_file_path_coreos):
return os.path.join('%(mountpoint)s/system.slice/docker-%(id)s.scope/%(file)s')
raise Exception("Cannot find Docker cgroup directory. Be sure your system is supported.")
def _get_cgroup_file(self, cgroup, container_id, filename):
# This can't be initialized at startup because cgroups may not be mounted yet
if not self._cgroup_filename_pattern:
self._cgroup_filename_pattern = self._find_cgroup_filename_pattern()
return self._cgroup_filename_pattern % (dict(
mountpoint=self._mountpoints[cgroup],
id=container_id,
file=filename,
))
# There are old and new school format for cgroup. Manage both
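    # Illustrative /proc/mounts lines (not from a specific system):
    #   old style: "cgroup /cgroup cgroup rw,cpuacct,memory 0 0" (single mount)
    #   new style: "cgroup /sys/fs/cgroup/memory cgroup rw,relatime,memory 0 0"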
def _find_cgroup(self, hierarchy):
with open("/proc/mounts") as fp:
mounts = map(lambda x: x.split(), fp.read().splitlines())
cgroup_mounts = filter(lambda x: x[2] == "cgroup", mounts)
if len(cgroup_mounts) == 0:
return ''
# Old cgroup style
if len(cgroup_mounts) == 1:
return cgroup_mounts[0][1]
# so new one
for _, mountpoint, _, opts, _, _ in cgroup_mounts:
if hierarchy in opts:
return mountpoint
# Parse a cgroup file and get a key/value return
def _parse_cgroup_file(self, stat_file):
try:
logger.debug("Opening cgroup file: %s" % stat_file)
with open(stat_file) as fp:
return dict(map(lambda x: x.split(), fp.read().splitlines()))
except IOError:
# It is possible that the container got stopped between the API call and now
logger.info("Can't open %s. Theses metrics for this container are skipped." % stat_file)
return None
def get_containers_metrics(self, containers):
res = {}
for cid in containers:
res[cid] = []
for cgroup in CGROUP_METRICS:
stat_file = self._get_cgroup_file(cgroup["cgroup"], cid, cgroup['file'])
stats = self._parse_cgroup_file(stat_file)
if stats:
for key, (dd_key, metric_type, rate_f) in cgroup['metrics'].iteritems():
if key in stats: # and (common_metric or collect_uncommon_metrics):
v = {'type': metric_type, 'scope':cgroup["cname"], 'mname':key, 'value':int(stats[key]), 'rate_f':rate_f}
res[cid].append(v)
return res
cgroupmgr = CgroupMgr()
| pombredanne/kunai-1 | kunai/cgroups.py | Python | mit | 5,468 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import oslo_messaging
from sqlalchemy.orm import exc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as q_const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.extensions import portbindings
from neutron.extensions import portsecurity as psec
from neutron.i18n import _LW
from neutron import manager
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel
# REVISIT(kmestery): Allow the type and mechanism drivers to supply the
# mixins and eventually remove the direct dependencies on type_tunnel.
LOG = log.getLogger(__name__)
class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin):
# history
# 1.0 Initial version (from openvswitch/linuxbridge)
# 1.1 Support Security Group RPC
# 1.2 Support get_devices_details_list
# 1.3 get_device_details rpc signature upgrade to obtain 'host' and
# return value to include fixed_ips and device_owner for
# the device port
# 1.4 tunnel_sync rpc signature upgrade to obtain 'host'
target = oslo_messaging.Target(version='1.4')
def __init__(self, notifier, type_manager):
self.setup_tunnel_callback_mixin(notifier, type_manager)
super(RpcCallbacks, self).__init__()
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
# cached networks used for reducing number of network db calls
# for server internal usage only
cached_networks = kwargs.get('cached_networks')
LOG.debug("Device %(device)s details requested by agent "
"%(agent_id)s with host %(host)s",
{'device': device, 'agent_id': agent_id, 'host': host})
plugin = manager.NeutronManager.get_plugin()
port_id = plugin._device_to_port_id(device)
port_context = plugin.get_bound_port_context(rpc_context,
port_id,
host,
cached_networks)
if not port_context:
LOG.warning(_LW("Device %(device)s requested by agent "
"%(agent_id)s not found in database"),
{'device': device, 'agent_id': agent_id})
return {'device': device}
segment = port_context.bottom_bound_segment
port = port_context.current
# caching information about networks for future use
if cached_networks is not None:
if port['network_id'] not in cached_networks:
cached_networks[port['network_id']] = (
port_context.network.current)
if not segment and (port['device_id'] or '').startswith('virl-'):
network = port_context.network.current
segment = {key: network['provider:' + key] for key in
(api.NETWORK_TYPE, api.SEGMENTATION_ID, api.PHYSICAL_NETWORK)}
if not segment:
LOG.warning(_LW("Device %(device)s requested by agent "
"%(agent_id)s on network %(network_id)s not "
"bound, vif_type: %(vif_type)s"),
{'device': device,
'agent_id': agent_id,
'network_id': port['network_id'],
'vif_type': port[portbindings.VIF_TYPE]})
return {'device': device}
if (not host or host == port_context.host):
new_status = (q_const.PORT_STATUS_BUILD if port['admin_state_up']
else q_const.PORT_STATUS_DOWN)
if port['status'] != new_status:
plugin.update_port_status(rpc_context,
port_id,
new_status,
host)
entry = {'device': device,
'network_id': port['network_id'],
'port_id': port_id,
'mac_address': port['mac_address'],
'admin_state_up': port['admin_state_up'],
'network_type': segment[api.NETWORK_TYPE],
'segmentation_id': segment[api.SEGMENTATION_ID],
'physical_network': segment[api.PHYSICAL_NETWORK],
'fixed_ips': port['fixed_ips'],
'device_owner': port['device_owner'],
'device_id': port['device_id'],
'allowed_address_pairs': port['allowed_address_pairs'],
'port_security_enabled': port.get(psec.PORTSECURITY, True),
'profile': port[portbindings.PROFILE]}
LOG.debug("Returning: %s", entry)
return entry
def get_devices_details_list(self, rpc_context, **kwargs):
# cached networks used for reducing number of network db calls
cached_networks = {}
return [
self.get_device_details(
rpc_context,
device=device,
cached_networks=cached_networks,
**kwargs
)
for device in kwargs.pop('devices', [])
]
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
# TODO(garyk) - live migration and port status
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
LOG.debug("Device %(device)s no longer exists at agent "
"%(agent_id)s",
{'device': device, 'agent_id': agent_id})
plugin = manager.NeutronManager.get_plugin()
port_id = plugin._device_to_port_id(device)
port_exists = True
if (host and not plugin.port_bound_to_host(rpc_context,
port_id, host)):
LOG.debug("Device %(device)s not bound to the"
" agent host %(host)s",
{'device': device, 'host': host})
return {'device': device,
'exists': port_exists}
try:
port_exists = bool(plugin.update_port_status(
rpc_context, port_id, q_const.PORT_STATUS_DOWN, host))
except exc.StaleDataError:
port_exists = False
LOG.debug("delete_port and update_device_down are being executed "
"concurrently. Ignoring StaleDataError.")
return {'device': device,
'exists': port_exists}
def update_device_up(self, rpc_context, **kwargs):
"""Device is up on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
LOG.debug("Device %(device)s up at agent %(agent_id)s",
{'device': device, 'agent_id': agent_id})
plugin = manager.NeutronManager.get_plugin()
port_id = plugin._device_to_port_id(device)
if (host and not plugin.port_bound_to_host(rpc_context,
port_id, host)):
LOG.debug("Device %(device)s not bound to the"
" agent host %(host)s",
{'device': device, 'host': host})
return
port_id = plugin.update_port_status(rpc_context, port_id,
q_const.PORT_STATUS_ACTIVE,
host)
try:
# NOTE(armax): it's best to remove all objects from the
# session, before we try to retrieve the new port object
rpc_context.session.expunge_all()
port = plugin._get_port(rpc_context, port_id)
except exceptions.PortNotFound:
LOG.debug('Port %s not found during update', port_id)
else:
kwargs = {
'context': rpc_context,
'port': port,
'update_device_up': True
}
registry.notify(
resources.PORT, events.AFTER_UPDATE, plugin, **kwargs)
class AgentNotifierApi(dvr_rpc.DVRAgentRpcApiMixin,
sg_rpc.SecurityGroupAgentRpcApiMixin,
type_tunnel.TunnelAgentRpcApiMixin):
"""Agent side of the openvswitch rpc API.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
update_dhcp_port, and removed get_dhcp_port methods.
"""
def __init__(self, topic):
self.topic = topic
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
self.topic_port_delete = topics.get_topic_name(topic,
topics.PORT,
topics.DELETE)
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def network_delete(self, context, network_id):
cctxt = self.client.prepare(topic=self.topic_network_delete,
fanout=True)
cctxt.cast(context, 'network_delete', network_id=network_id)
def port_update(self, context, port, network_type, segmentation_id,
physical_network):
cctxt = self.client.prepare(topic=self.topic_port_update,
fanout=True)
cctxt.cast(context, 'port_update', port=port,
network_type=network_type, segmentation_id=segmentation_id,
physical_network=physical_network)
def port_delete(self, context, port_id):
cctxt = self.client.prepare(topic=self.topic_port_delete,
fanout=True)
cctxt.cast(context, 'port_delete', port_id=port_id)
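# Illustrative sketch (an assumption about how the ML2 plugin wires this up,
# not taken from this file): the plugin typically creates the notifier with the
# agent topic and fans its casts out to all agents, e.g.
#
#   notifier = AgentNotifierApi(topics.AGENT)
#   notifier.port_update(context, port, network_type='vxlan',
#                        segmentation_id=1001, physical_network=None)
#
# Each helper above prepares a fanout client on a per-resource topic
# (network delete, port update, port delete) and casts the matching RPC method.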
| virlos/virl-salt | openstack/neutron/files/kilo/rpc.py | Python | gpl-2.0 | 11,220 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Identity operator in `R^k`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import operator_pd
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
class OperatorPDIdentity(operator_pd.OperatorPDBase):
"""Identity operator in `R^k`: `Ax = x`.
This provides an efficient implementation of the identity as an `OperatorPD`.
Storage, solves, and matmul are all `O(1)`, independent of batch size.
In order to be a drop-in replacement for other operators, shape and dtype
of arguments (e.g. to `matmul`) are checked statically as though this operator
was an instantiated matrix.
Dynamic shape checks of arguments are not done since that could impede
performance.
"""
def __init__(self, shape, dtype, verify_pd=True, name='OperatorPDIdentity'):
"""Initialize an `OperatorPDIdentity`.
Args:
shape: `int32` rank 1 `Tensor` of length at least 2, and with the last
two entries equal (since this is a square matrix).
dtype: Data type of the matrix that this operator represents.
verify_pd: `Boolean`, if `True`, asserts are added to the initialization
args to ensure they define this operator as a square (batch) matrix.
name: Name to prepend to `Ops`.
"""
# Grab static shape if available now.
with ops.name_scope(name):
with ops.op_scope([shape], 'init'):
self._dtype = dtypes.as_dtype(dtype)
self._verify_pd = verify_pd
self._name = name
# Store the static shape (if possible) right now before adding the
# asserts, since the asserts prevent .constant_value from working.
shape = ops.convert_to_tensor(shape, name='shape')
self._get_shape = tensor_shape.TensorShape(
tensor_util.constant_value(shape))
self._shape_arg = self._check_shape(shape)
def _check_shape(self, shape):
"""Check that the init arg `shape` defines a valid operator."""
shape = ops.convert_to_tensor(shape, name='shape')
if not self._verify_pd:
return shape
# Further checks are equivalent to verification that this is positive
# definite. Why? Because the further checks simply check that this is a
# square matrix, and combining the fact that this is square (and thus maps
# a vector space R^k onto itself), with the behavior of .matmul(), this must
# be the identity operator.
rank = array_ops.size(shape)
assert_matrix = check_ops.assert_less_equal(2, rank)
with ops.control_dependencies([assert_matrix]):
last_dim = array_ops.gather(shape, rank - 1)
second_to_last_dim = array_ops.gather(shape, rank - 2)
assert_square = check_ops.assert_equal(last_dim, second_to_last_dim)
return control_flow_ops.with_dependencies([assert_matrix, assert_square],
shape)
def _check_x(self, x):
"""Static check that the argument `x` is proper `shape`, `dtype`."""
# x is a typical argument e.g. to matmul or solve. In both cases, x should
# have the same type/shape since this is a square matrix. These checks are
    # usually not needed since we usually have some tensor backing this
# distribution, and the calls to tf.matmul do a shape/type check.
#
# Static checks only for efficiency, the identity should be fast.
#
# Why check at all? Because we want this operator to be swappable for a
# real Operator.
if self.dtype != x.dtype:
raise TypeError(
'Expected argument "x" to have same dtype as this operator (%s). '
'Found: %s' % (self.dtype, x.dtype))
x_shape = x.get_shape()
self_shape = self.get_shape()
found_msg = (
'Found: operator.shape = %s, x.shape = %s' % (self_shape, x_shape))
if x_shape.ndims is not None and self_shape.ndims is not None:
if x_shape.ndims != self_shape.ndims:
raise ValueError(
'Expected argument "x" to have same tensor rank as this operator. '
+ found_msg)
if x_shape.is_fully_defined() and self_shape.is_fully_defined():
if x_shape[-2] != self_shape[-1]:
raise ValueError(
'Incompatible shapes for matrix-matrix operation. ' + found_msg)
@property
def name(self):
"""String name identifying this `Operator`."""
return self._name
@property
def verify_pd(self):
"""Whether to verify that this `Operator` is positive definite."""
return self._verify_pd
@property
def dtype(self):
"""Data type of matrix elements of `A`."""
return self._dtype
def _add_to_tensor(self, mat):
# Add to a tensor in O(k) time!
mat_diag = array_ops.batch_matrix_diag_part(mat)
new_diag = constant_op.constant(1, dtype=self.dtype) + mat_diag
return array_ops.batch_matrix_set_diag(mat, new_diag)
def _inv_quadratic_form_on_vectors(self, x):
self._check_x(x)
return self._iqfov_via_sqrt_solve(x)
@property
def inputs(self):
"""List of tensors that were provided as initialization inputs."""
return [self._shape]
def get_shape(self):
"""Static `TensorShape` of entire operator.
If this operator represents the batch matrix `A` with
`A.shape = [N1,...,Nn, k, k]`, then this returns
`TensorShape([N1,...,Nn, k, k])`
Returns:
`TensorShape`, statically determined, may be undefined.
"""
return self._get_shape
def _shape(self):
return self._shape_arg
def _det(self):
det = array_ops.ones(self.batch_shape(), dtype=self.dtype)
det.set_shape(self.get_batch_shape())
return det
def _batch_log_det(self):
log_det = array_ops.zeros(self.batch_shape(), dtype=self.dtype)
log_det.set_shape(self.get_batch_shape())
return log_det
def _batch_sqrt_log_det(self):
s_log_det = array_ops.zeros(self.batch_shape(), dtype=self.dtype)
s_log_det.set_shape(self.get_batch_shape())
return s_log_det
def _batch_matmul(self, x, transpose_x=False):
if transpose_x:
x = array_ops.batch_matrix_transpose(x)
self._check_x(x)
return x
def _batch_sqrt_matmul(self, x, transpose_x=False):
return self._batch_matmul(x, transpose_x=transpose_x)
def _batch_solve(self, rhs):
self._check_x(rhs)
return rhs
def _batch_sqrt_solve(self, rhs):
self._check_x(rhs)
return rhs
def _to_dense(self):
diag = array_ops.ones(self.vector_shape(), dtype=self.dtype)
dense = array_ops.batch_matrix_diag(diag)
dense.set_shape(self.get_shape())
return dense
def _sqrt_to_dense(self):
return self.to_dense()
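# Minimal usage sketch (comments only; assumes the public to_dense()/get_shape()
# wrappers that OperatorPDBase provides around the private hooks above):
#
#   operator = OperatorPDIdentity(shape=[2, 3, 3], dtype=dtypes.float32)
#   operator.get_shape()          # TensorShape([2, 3, 3])
#   dense = operator.to_dense()   # a batch of two 3x3 identity matrices
#
# Because matmul and solve simply return (a possibly transposed) x, storage and
# computation are O(1) regardless of batch size, as the class docstring states.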
| natanielruiz/android-yolo | jni-build/jni/include/tensorflow/contrib/distributions/python/ops/operator_pd_identity.py | Python | apache-2.0 | 7,686 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import exceptions
from openstack import resource
from openstack import utils
class Trunk(resource.Resource, resource.TagMixin):
resource_key = 'trunk'
resources_key = 'trunks'
base_path = '/trunks'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
_query_mapping = resource.QueryParameters(
'name', 'description', 'port_id', 'status', 'sub_ports',
project_id='tenant_id',
is_admin_state_up='admin_state_up',
**resource.TagMixin._tag_query_parameters
)
# Properties
#: Trunk name.
name = resource.Body('name')
    #: The ID of the project that owns the trunk. Only administrative
#: users can specify a project ID other than their own.
project_id = resource.Body('tenant_id')
#: The trunk description.
description = resource.Body('description')
    #: The administrative state of the trunk, which is up ``True`` or
#: down ``False``. *Type: bool*
is_admin_state_up = resource.Body('admin_state_up', type=bool)
#: The ID of the trunk's parent port
port_id = resource.Body('port_id')
#: The status for the trunk. Possible values are ACTIVE, DOWN, BUILD,
#: DEGRADED, and ERROR.
status = resource.Body('status')
#: A list of ports associated with the trunk.
sub_ports = resource.Body('sub_ports', type=list)
def add_subports(self, session, subports):
url = utils.urljoin('/trunks', self.id, 'add_subports')
resp = session.put(url, json={'sub_ports': subports})
exceptions.raise_from_response(resp)
self._body.attributes.update(resp.json())
return self
def delete_subports(self, session, subports):
url = utils.urljoin('/trunks', self.id, 'remove_subports')
resp = session.put(url, json={'sub_ports': subports})
exceptions.raise_from_response(resp)
self._body.attributes.update(resp.json())
return self
def get_subports(self, session):
url = utils.urljoin('/trunks', self.id, 'get_subports')
resp = session.get(url)
exceptions.raise_from_response(resp)
self._body.attributes.update(resp.json())
return resp.json()
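# Illustrative sketch (comments only; the sub_ports payload layout is an
# assumption based on the Neutron trunk API, and `sess` stands for an
# authenticated session obtained elsewhere):
#
#   trunk.add_subports(sess, [{
#       'port_id': '<subport uuid>',
#       'segmentation_type': 'vlan',
#       'segmentation_id': 100,
#   }])
#
# delete_subports() accepts the same list shape; when removing subports only
# the 'port_id' entries are expected to matter.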
| stackforge/python-openstacksdk | openstack/network/v2/trunk.py | Python | apache-2.0 | 2,810 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Channel class.
Data is sent out by calling the process_data function of the manager object.
"""
import logging
import threading
logger = logging.getLogger('linkworld')
class BaseChannel(threading.Thread):
"""
    Base device-communication class.
    Each communication mode implements its own behaviour on top of this class.
"""
def __init__(self, network_name, channel_name, channel_protocol, channel_params, channel_manager, channel_type):
self.channel_name = channel_name
self.channel_protocol = channel_protocol
self.channel_params = channel_params
self.network_name = network_name
self.channel_manager = channel_manager
self.channel_type = channel_type
threading.Thread.__init__(self)
def run(self):
pass
def send_cmd(self, device_info, device_cmd):
pass
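# Minimal subclass sketch (comments only; illustrative, and the exact
# channel_manager.process_data signature is an assumption based on the module
# docstring above):
#
#   class DummyChannel(BaseChannel):
#       def run(self):
#           # poll a device and hand its data to the manager
#           self.channel_manager.process_data(self.channel_name, {"value": 1})
#
#       def send_cmd(self, device_info, device_cmd):
#           logger.info("send %s to %s", device_cmd, device_info)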
| lianwutech/plugin_linkworld-discard- | libs/base_channel.py | Python | apache-2.0 | 861 |
#!/usr/bin/python -Es
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# selinux gui is a tool for the examining and modifying SELinux policy
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA
#
# author: Ryan Hallisey rhallisey@redhat.com
# author: Dan Walsh dwalsh@redhat.com
# author: Miroslav Grepl mgrepl@redhat.com
#
#
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
from sepolicy.sedbus import SELinuxDBus
import sys
import sepolicy
import selinux
from selinux import DISABLED, PERMISSIVE, ENFORCING
import sepolicy.network
import sepolicy.manpage
import dbus
import os
import re
import unicodedata
PROGNAME = "policycoreutils"
try:
import gettext
kwargs = {}
if sys.version_info < (3,):
kwargs['unicode'] = True
gettext.install(PROGNAME,
localedir="/usr/share/locale",
codeset='utf-8',
**kwargs)
except:
try:
import builtins
builtins.__dict__['_'] = str
except ImportError:
import __builtin__
__builtin__.__dict__['_'] = unicode
reverse_file_type_str = {}
for f in sepolicy.file_type_str:
reverse_file_type_str[sepolicy.file_type_str[f]] = f
enabled = [_("No"), _("Yes")]
action = [_("Disable"), _("Enable")]
def cmp(a, b):
if a is None and b is None:
return 0
if a is None:
return -1
if b is None:
return 1
return (a > b) - (a < b)
import distutils.sysconfig
ADVANCED_LABEL = (_("Advanced >>"), _("Advanced <<"))
ADVANCED_SEARCH_LABEL = (_("Advanced Search >>"), _("Advanced Search <<"))
OUTBOUND_PAGE = 0
INBOUND_PAGE = 1
TRANSITIONS_FROM_PAGE = 0
TRANSITIONS_TO_PAGE = 1
TRANSITIONS_FILE_PAGE = 2
EXE_PAGE = 0
WRITABLE_PAGE = 1
APP_PAGE = 2
BOOLEANS_PAGE = 0
FILES_PAGE = 1
NETWORK_PAGE = 2
TRANSITIONS_PAGE = 3
LOGIN_PAGE = 4
USER_PAGE = 5
LOCKDOWN_PAGE = 6
SYSTEM_PAGE = 7
FILE_EQUIV_PAGE = 8
START_PAGE = 9
keys = ["boolean", "fcontext", "fcontext-equiv", "port", "login", "user", "module", "node", "interface"]
DISABLED_TEXT = _("""<small>
To change from Disabled to Enforcing mode
- Change the system mode from Disabled to Permissive
- Reboot, so that the system can relabel
- Once the system is working as planned
* Change the system mode to Enforcing</small>
""")
class SELinuxGui():
def __init__(self, app=None, test=False):
self.finish_init = False
self.advanced_init = True
self.opage = START_PAGE
self.dbus = SELinuxDBus()
try:
customized = self.dbus.customized()
except dbus.exceptions.DBusException as e:
print(e)
self.quit()
self.init_cur()
self.application = app
self.filter_txt = ""
builder = Gtk.Builder() # BUILDER OBJ
self.code_path = distutils.sysconfig.get_python_lib(plat_specific=False) + "/sepolicy/"
glade_file = self.code_path + "sepolicy.glade"
builder.add_from_file(glade_file)
self.outer_notebook = builder.get_object("outer_notebook")
self.window = builder.get_object("SELinux_window")
self.main_selection_window = builder.get_object("Main_selection_menu")
self.main_advanced_label = builder.get_object("main_advanced_label")
self.popup = 0
self.applications_selection_button = builder.get_object("applications_selection_button")
self.revert_button = builder.get_object("Revert_button")
self.busy_cursor = Gdk.Cursor(Gdk.CursorType.WATCH)
self.ready_cursor = Gdk.Cursor(Gdk.CursorType.LEFT_PTR)
self.initialtype = selinux.selinux_getpolicytype()[1]
self.current_popup = None
self.import_export = None
self.clear_entry = True
self.files_add = False
self.network_add = False
self.all_domains = []
self.installed_list = []
self.previously_modified = {}
# file dialog
self.file_dialog = builder.get_object("add_path_dialog")
# Error check ***************************************
self.error_check_window = builder.get_object("error_check_window")
self.error_check_label = builder.get_object("error_check_label")
self.invalid_entry = False
# Advanced search window ****************************
self.advanced_search_window = builder.get_object("advanced_search_window")
self.advanced_search_filter = builder.get_object("advanced_filter")
self.advanced_search_filter.set_visible_func(self.filter_the_data)
self.advanced_search_sort = builder.get_object("advanced_sort")
self.advanced_filter_entry = builder.get_object("advanced_filter_entry")
self.advanced_search_treeview = builder.get_object("advanced_search_treeview")
self.advanced_search = False
# Login Items **************************************
self.login_label = builder.get_object("Login_label")
self.login_seuser_combobox = builder.get_object("login_seuser_combobox")
self.login_seuser_combolist = builder.get_object("login_seuser_liststore")
self.login_name_entry = builder.get_object("login_name_entry")
self.login_mls_label = builder.get_object("login_mls_label")
self.login_mls_entry = builder.get_object("login_mls_entry")
self.login_radio_button = builder.get_object("Login_button")
self.login_treeview = builder.get_object("login_treeview")
self.login_liststore = builder.get_object("login_liststore")
self.login_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.login_filter = builder.get_object("login_filter")
self.login_filter.set_visible_func(self.filter_the_data)
self.login_popup_window = builder.get_object("login_popup_window")
self.login_delete_liststore = builder.get_object("login_delete_liststore")
self.login_delete_window = builder.get_object("login_delete_window")
# Users Items **************************************
self.user_popup_window = builder.get_object("user_popup_window")
self.user_radio_button = builder.get_object("User_button")
self.user_liststore = builder.get_object("user_liststore")
self.user_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.user_filter = builder.get_object("user_filter")
self.user_filter.set_visible_func(self.filter_the_data)
self.user_treeview = builder.get_object("user_treeview")
self.user_roles_combobox = builder.get_object("user_roles_combobox")
self.user_roles_combolist = builder.get_object("user_roles_liststore")
self.user_label = builder.get_object("User_label")
self.user_name_entry = builder.get_object("user_name_entry")
self.user_mls_label = builder.get_object("user_mls_label")
self.user_mls_level_entry = builder.get_object("user_mls_level_entry")
self.user_mls_entry = builder.get_object("user_mls_entry")
self.user_combobox = builder.get_object("selinux_user_combobox")
self.user_delete_liststore = builder.get_object("user_delete_liststore")
self.user_delete_window = builder.get_object("user_delete_window")
# File Equiv Items **************************************
self.file_equiv_label = builder.get_object("file_equiv_label")
self.file_equiv_source_entry = builder.get_object("file_equiv_source_entry")
self.file_equiv_dest_entry = builder.get_object("file_equiv_dest_entry")
self.file_equiv_radio_button = builder.get_object("file_equiv_button")
self.file_equiv_treeview = builder.get_object("file_equiv_treeview")
self.file_equiv_liststore = builder.get_object("file_equiv_liststore")
self.file_equiv_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.file_equiv_popup_window = builder.get_object("file_equiv_popup_window")
self.file_equiv_treefilter = builder.get_object("file_equiv_filter")
self.file_equiv_treefilter.set_visible_func(self.filter_the_data)
self.file_equiv_delete_liststore = builder.get_object("file_equiv_delete_liststore")
self.file_equiv_delete_window = builder.get_object("file_equiv_delete_window")
# System Items **************************************
self.app_system_button = builder.get_object("app_system_button")
self.system_radio_button = builder.get_object("System_button")
self.lockdown_radio_button = builder.get_object("Lockdown_button")
self.systems_box = builder.get_object("Systems_box")
self.relabel_button = builder.get_object("Relabel_button")
self.relabel_button_no = builder.get_object("Relabel_button_no")
self.advanced_system = builder.get_object("advanced_system")
self.outer_notebook_frame = builder.get_object("outer_notebook_frame")
self.system_policy_label = builder.get_object("system_policy_type_label")
# Browse Items **************************************
self.select_button_browse = builder.get_object("select_button_browse")
self.cancel_button_browse = builder.get_object("cancel_button_browse")
# More types window items ***************************
self.moreTypes_window_files = builder.get_object("moreTypes_window_files")
self.more_types_files_liststore = builder.get_object("more_types_file_liststore")
self.moreTypes_treeview = builder.get_object("moreTypes_treeview_files")
# System policy type ********************************
self.system_policy_type_liststore = builder.get_object("system_policy_type_liststore")
self.system_policy_type_combobox = builder.get_object("system_policy_type_combobox")
self.policy_list = []
if self.populate_system_policy() < 2:
self.advanced_system.set_visible(False)
self.system_policy_label.set_visible(False)
self.system_policy_type_combobox.set_visible(False)
self.enforcing_button_default = builder.get_object("Enforcing_button_default")
self.permissive_button_default = builder.get_object("Permissive_button_default")
self.disabled_button_default = builder.get_object("Disabled_button_default")
self.initialize_system_default_mode()
# Lockdown Window *********************************
self.enable_unconfined_button = builder.get_object("enable_unconfined")
self.disable_unconfined_button = builder.get_object("disable_unconfined")
self.enable_permissive_button = builder.get_object("enable_permissive")
self.disable_permissive_button = builder.get_object("disable_permissive")
self.enable_ptrace_button = builder.get_object("enable_ptrace")
self.disable_ptrace_button = builder.get_object("disable_ptrace")
# Help Window *********************************
self.help_window = builder.get_object("help_window")
self.help_text = builder.get_object("help_textv")
self.info_text = builder.get_object("info_text")
self.help_image = builder.get_object("help_image")
self.forward_button = builder.get_object("forward_button")
self.back_button = builder.get_object("back_button")
# Update menu items *********************************
self.update_window = builder.get_object("update_window")
self.update_treeview = builder.get_object("update_treeview")
self.update_treestore = builder.get_object("Update_treestore")
self.apply_button = builder.get_object("apply_button")
self.update_button = builder.get_object("Update_button")
# Add button objects ********************************
self.add_button = builder.get_object("Add_button")
self.delete_button = builder.get_object("Delete_button")
self.files_path_entry = builder.get_object("files_path_entry")
self.network_ports_entry = builder.get_object("network_ports_entry")
self.files_popup_window = builder.get_object("files_popup_window")
self.network_popup_window = builder.get_object("network_popup_window")
self.popup_network_label = builder.get_object("Network_label")
self.popup_files_label = builder.get_object("files_label")
self.recursive_path_toggle = builder.get_object("make_path_recursive")
self.files_type_combolist = builder.get_object("files_type_combo_store")
self.files_class_combolist = builder.get_object("files_class_combo_store")
self.files_type_combobox = builder.get_object("files_type_combobox")
self.files_class_combobox = builder.get_object("files_class_combobox")
self.files_mls_label = builder.get_object("files_mls_label")
self.files_mls_entry = builder.get_object("files_mls_entry")
self.advanced_text_files = builder.get_object("Advanced_text_files")
self.files_cancel_button = builder.get_object("cancel_delete_files")
self.network_tcp_button = builder.get_object("tcp_button")
self.network_udp_button = builder.get_object("udp_button")
self.network_port_type_combolist = builder.get_object("network_type_combo_store")
self.network_port_type_combobox = builder.get_object("network_type_combobox")
self.network_mls_label = builder.get_object("network_mls_label")
self.network_mls_entry = builder.get_object("network_mls_entry")
self.advanced_text_network = builder.get_object("Advanced_text_network")
self.network_cancel_button = builder.get_object("cancel_network_delete")
# Add button objects ********************************
# Modify items **************************************
self.show_mislabeled_files_only = builder.get_object("Show_mislabeled_files")
self.mislabeled_files_label = builder.get_object("mislabeled_files_label")
self.warning_files = builder.get_object("warning_files")
self.modify_button = builder.get_object("Modify_button")
self.modify_button.set_sensitive(False)
# Modify items **************************************
# Fix label *****************************************
self.fix_label_window = builder.get_object("fix_label_window")
self.fixlabel_label = builder.get_object("fixlabel_label")
self.fix_label_cancel = builder.get_object("fix_label_cancel")
# Fix label *****************************************
# Delete items **************************************
self.files_delete_window = builder.get_object("files_delete_window")
self.files_delete_treeview = builder.get_object("files_delete_treeview")
self.files_delete_liststore = builder.get_object("files_delete_liststore")
self.network_delete_window = builder.get_object("network_delete_window")
self.network_delete_treeview = builder.get_object("network_delete_treeview")
self.network_delete_liststore = builder.get_object("network_delete_liststore")
# Delete items **************************************
# Progress bar **************************************
self.progress_bar = builder.get_object("progress_bar")
# Progress bar **************************************
# executable_files items ****************************
self.executable_files_treeview = builder.get_object("Executable_files_treeview") # Get the executable files tree view
self.executable_files_filter = builder.get_object("executable_files_filter")
self.executable_files_filter.set_visible_func(self.filter_the_data)
self.executable_files_tab = builder.get_object("Executable_files_tab")
self.executable_files_tab_tooltip_txt = self.executable_files_tab.get_tooltip_text()
self.executable_files_liststore = builder.get_object("executable_files_treestore")
self.executable_files_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.files_radio_button = builder.get_object("files_button")
self.files_button_tooltip_txt = self.files_radio_button.get_tooltip_text()
# executable_files items ****************************
# writable files items ******************************
self.writable_files_treeview = builder.get_object("Writable_files_treeview") # Get the Writable files tree view
self.writable_files_liststore = builder.get_object("writable_files_treestore") # Contains the tree with File Path, SELinux File Label, Class
self.writable_files_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.writable_files_filter = builder.get_object("writable_files_filter")
self.writable_files_filter.set_visible_func(self.filter_the_data)
self.writable_files_tab = builder.get_object("Writable_files_tab")
self.writable_files_tab_tooltip_txt = self.writable_files_tab.get_tooltip_text()
# writable files items ******************************
# Application File Types ****************************
self.application_files_treeview = builder.get_object("Application_files_treeview") # Get the Application files tree view
self.application_files_filter = builder.get_object("application_files_filter") # Contains the tree with File Path, Description, Class
self.application_files_filter.set_visible_func(self.filter_the_data)
self.application_files_tab = builder.get_object("Application_files_tab")
self.application_files_tab_tooltip_txt = self.writable_files_tab.get_tooltip_text()
self.application_files_liststore = builder.get_object("application_files_treestore")
self.application_files_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.application_files_tab = builder.get_object("Application_files_tab")
self.application_files_tab_tooltip_txt = self.application_files_tab.get_tooltip_text()
# Application File Type *****************************
# network items *************************************
self.network_radio_button = builder.get_object("network_button")
self.network_button_tooltip_txt = self.network_radio_button.get_tooltip_text()
self.network_out_treeview = builder.get_object("outbound_treeview")
self.network_out_liststore = builder.get_object("network_out_liststore")
self.network_out_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.network_out_filter = builder.get_object("network_out_filter")
self.network_out_filter.set_visible_func(self.filter_the_data)
self.network_out_tab = builder.get_object("network_out_tab")
self.network_out_tab_tooltip_txt = self.network_out_tab.get_tooltip_text()
self.network_in_treeview = builder.get_object("inbound_treeview")
self.network_in_liststore = builder.get_object("network_in_liststore")
self.network_in_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.network_in_filter = builder.get_object("network_in_filter")
self.network_in_filter.set_visible_func(self.filter_the_data)
self.network_in_tab = builder.get_object("network_in_tab")
self.network_in_tab_tooltip_txt = self.network_in_tab.get_tooltip_text()
# network items *************************************
# boolean items ************************************
self.boolean_treeview = builder.get_object("Boolean_treeview") # Get the booleans tree list
self.boolean_liststore = builder.get_object("boolean_liststore")
self.boolean_liststore.set_sort_column_id(2, Gtk.SortType.ASCENDING)
self.boolean_filter = builder.get_object("boolean_filter")
self.boolean_filter.set_visible_func(self.filter_the_data)
self.boolean_more_detail_window = builder.get_object("booleans_more_detail_window")
self.boolean_more_detail_treeview = builder.get_object("booleans_more_detail_treeview")
self.boolean_more_detail_tree_data_set = builder.get_object("booleans_more_detail_liststore")
self.boolean_radio_button = builder.get_object("Booleans_button")
self.active_button = self.boolean_radio_button
self.boolean_button_tooltip_txt = self.boolean_radio_button.get_tooltip_text()
# boolean items ************************************
# transitions items ************************************
self.transitions_into_treeview = builder.get_object("transitions_into_treeview") # Get the transitions tree list Enabled, source, Executable File
self.transitions_into_liststore = builder.get_object("transitions_into_liststore") # Contains the tree with
self.transitions_into_liststore.set_sort_column_id(1, Gtk.SortType.ASCENDING)
self.transitions_into_filter = builder.get_object("transitions_into_filter")
self.transitions_into_filter.set_visible_func(self.filter_the_data)
self.transitions_into_tab = builder.get_object("Transitions_into_tab")
self.transitions_into_tab_tooltip_txt = self.transitions_into_tab.get_tooltip_text()
self.transitions_radio_button = builder.get_object("Transitions_button")
self.transitions_button_tooltip_txt = self.transitions_radio_button.get_tooltip_text()
self.transitions_from_treeview = builder.get_object("transitions_from_treeview") # Get the transitions tree list
self.transitions_from_treestore = builder.get_object("transitions_from_treestore") # Contains the tree with Enabled, Executable File Type, Transtype
self.transitions_from_treestore.set_sort_column_id(2, Gtk.SortType.ASCENDING)
self.transitions_from_filter = builder.get_object("transitions_from_filter")
self.transitions_from_filter.set_visible_func(self.filter_the_data)
self.transitions_from_tab = builder.get_object("Transitions_from_tab")
self.transitions_from_tab_tooltip_txt = self.transitions_from_tab.get_tooltip_text()
self.transitions_file_treeview = builder.get_object("file_transitions_treeview") # Get the transitions tree list
self.transitions_file_liststore = builder.get_object("file_transitions_liststore") # Contains the tree with Enabled, Executable File Type, Transtype
self.transitions_file_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.transitions_file_filter = builder.get_object("file_transitions_filter")
self.transitions_file_filter.set_visible_func(self.filter_the_data)
self.transitions_file_tab = builder.get_object("file_transitions")
self.transitions_file_tab_tooltip_txt = self.transitions_from_tab.get_tooltip_text()
# transitions items ************************************
# Combobox and Entry items **************************
self.combobox_menu = builder.get_object("combobox_org") # This is the combobox box object, aka the arrow next to the entry text bar
self.application_liststore = builder.get_object("application_liststore")
self.completion_entry = builder.get_object("completion_entry") # self.combobox_menu.get_child()
self.entrycompletion_obj = builder.get_object("entrycompletion_obj")
#self.entrycompletion_obj = Gtk.EntryCompletion()
self.entrycompletion_obj.set_minimum_key_length(0)
self.entrycompletion_obj.set_text_column(0)
self.entrycompletion_obj.set_match_func(self.match_func, None)
self.completion_entry.set_completion(self.entrycompletion_obj)
self.completion_entry.set_icon_from_stock(0, Gtk.STOCK_FIND)
# Combobox and Entry items **************************
# Modify buttons ************************************
self.show_modified_only = builder.get_object("Show_modified_only_toggle")
# Modify button *************************************
# status bar *****************************************
self.current_status_label = builder.get_object("Enforcing_label")
self.current_status_enforcing = builder.get_object("Enforcing_button")
self.current_status_permissive = builder.get_object("Permissive_button")
self.status_bar = builder.get_object("status_bar")
self.context_id = self.status_bar.get_context_id("SELinux status")
# filters *********************************************
self.filter_entry = builder.get_object("filter_entry")
self.filter_box = builder.get_object("filter_box")
self.add_modify_delete_box = builder.get_object("add_modify_delete_box")
# Get_model() sets the tree model filter to be the parent of the tree model (tree model has all the data in it)
# Toggle button ****************************************
self.cell = builder.get_object("activate")
self.del_cell_files = builder.get_object("files_toggle_delete")
self.del_cell_files.connect("toggled", self.on_toggle_update, self.files_delete_liststore)
self.del_cell_files_equiv = builder.get_object("file_equiv_toggle_delete1")
self.del_cell_files_equiv.connect("toggled", self.on_toggle_update, self.file_equiv_delete_liststore)
self.del_cell_user = builder.get_object("user_toggle_delete")
self.del_cell_user.connect("toggled", self.on_toggle_update, self.user_delete_liststore)
self.del_cell_login = builder.get_object("login_toggle_delete")
self.del_cell_login.connect("toggled", self.on_toggle_update, self.login_delete_liststore)
self.del_cell_network = builder.get_object("network_toggle_delete")
self.del_cell_network.connect("toggled", self.on_toggle_update, self.network_delete_liststore)
self.update_cell = builder.get_object("toggle_update")
# Notebook items ***************************************
self.outer_notebook = builder.get_object("outer_notebook")
self.inner_notebook_files = builder.get_object("files_inner_notebook")
self.inner_notebook_network = builder.get_object("network_inner_notebook")
self.inner_notebook_transitions = builder.get_object("transitions_inner_notebook")
# logind gui ***************************************
loading_gui = builder.get_object("loading_gui")
self.update_cell.connect("toggled", self.on_toggle_update, self.update_treestore)
self.all_entries = []
# Need to connect button on code because the tree view model is a treeviewsort
self.cell.connect("toggled", self.on_toggle, self.boolean_liststore)
self.loading = 1
path = None
if test:
self.all_domains = ["httpd_t", "abrt_t"]
if app and app not in self.all_domains:
self.all_domains.append(app)
else:
self.all_domains = sepolicy.get_all_domains()
self.all_domains.sort(key=str.lower)
if app and app not in self.all_domains:
                self.error(_("%s is not a valid domain") % app)
self.quit()
loading_gui.show()
length = len(self.all_domains)
entrypoint_dict = sepolicy.get_init_entrypoints_str()
for domain in self.all_domains:
# After the user selects a path in the drop down menu call
# get_init_entrypoint_target(entrypoint) to get the transtype
# which will give you the application
self.combo_box_add(domain, domain)
self.percentage = float(float(self.loading) / float(length))
self.progress_bar.set_fraction(self.percentage)
self.progress_bar.set_pulse_step(self.percentage)
self.idle_func()
for entrypoint in entrypoint_dict.get(domain, []):
path = sepolicy.find_entrypoint_path(entrypoint)
if path:
self.combo_box_add(path, domain)
self.installed_list.append(path)
self.loading += 1
loading_gui.hide()
self.entrycompletion_obj.set_model(self.application_liststore)
self.advanced_search_treeview.set_model(self.advanced_search_sort)
dic = {
"on_combo_button_clicked": self.open_combo_menu,
"on_disable_ptrace_toggled": self.on_disable_ptrace,
"on_SELinux_window_configure_event": self.hide_combo_menu,
"on_entrycompletion_obj_match_selected": self.set_application_label,
"on_filter_changed": self.get_filter_data,
"on_save_changes_file_equiv_clicked": self.update_to_file_equiv,
"on_save_changes_login_clicked": self.update_to_login,
"on_save_changes_user_clicked": self.update_to_user,
"on_save_changes_files_clicked": self.update_to_files,
"on_save_changes_network_clicked": self.update_to_network,
"on_Advanced_text_files_button_press_event": self.reveal_advanced,
"item_in_tree_selected": self.cursor_changed,
"on_Application_file_types_treeview_configure_event": self.resize_wrap,
"on_save_delete_clicked": self.on_save_delete_clicked,
"on_moreTypes_treeview_files_row_activated": self.populate_type_combo,
"on_retry_button_files_clicked": self.invalid_entry_retry,
"on_make_path_recursive_toggled": self.recursive_path,
"on_files_path_entry_button_press_event": self.highlight_entry_text,
"on_files_path_entry_changed": self.autofill_add_files_entry,
"on_select_type_files_clicked": self.select_type_more,
"on_choose_file": self.on_browse_select,
"on_Enforcing_button_toggled": self.set_enforce,
"on_confirmation_close": self.confirmation_close,
"on_column_clicked": self.column_clicked,
"on_tab_switch": self.clear_filters,
"on_file_equiv_button_clicked": self.show_file_equiv_page,
"on_app/system_button_clicked": self.system_interface,
"on_app/users_button_clicked": self.users_interface,
"on_show_advanced_search_window": self.on_show_advanced_search_window,
"on_Show_mislabeled_files_toggled": self.show_mislabeled_files,
"on_Browse_button_files_clicked": self.browse_for_files,
"on_cancel_popup_clicked": self.close_popup,
"on_treeview_cursor_changed": self.cursor_changed,
"on_login_seuser_combobox_changed": self.login_seuser_combobox_change,
"on_user_roles_combobox_changed": self.user_roles_combobox_change,
"on_cancel_button_browse_clicked": self.close_config_window,
"on_apply_button_clicked": self.apply_changes_button_press,
"on_Revert_button_clicked": self.update_or_revert_changes,
"on_Update_button_clicked": self.update_or_revert_changes,
"on_advanced_filter_entry_changed": self.get_advanced_filter_data,
"on_advanced_search_treeview_row_activated": self.advanced_item_selected,
"on_Select_advanced_search_clicked": self.advanced_item_button_push,
"on_info_button_button_press_event": self.on_help_button,
"on_back_button_clicked": self.on_help_back_clicked,
"on_forward_button_clicked": self.on_help_forward_clicked,
"on_Boolean_treeview_columns_changed": self.resize_columns,
"on_completion_entry_changed": self.application_selected,
"on_Add_button_clicked": self.add_button_clicked,
"on_Delete_button_clicked": self.delete_button_clicked,
"on_Modify_button_clicked": self.modify_button_clicked,
"on_Show_modified_only_toggled": self.on_show_modified_only,
"on_cancel_button_config_clicked": self.close_config_window,
"on_Import_button_clicked": self.import_config_show,
"on_Export_button_clicked": self.export_config_show,
"on_enable_unconfined_toggled": self.unconfined_toggle,
"on_enable_permissive_toggled": self.permissive_toggle,
"on_system_policy_type_combobox_changed": self.change_default_policy,
"on_Enforcing_button_default_toggled": self.change_default_mode,
"on_Permissive_button_default_toggled": self.change_default_mode,
"on_Disabled_button_default_toggled": self.change_default_mode,
"on_Relabel_button_toggled_cb": self.relabel_on_reboot,
"on_advanced_system_button_press_event": self.reveal_advanced_system,
"on_files_type_combobox_changed": self.show_more_types,
"on_filter_row_changed": self.filter_the_data,
"on_button_toggled": self.tab_change,
"gtk_main_quit": self.closewindow
}
self.previously_modified_initialize(customized)
builder.connect_signals(dic)
self.window.show() # Show the gui to the screen
GLib.timeout_add_seconds(5, self.selinux_status)
self.selinux_status()
self.lockdown_inited = False
self.add_modify_delete_box.hide()
self.filter_box.hide()
if self.status == DISABLED:
self.show_system_page()
else:
if self.application:
self.applications_selection_button.set_label(self.application)
self.completion_entry.set_text(self.application)
self.show_applications_page()
self.tab_change()
else:
self.clearbuttons()
self.outer_notebook.set_current_page(START_PAGE)
self.reinit()
self.finish_init = True
Gtk.main()
def init_cur(self):
self.cur_dict = {}
for k in keys:
self.cur_dict[k] = {}
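    # cur_dict maps each customization type in `keys` (boolean, fcontext, port,
    # login, user, ...) to a dict of pending changes. For ports, for example,
    # net_update() below looks entries up by a (ports, protocol) tuple and reads
    # "action" and "type" fields from the stored value; the value layout for the
    # other keys is built elsewhere in this class and is not shown in this excerpt.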
def remove_cur(self, ctr):
i = 0
for k in self.cur_dict:
for j in self.cur_dict[k]:
if i == ctr:
del(self.cur_dict[k][j])
return
i += 1
def selinux_status(self):
try:
self.status = selinux.security_getenforce()
except OSError:
self.status = DISABLED
if self.status == DISABLED:
self.current_status_label.set_sensitive(False)
self.current_status_enforcing.set_sensitive(False)
self.current_status_permissive.set_sensitive(False)
self.enforcing_button_default.set_sensitive(False)
self.status_bar.push(self.context_id, _("System Status: Disabled"))
self.info_text.set_label(DISABLED_TEXT)
else:
self.set_enforce_text(self.status)
if os.path.exists('/.autorelabel'):
self.relabel_button.set_active(True)
else:
self.relabel_button_no.set_active(True)
policytype = selinux.selinux_getpolicytype()[1]
mode = selinux.selinux_getenforcemode()[1]
if mode == ENFORCING:
self.enforcing_button_default.set_active(True)
if mode == PERMISSIVE:
self.permissive_button_default.set_active(True)
if mode == DISABLED:
self.disabled_button_default.set_active(True)
return True
def lockdown_init(self):
if self.lockdown_inited:
return
self.wait_mouse()
self.lockdown_inited = True
self.disable_ptrace_button.set_active(selinux.security_get_boolean_active("deny_ptrace"))
self.module_dict = {}
for m in self.dbus.semodule_list().split("\n"):
mod = m.split()
if len(mod) < 3:
continue
self.module_dict[mod[1]] = { "priority": mod[0], "Disabled" : (len(mod) > 3) }
self.enable_unconfined_button.set_active(not self.module_dict["unconfined"]["Disabled"])
self.enable_permissive_button.set_active(not self.module_dict["permissivedomains"]["Disabled"])
self.ready_mouse()
def column_clicked(self, treeview, treepath, treecol, *args):
iter = self.get_selected_iter()
if not iter:
return
if self.opage == BOOLEANS_PAGE:
if treecol.get_name() == "more_detail_col":
self.display_more_detail(self.window, treepath)
if self.opage == FILES_PAGE:
visible = self.liststore.get_value(iter, 3)
# If visible is true then fix mislabeled will be visible
if treecol.get_name() == "restorecon_col" and visible:
self.fix_mislabeled(self.liststore.get_value(iter, 0))
if self.opage == TRANSITIONS_PAGE:
bool_name = self.liststore.get_value(iter, 1)
if bool_name:
self.boolean_radio_button.clicked()
self.filter_entry.set_text(bool_name)
def idle_func(self):
while Gtk.events_pending():
Gtk.main_iteration()
def match_func(self, completion, key_string, iter, func_data):
try:
if self.application_liststore.get_value(iter, 0).find(key_string) != -1:
return True
return False
except AttributeError:
pass
def help_show_page(self):
self.back_button.set_sensitive(self.help_page != 0)
self.forward_button.set_sensitive(self.help_page < (len(self.help_list) - 1))
try:
fd = open("%shelp/%s.txt" % (self.code_path, self.help_list[self.help_page]), "r")
buf = fd.read()
fd.close()
except IOError:
buf = ""
help_text = self.help_text.get_buffer()
help_text.set_text(buf % {"APP": self.application})
self.help_text.set_buffer(help_text)
self.help_image.set_from_file("%shelp/%s.png" % (self.code_path, self.help_list[self.help_page]))
self.show_popup(self.help_window)
def on_help_back_clicked(self, *args):
self.help_page -= 1
self.help_show_page()
def on_help_forward_clicked(self, *args):
self.help_page += 1
self.help_show_page()
def on_help_button(self, *args):
self.help_page = 0
self.help_list = []
if self.opage == START_PAGE:
self.help_window.set_title(_("Help: Start Page"))
self.help_list = ["start"]
if self.opage == BOOLEANS_PAGE:
self.help_window.set_title(_("Help: Booleans Page"))
self.help_list = ["booleans", "booleans_toggled", "booleans_more", "booleans_more_show"]
if self.opage == FILES_PAGE:
ipage = self.inner_notebook_files.get_current_page()
if ipage == EXE_PAGE:
self.help_window.set_title(_("Help: Executable Files Page"))
self.help_list = ["files_exec"]
if ipage == WRITABLE_PAGE:
self.help_window.set_title(_("Help: Writable Files Page"))
self.help_list = ["files_write"]
if ipage == APP_PAGE:
self.help_window.set_title(_("Help: Application Types Page"))
self.help_list = ["files_app"]
if self.opage == NETWORK_PAGE:
ipage = self.inner_notebook_network.get_current_page()
if ipage == OUTBOUND_PAGE:
self.help_window.set_title(_("Help: Outbound Network Connections Page"))
self.help_list = ["ports_outbound"]
if ipage == INBOUND_PAGE:
self.help_window.set_title(_("Help: Inbound Network Connections Page"))
self.help_list = ["ports_inbound"]
if self.opage == TRANSITIONS_PAGE:
ipage = self.inner_notebook_transitions.get_current_page()
if ipage == TRANSITIONS_FROM_PAGE:
self.help_window.set_title(_("Help: Transition from application Page"))
self.help_list = ["transition_from", "transition_from_boolean", "transition_from_boolean_1", "transition_from_boolean_2"]
if ipage == TRANSITIONS_TO_PAGE:
self.help_window.set_title(_("Help: Transition into application Page"))
self.help_list = ["transition_to"]
if ipage == TRANSITIONS_FILE_PAGE:
self.help_window.set_title(_("Help: Transition application file Page"))
self.help_list = ["transition_file"]
if self.opage == SYSTEM_PAGE:
self.help_window.set_title(_("Help: Systems Page"))
self.help_list = ["system", "system_boot_mode", "system_current_mode", "system_export", "system_policy_type", "system_relabel"]
if self.opage == LOCKDOWN_PAGE:
self.help_window.set_title(_("Help: Lockdown Page"))
self.help_list = ["lockdown", "lockdown_unconfined", "lockdown_permissive", "lockdown_ptrace"]
if self.opage == LOGIN_PAGE:
self.help_window.set_title(_("Help: Login Page"))
self.help_list = ["login", "login_default"]
if self.opage == USER_PAGE:
self.help_window.set_title(_("Help: SELinux User Page"))
self.help_list = ["users"]
if self.opage == FILE_EQUIV_PAGE:
self.help_window.set_title(_("Help: File Equivalence Page"))
self.help_list = ["file_equiv"]
return self.help_show_page()
def open_combo_menu(self, *args):
if self.popup == 0:
self.popup = 1
location = self.window.get_position()
self.main_selection_window.move(location[0] + 2, location[1] + 65)
self.main_selection_window.show()
else:
self.main_selection_window.hide()
self.popup = 0
def hide_combo_menu(self, *args):
self.main_selection_window.hide()
self.popup = 0
def set_application_label(self, *args):
self.set_application_label = True
def resize_wrap(self, *args):
print(args)
def initialize_system_default_mode(self):
self.enforce_mode = selinux.selinux_getenforcemode()[1]
if self.enforce_mode == ENFORCING:
self.enforce_button = self.enforcing_button_default
if self.enforce_mode == PERMISSIVE:
self.enforce_button = self.permissive_button_default
if self.enforce_mode == DISABLED:
self.enforce_button = self.disabled_button_default
def populate_system_policy(self):
types = next(os.walk(selinux.selinux_path(), topdown=True))[1]
types.sort()
ctr = 0
for item in types:
iter = self.system_policy_type_liststore.append()
self.system_policy_type_liststore.set_value(iter, 0, item)
if item == self.initialtype:
self.system_policy_type_combobox.set_active(ctr)
self.typeHistory = ctr
ctr += 1
return ctr
def filter_the_data(self, list, iter, *args):
# When there is no txt in the box show all items in the tree
if self.filter_txt == "":
return True
try:
for x in range(0, list.get_n_columns()):
try:
val = list.get_value(iter, x)
if val is True or val is False or val is None:
continue
# Returns true if filter_txt exists within the val
if(val.find(self.filter_txt) != -1 or val.lower().find(self.filter_txt) != -1):
return True
except (AttributeError, TypeError):
pass
except: # ValueError:
pass
return False
def net_update(self, app, netd, protocol, direction, model):
for k in netd.keys():
for t, ports in netd[k]:
pkey = (",".join(ports), protocol)
if pkey in self.cur_dict["port"]:
if self.cur_dict["port"][pkey]["action"] == "-d":
continue
if t != self.cur_dict["port"][pkey]["type"]:
continue
self.network_initial_data_insert(model, ", ".join(ports), t, protocol)
def file_equiv_initialize(self):
self.wait_mouse()
edict = sepolicy.get_file_equiv()
self.file_equiv_liststore.clear()
for f in edict:
iter = self.file_equiv_liststore.append()
if edict[f]["modify"]:
name = self.markup(f)
equiv = self.markup(edict[f]["equiv"])
else:
name = f
equiv = edict[f]["equiv"]
self.file_equiv_liststore.set_value(iter, 0, name)
self.file_equiv_liststore.set_value(iter, 1, equiv)
self.file_equiv_liststore.set_value(iter, 2, edict[f]["modify"])
self.ready_mouse()
def user_initialize(self):
self.wait_mouse()
self.user_liststore.clear()
for u in sepolicy.get_selinux_users():
iter = self.user_liststore.append()
self.user_liststore.set_value(iter, 0, str(u["name"]))
roles = u["roles"]
if "object_r" in roles:
roles.remove("object_r")
self.user_liststore.set_value(iter, 1, ", ".join(roles))
self.user_liststore.set_value(iter, 2, u["level"])
self.user_liststore.set_value(iter, 3, u["range"])
self.user_liststore.set_value(iter, 4, True)
self.ready_mouse()
def login_initialize(self):
self.wait_mouse()
self.login_liststore.clear()
for u in sepolicy.get_login_mappings():
iter = self.login_liststore.append()
self.login_liststore.set_value(iter, 0, u["name"])
self.login_liststore.set_value(iter, 1, u["seuser"])
self.login_liststore.set_value(iter, 2, u["mls"])
self.login_liststore.set_value(iter, 3, True)
self.ready_mouse()
def network_initialize(self, app):
netd = sepolicy.network.get_network_connect(app, "tcp", "name_connect", check_bools=True)
self.net_update(app, netd, "tcp", OUTBOUND_PAGE, self.network_out_liststore)
netd = sepolicy.network.get_network_connect(app, "tcp", "name_bind", check_bools=True)
self.net_update(app, netd, "tcp", INBOUND_PAGE, self.network_in_liststore)
netd = sepolicy.network.get_network_connect(app, "udp", "name_bind", check_bools=True)
self.net_update(app, netd, "udp", INBOUND_PAGE, self.network_in_liststore)
def network_initial_data_insert(self, model, ports, portType, protocol):
iter = model.append()
model.set_value(iter, 0, ports)
model.set_value(iter, 1, protocol)
model.set_value(iter, 2, portType)
model.set_value(iter, 4, True)
def combo_set_active_text(self, combobox, val):
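        # Select 'val' in the combobox; if it is not present, append it to the model
        # (before a trailing 'More...' entry when one exists) and then select it.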
ctr = 0
liststore = combobox.get_model()
for i in liststore:
if i[0] == val:
combobox.set_active(ctr)
return
ctr += 1
niter = liststore.get_iter(ctr - 1)
if liststore.get_value(niter, 0) == _("More..."):
iter = liststore.insert_before(niter)
ctr = ctr - 1
else:
iter = liststore.append()
liststore.set_value(iter, 0, val)
combobox.set_active(ctr)
def combo_get_active_text(self, combobox):
liststore = combobox.get_model()
index = combobox.get_active()
if index < 0:
return None
iter = liststore.get_iter(index)
return liststore.get_value(iter, 0)
def combo_box_add(self, val, val1):
if val is None:
return
iter = self.application_liststore.append()
self.application_liststore.set_value(iter, 0, val)
self.application_liststore.set_value(iter, 1, val1)
def select_type_more(self, *args):
app = self.moreTypes_treeview.get_selection()
iter = app.get_selected()[1]
if iter is None:
return
app = self.more_types_files_liststore.get_value(iter, 0)
self.combo_set_active_text(self.files_type_combobox, app)
self.closewindow(self.moreTypes_window_files)
def advanced_item_button_push(self, *args):
row = self.advanced_search_treeview.get_selection()
model, iter = row.get_selected()
iter = model.convert_iter_to_child_iter(iter)
iter = self.advanced_search_filter.convert_iter_to_child_iter(iter)
app = self.application_liststore.get_value(iter, 1)
if app is None:
return
self.advanced_filter_entry.set_text('')
self.advanced_search_window.hide()
self.reveal_advanced(self.main_advanced_label)
self.completion_entry.set_text(app)
def advanced_item_selected(self, treeview, path, *args):
iter = self.advanced_search_filter.get_iter(path)
iter = self.advanced_search_filter.convert_iter_to_child_iter(iter)
app = self.application_liststore.get_value(iter, 1)
self.advanced_filter_entry.set_text('')
self.advanced_search_window.hide()
self.reveal_advanced(self.main_advanced_label)
self.completion_entry.set_text(app)
self.application_selected()
def find_application(self, app):
if app and len(app) > 0:
for items in self.application_liststore:
if app == items[0]:
return True
return False
def application_selected(self, *args):
self.show_mislabeled_files_only.set_visible(False)
self.mislabeled_files_label.set_visible(False)
self.warning_files.set_visible(False)
self.filter_entry.set_text('')
app = self.completion_entry.get_text()
if not self.find_application(app):
return
self.show_applications_page()
self.add_button.set_sensitive(True)
self.delete_button.set_sensitive(True)
# Clear the tree to prepare for a new selection otherwise
self.executable_files_liststore.clear()
        # data will pile up every time the user selects a new item from the drop down menu
self.network_in_liststore.clear()
self.network_out_liststore.clear()
self.boolean_liststore.clear()
self.transitions_into_liststore.clear()
self.transitions_from_treestore.clear()
self.application_files_liststore.clear()
self.writable_files_liststore.clear()
self.transitions_file_liststore.clear()
try:
if app[0] == '/':
app = sepolicy.get_init_transtype(app)
if not app:
return
self.application = app
except IndexError:
pass
self.wait_mouse()
self.previously_modified_initialize(self.dbus.customized())
self.reinit()
self.boolean_initialize(app)
self.mislabeled_files = False
self.executable_files_initialize(app)
self.network_initialize(app)
self.writable_files_initialize(app)
self.transitions_into_initialize(app)
self.transitions_from_initialize(app)
self.application_files_initialize(app)
self.transitions_files_initialize(app)
        self.executable_files_tab.set_tooltip_text(_("File path used to enter the '%s' domain.") % app)
        self.writable_files_tab.set_tooltip_text(_("Files to which the '%s' domain can write.") % app)
        self.network_out_tab.set_tooltip_text(_("Network Ports to which the '%s' is allowed to connect.") % app)
        self.network_in_tab.set_tooltip_text(_("Network Ports to which the '%s' is allowed to listen.") % app)
        self.application_files_tab.set_tooltip_text(_("File Types defined for the '%s'.") % app)
        self.boolean_radio_button.set_tooltip_text(_("Display boolean information that can be used to modify the policy for the '%s'.") % app)
        self.files_radio_button.set_tooltip_text(_("Display file type information that can be used by the '%s'.") % app)
        self.network_radio_button.set_tooltip_text(_("Display network ports to which the '%s' can connect or listen to.") % app)
        self.transitions_into_tab.set_label(_("Application Transitions Into '%s'") % app)
        self.transitions_from_tab.set_label(_("Application Transitions From '%s'") % app)
        self.transitions_file_tab.set_label(_("File Transitions From '%s'") % app)
        self.transitions_into_tab.set_tooltip_text(_("Executables which will transition to '%s', when executing selected domains entrypoint.") % app)
        self.transitions_from_tab.set_tooltip_text(_("Executables which will transition to a different domain, when '%s' executes them.") % app)
        self.transitions_file_tab.set_tooltip_text(_("Files by '%s' with transitions to a different label.") % app)
        self.transitions_radio_button.set_tooltip_text(_("Display applications that can transition into or out of the '%s'.") % app)
self.application = app
self.applications_selection_button.set_label(self.application)
self.ready_mouse()
def reinit(self):
sepolicy.reinit()
self.fcdict = sepolicy.get_fcdict()
self.local_file_paths = sepolicy.get_local_file_paths()
def previously_modified_initialize(self, buf):
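        # Parse the customization dump returned by self.dbus.customized() (one
        # semanage-style record per line) into self.cust_dict, keyed by record type.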
self.cust_dict = {}
for i in buf.split("\n"):
rec = i.split()
if len(rec) == 0:
continue
if rec[1] == "-D":
continue
if rec[0] not in self.cust_dict:
self.cust_dict[rec[0]] = {}
if rec[0] == "boolean":
self.cust_dict["boolean"][rec[-1]] = {"active": rec[2] == "-1"}
if rec[0] == "login":
self.cust_dict["login"][rec[-1]] = {"seuser": rec[3], "range": rec[5]}
if rec[0] == "interface":
self.cust_dict["interface"][rec[-1]] = {"type": rec[3]}
if rec[0] == "user":
self.cust_dict["user"][rec[-1]] = {"level": "s0", "range": rec[3], "role": rec[5]}
if rec[0] == "port":
self.cust_dict["port"][(rec[-1], rec[-2])] = {"type": rec[3]}
if rec[0] == "node":
self.cust_dict["node"][rec[-1]] = {"mask": rec[3], "protocol": rec[5], "type": rec[7]}
if rec[0] == "fcontext":
if rec[2] == "-e":
if "fcontext-equiv" not in self.cust_dict:
self.cust_dict["fcontext-equiv"] = {}
self.cust_dict["fcontext-equiv"][(rec[-1])] = {"equiv": rec[3]}
else:
self.cust_dict["fcontext"][(rec[-1], rec[3])] = {"type": rec[5]}
if rec[0] == "module":
self.cust_dict["module"][rec[-1]] = {"enabled": rec[2] != "-d"}
if "module" not in self.cust_dict:
return
for semodule, button in [("unconfined", self.disable_unconfined_button), ("permissivedomains", self.disable_permissive_button)]:
if semodule in self.cust_dict["module"]:
button.set_active(self.cust_dict["module"][semodule]["enabled"])
for i in keys:
if i not in self.cust_dict:
self.cust_dict.update({i: {}})
def executable_files_initialize(self, application):
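        # List the entrypoint paths that lead into the selected domain, skipping paths
        # deleted or retyped by a pending local customization.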
self.entrypoints = sepolicy.get_entrypoints(application)
for exe in self.entrypoints.keys():
if len(self.entrypoints[exe]) == 0:
continue
file_class = self.entrypoints[exe][1]
for path in self.entrypoints[exe][0]:
if (path, file_class) in self.cur_dict["fcontext"]:
if self.cur_dict["fcontext"][(path, file_class)]["action"] == "-d":
continue
if exe != self.cur_dict["fcontext"][(path, file_class)]["type"]:
continue
self.files_initial_data_insert(self.executable_files_liststore, path, exe, file_class)
def mislabeled(self, path):
try:
con = selinux.matchpathcon(path, 0)[1]
cur = selinux.getfilecon(path)[1]
return con != cur
except OSError:
return False
def set_mislabeled(self, tree, path, iter, niter):
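        # Flag rows whose on-disk label differs from the policy default and record the
        # expected and current SELinux types in the extra columns.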
if not self.mislabeled(path):
return
con = selinux.matchpathcon(path, 0)[1]
cur = selinux.getfilecon(path)[1]
self.mislabeled_files = True
# Set visibility of label
tree.set_value(niter, 3, True)
# Has a mislabel
tree.set_value(iter, 4, True)
tree.set_value(niter, 4, True)
tree.set_value(iter, 5, con.split(":")[2])
tree.set_value(iter, 6, cur.split(":")[2])
def writable_files_initialize(self, application):
# Traversing the dictionary data struct
self.writable_files = sepolicy.get_writable_files(application)
for write in self.writable_files.keys():
if len(self.writable_files[write]) < 2:
self.files_initial_data_insert(self.writable_files_liststore, None, write, _("all files"))
continue
file_class = self.writable_files[write][1]
for path in self.writable_files[write][0]:
if (path, file_class) in self.cur_dict["fcontext"]:
if self.cur_dict["fcontext"][(path, file_class)]["action"] == "-d":
continue
if write != self.cur_dict["fcontext"][(path, file_class)]["type"]:
continue
self.files_initial_data_insert(self.writable_files_liststore, path, write, file_class)
def files_initial_data_insert(self, liststore, path, seLinux_label, file_class):
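        # Append a parent row for the file specification and child rows for each matching
        # file on disk; entries with a local customization are rendered in bold.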
iter = liststore.append(None)
if path is None:
path = _("MISSING FILE PATH")
modify = False
else:
modify = (path, file_class) in self.local_file_paths
for p in sepolicy.find_file(path):
niter = liststore.append(iter)
liststore.set_value(niter, 0, p)
self.set_mislabeled(liststore, p, iter, niter)
if modify:
path = self.markup(path)
                seLinux_label = self.markup(seLinux_label)
file_class = self.markup(file_class)
liststore.set_value(iter, 0, path)
liststore.set_value(iter, 1, seLinux_label)
liststore.set_value(iter, 2, file_class)
liststore.set_value(iter, 7, modify)
def markup(self, f):
return "<b>%s</b>" % f
def unmarkup(self, f):
if f:
return re.sub("</b>$", "", re.sub("^<b>", "", f))
return None
def application_files_initialize(self, application):
self.file_types = sepolicy.get_file_types(application)
for app in self.file_types.keys():
if len(self.file_types[app]) == 0:
continue
file_class = self.file_types[app][1]
for path in self.file_types[app][0]:
desc = sepolicy.get_description(app, markup=self.markup)
if (path, file_class) in self.cur_dict["fcontext"]:
if self.cur_dict["fcontext"][(path, file_class)]["action"] == "-d":
continue
if app != self.cur_dict["fcontext"][(path, file_class)]["type"]:
continue
self.files_initial_data_insert(self.application_files_liststore, path, desc, file_class)
    def modified(self):
        for k in self.cur_dict:
            if len(self.cur_dict[k]) > 0:
                return True
        return False
def boolean_initialize(self, application):
for blist in sepolicy.get_bools(application):
for b, active in blist:
if b in self.cur_dict["boolean"]:
active = self.cur_dict["boolean"][b]['active']
desc = sepolicy.boolean_desc(b)
self.boolean_initial_data_insert(b, desc, active)
def boolean_initial_data_insert(self, val, desc, active):
# Insert data from data source into tree
iter = self.boolean_liststore.append()
self.boolean_liststore.set_value(iter, 0, active)
self.boolean_liststore.set_value(iter, 1, desc)
self.boolean_liststore.set_value(iter, 2, val)
self.boolean_liststore.set_value(iter, 3, _('More...'))
def transitions_into_initialize(self, application):
for x in sepolicy.get_transitions_into(application):
active = None
executable = None
source = None
if "boolean" in x:
active = x["boolean"]
if "target" in x:
executable = x["target"]
if "source" in x:
source = x["source"]
self.transitions_into_initial_data_insert(active, executable, source)
def transitions_into_initial_data_insert(self, active, executable, source):
iter = self.transitions_into_liststore.append()
if active != None:
self.transitions_into_liststore.set_value(iter, 0, enabled[active[0][1]]) # active[0][1] is either T or F (enabled is all the way at the top)
else:
self.transitions_into_liststore.set_value(iter, 0, "Default")
self.transitions_into_liststore.set_value(iter, 2, executable)
self.transitions_into_liststore.set_value(iter, 1, source)
def transitions_from_initialize(self, application):
for x in sepolicy.get_transitions(application):
active = None
            executable_type = None
transtype = None
if "boolean" in x:
active = x["boolean"]
if "target" in x:
executable_type = x["target"]
if "transtype" in x:
transtype = x["transtype"]
self.transitions_from_initial_data_insert(active, executable_type, transtype)
try:
for executable in self.fcdict[executable_type]["regex"]:
self.transitions_from_initial_data_insert(active, executable, transtype)
except KeyError:
pass
def transitions_from_initial_data_insert(self, active, executable, transtype):
iter = self.transitions_from_treestore.append(None)
if active == None:
self.transitions_from_treestore.set_value(iter, 0, "Default")
self.transitions_from_treestore.set_value(iter, 5, False)
else:
niter = self.transitions_from_treestore.append(iter)
# active[0][1] is either T or F (enabled is all the way at the top)
self.transitions_from_treestore.set_value(iter, 0, enabled[active[0][1]])
markup = ('<span foreground="blue"><u>','</u></span>')
if active[0][1]:
self.transitions_from_treestore.set_value(niter, 2, (_("To disable this transition, go to the %sBoolean section%s.") % markup))
else:
self.transitions_from_treestore.set_value(niter, 2, (_("To enable this transition, go to the %sBoolean section%s.") % markup))
# active[0][0] is the Bool Name
self.transitions_from_treestore.set_value(niter, 1, active[0][0])
self.transitions_from_treestore.set_value(niter, 5, True)
self.transitions_from_treestore.set_value(iter, 2, executable)
self.transitions_from_treestore.set_value(iter, 3, transtype)
def transitions_files_initialize(self, application):
for i in sepolicy.get_file_transitions(application):
if 'filename' in i:
filename = i['filename']
else:
filename = None
self.transitions_files_inital_data_insert(i['target'], i['class'], i['transtype'], filename)
def transitions_files_inital_data_insert(self, path, tclass, dest, name):
iter = self.transitions_file_liststore.append()
self.transitions_file_liststore.set_value(iter, 0, path)
self.transitions_file_liststore.set_value(iter, 1, tclass)
self.transitions_file_liststore.set_value(iter, 2, dest)
if name == None:
name = '*'
self.transitions_file_liststore.set_value(iter, 3, name)
def tab_change(self, *args):
self.clear_filters()
self.treeview = None
self.treesort = None
self.treefilter = None
self.liststore = None
self.modify_button.set_sensitive(False)
self.add_modify_delete_box.hide()
self.show_modified_only.set_visible(False)
self.show_mislabeled_files_only.set_visible(False)
self.mislabeled_files_label.set_visible(False)
self.warning_files.set_visible(False)
if self.boolean_radio_button.get_active():
self.outer_notebook.set_current_page(BOOLEANS_PAGE)
self.treeview = self.boolean_treeview
self.show_modified_only.set_visible(True)
if self.files_radio_button.get_active():
self.show_popup(self.add_modify_delete_box)
self.show_modified_only.set_visible(True)
self.show_mislabeled_files_only.set_visible(self.mislabeled_files)
self.mislabeled_files_label.set_visible(self.mislabeled_files)
self.warning_files.set_visible(self.mislabeled_files)
self.outer_notebook.set_current_page(FILES_PAGE)
if args[0] == self.inner_notebook_files:
ipage = args[2]
else:
ipage = self.inner_notebook_files.get_current_page()
if ipage == EXE_PAGE:
self.treeview = self.executable_files_treeview
category = _("executable")
elif ipage == WRITABLE_PAGE:
self.treeview = self.writable_files_treeview
category = _("writable")
elif ipage == APP_PAGE:
self.treeview = self.application_files_treeview
category = _("application")
self.add_button.set_tooltip_text(_("Add new %(TYPE)s file path for '%(DOMAIN)s' domains.") % {"TYPE": category, "DOMAIN": self.application})
self.delete_button.set_tooltip_text(_("Delete %(TYPE)s file paths for '%(DOMAIN)s' domain.") % {"TYPE": category, "DOMAIN": self.application})
self.modify_button.set_tooltip_text(_("Modify %(TYPE)s file path for '%(DOMAIN)s' domain. Only bolded items in the list can be selected, this indicates they were modified previously.") % {"TYPE": category, "DOMAIN": self.application})
if self.network_radio_button.get_active():
self.add_modify_delete_box.show()
self.show_modified_only.set_visible(True)
self.outer_notebook.set_current_page(NETWORK_PAGE)
if args[0] == self.inner_notebook_network:
ipage = args[2]
else:
ipage = self.inner_notebook_network.get_current_page()
if ipage == OUTBOUND_PAGE:
self.treeview = self.network_out_treeview
category = _("connect")
if ipage == INBOUND_PAGE:
self.treeview = self.network_in_treeview
category = _("listen for inbound connections")
self.add_button.set_tooltip_text(_("Add new port definition to which the '%(APP)s' domain is allowed to %(PERM)s.") % {"APP": self.application, "PERM": category})
self.delete_button.set_tooltip_text(_("Delete modified port definitions to which the '%(APP)s' domain is allowed to %(PERM)s.") % {"APP": self.application, "PERM": category})
self.modify_button.set_tooltip_text(_("Modify port definitions to which the '%(APP)s' domain is allowed to %(PERM)s.") % {"APP": self.application, "PERM": category})
if self.transitions_radio_button.get_active():
self.outer_notebook.set_current_page(TRANSITIONS_PAGE)
if args[0] == self.inner_notebook_transitions:
ipage = args[2]
else:
ipage = self.inner_notebook_transitions.get_current_page()
if ipage == TRANSITIONS_FROM_PAGE:
self.treeview = self.transitions_from_treeview
if ipage == TRANSITIONS_TO_PAGE:
self.treeview = self.transitions_into_treeview
if ipage == TRANSITIONS_FILE_PAGE:
self.treeview = self.transitions_file_treeview
if self.system_radio_button.get_active():
self.outer_notebook.set_current_page(SYSTEM_PAGE)
self.filter_box.hide()
if self.lockdown_radio_button.get_active():
self.lockdown_init()
self.outer_notebook.set_current_page(LOCKDOWN_PAGE)
self.filter_box.hide()
if self.user_radio_button.get_active():
self.outer_notebook.set_current_page(USER_PAGE)
self.add_modify_delete_box.show()
self.show_modified_only.set_visible(True)
self.treeview = self.user_treeview
self.add_button.set_tooltip_text(_("Add new SELinux User/Role definition."))
self.delete_button.set_tooltip_text(_("Delete modified SELinux User/Role definitions."))
self.modify_button.set_tooltip_text(_("Modify selected modified SELinux User/Role definitions."))
if self.login_radio_button.get_active():
self.outer_notebook.set_current_page(LOGIN_PAGE)
self.add_modify_delete_box.show()
self.show_modified_only.set_visible(True)
self.treeview = self.login_treeview
self.add_button.set_tooltip_text(_("Add new Login Mapping definition."))
self.delete_button.set_tooltip_text(_("Delete modified Login Mapping definitions."))
self.modify_button.set_tooltip_text(_("Modify selected modified Login Mapping definitions."))
if self.file_equiv_radio_button.get_active():
self.outer_notebook.set_current_page(FILE_EQUIV_PAGE)
self.add_modify_delete_box.show()
self.show_modified_only.set_visible(True)
self.treeview = self.file_equiv_treeview
self.add_button.set_tooltip_text(_("Add new File Equivalence definition."))
self.delete_button.set_tooltip_text(_("Delete modified File Equivalence definitions."))
self.modify_button.set_tooltip_text(_("Modify selected modified File Equivalence definitions. Only bolded items in the list can be selected, this indicates they were modified previously."))
self.opage = self.outer_notebook.get_current_page()
if self.treeview:
self.filter_box.show()
self.treesort = self.treeview.get_model()
self.treefilter = self.treesort.get_model()
self.liststore = self.treefilter.get_model()
for x in range(0, self.liststore.get_n_columns()):
col = self.treeview.get_column(x)
if col:
cell = col.get_cells()[0]
if isinstance(cell, Gtk.CellRendererText):
self.liststore.set_sort_func(x, self.stripsort, None)
self.treeview.get_selection().unselect_all()
self.modify_button.set_sensitive(False)
def stripsort(self, model, row1, row2, user_data):
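        # Sort function that compares the values of the active sort column with any
        # <b></b> markup stripped.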
sort_column, _ = model.get_sort_column_id()
val1 = self.unmarkup(model.get_value(row1, sort_column))
val2 = self.unmarkup(model.get_value(row2, sort_column))
return cmp(val1, val2)
def display_more_detail(self, windows, path):
it = self.boolean_filter.get_iter(path)
it = self.boolean_filter.convert_iter_to_child_iter(it)
self.boolean_more_detail_tree_data_set.clear()
self.boolean_more_detail_window.set_title(_("Boolean %s Allow Rules") % self.boolean_liststore.get_value(it, 2))
blist = sepolicy.get_boolean_rules(self.application, self.boolean_liststore.get_value(it, 2))
for b in blist:
self.display_more_detail_init(b["source"], b["target"], b["class"], b["permlist"])
self.show_popup(self.boolean_more_detail_window)
def display_more_detail_init(self, source, target, class_type, permission):
iter = self.boolean_more_detail_tree_data_set.append()
self.boolean_more_detail_tree_data_set.set_value(iter, 0, "allow %s %s:%s { %s };" % (source, target, class_type, " ".join(permission)))
def add_button_clicked(self, *args):
self.modify = False
if self.opage == NETWORK_PAGE:
self.popup_network_label.set_text((_("Add Network Port for %s. Ports will be created when update is applied.")) % self.application)
self.network_popup_window.set_title((_("Add Network Port for %s")) % self.application)
self.init_network_dialog(args)
return
if self.opage == FILES_PAGE:
self.popup_files_label.set_text((_("Add File Labeling for %s. File labels will be created when update is applied.")) % self.application)
self.files_popup_window.set_title((_("Add File Labeling for %s")) % self.application)
self.init_files_dialog(args)
ipage = self.inner_notebook_files.get_current_page()
if ipage == EXE_PAGE:
self.files_path_entry.set_text("ex: /usr/sbin/Foobar")
else:
self.files_path_entry.set_text("ex: /var/lib/Foobar")
self.clear_entry = True
if self.opage == LOGIN_PAGE:
self.login_label.set_text((_("Add Login Mapping. User Mapping will be created when Update is applied.")))
self.login_popup_window.set_title(_("Add Login Mapping"))
self.login_init_dialog(args)
self.clear_entry = True
if self.opage == USER_PAGE:
self.user_label.set_text((_("Add SELinux User Role. SELinux user roles will be created when update is applied.")))
self.user_popup_window.set_title(_("Add SELinux Users"))
self.user_init_dialog(args)
self.clear_entry = True
if self.opage == FILE_EQUIV_PAGE:
self.file_equiv_source_entry.set_text("")
self.file_equiv_dest_entry.set_text("")
self.file_equiv_label.set_text((_("Add File Equivalency Mapping. Mapping will be created when update is applied.")))
self.file_equiv_popup_window.set_title(_("Add SELinux File Equivalency"))
self.clear_entry = True
self.show_popup(self.file_equiv_popup_window)
self.new_updates()
def show_popup(self, window):
self.current_popup = window
window.show()
def close_popup(self, *args):
self.current_popup.hide()
self.window.set_sensitive(True)
return True
def modify_button_clicked(self, *args):
iter = None
if self.treeview:
iter = self.get_selected_iter()
if not iter:
self.modify_button.set_sensitive(False)
return
self.modify = True
if self.opage == NETWORK_PAGE:
self.modify_button_network_clicked(args)
if self.opage == FILES_PAGE:
self.popup_files_label.set_text((_("Modify File Labeling for %s. File labels will be created when update is applied.")) % self.application)
self.files_popup_window.set_title((_("Add File Labeling for %s")) % self.application)
self.delete_old_item = None
self.init_files_dialog(args)
self.modify = True
operation = "Modify"
mls = 1
ipage = self.inner_notebook_files.get_current_page()
if ipage == EXE_PAGE:
iter = self.executable_files_filter.convert_iter_to_child_iter(iter)
self.delete_old_item = iter
path = self.executable_files_liststore.get_value(iter, 0)
self.files_path_entry.set_text(path)
ftype = self.executable_files_liststore.get_value(iter, 1)
                if ftype != None:
self.combo_set_active_text(self.files_type_combobox, ftype)
tclass = self.executable_files_liststore.get_value(iter, 2)
if tclass != None:
self.combo_set_active_text(self.files_class_combobox, tclass)
if ipage == WRITABLE_PAGE:
iter = self.writable_files_filter.convert_iter_to_child_iter(iter)
self.delete_old_item = iter
path = self.writable_files_liststore.get_value(iter, 0)
self.files_path_entry.set_text(path)
type = self.writable_files_liststore.get_value(iter, 1)
if type != None:
self.combo_set_active_text(self.files_type_combobox, type)
tclass = self.writable_files_liststore.get_value(iter, 2)
if tclass != None:
self.combo_set_active_text(self.files_class_combobox, tclass)
if ipage == APP_PAGE:
iter = self.application_files_filter.convert_iter_to_child_iter(iter)
self.delete_old_item = iter
path = self.application_files_liststore.get_value(iter, 0)
self.files_path_entry.set_text(path)
try:
get_type = self.application_files_liststore.get_value(iter, 1)
get_type = get_type.split("<b>")[1].split("</b>")
except AttributeError:
pass
type = self.application_files_liststore.get_value(iter, 2)
if type != None:
self.combo_set_active_text(self.files_type_combobox, type)
tclass = get_type[0]
if tclass != None:
self.combo_set_active_text(self.files_class_combobox, tclass)
if self.opage == USER_PAGE:
self.user_init_dialog(args)
self.user_name_entry.set_text(self.user_liststore.get_value(iter, 0))
self.user_mls_level_entry.set_text(self.user_liststore.get_value(iter, 2))
self.user_mls_entry.set_text(self.user_liststore.get_value(iter, 3))
self.combo_set_active_text(self.user_roles_combobox, self.user_liststore.get_value(iter, 1))
self.user_label.set_text((_("Modify SELinux User Role. SELinux user roles will be modified when update is applied.")))
self.user_popup_window.set_title(_("Modify SELinux Users"))
self.show_popup(self.user_popup_window)
if self.opage == LOGIN_PAGE:
self.login_init_dialog(args)
self.login_name_entry.set_text(self.login_liststore.get_value(iter, 0))
self.login_mls_entry.set_text(self.login_liststore.get_value(iter, 2))
self.combo_set_active_text(self.login_seuser_combobox, self.login_liststore.get_value(iter, 1))
self.login_label.set_text((_("Modify Login Mapping. Login Mapping will be modified when Update is applied.")))
self.login_popup_window.set_title(_("Modify Login Mapping"))
self.show_popup(self.login_popup_window)
if self.opage == FILE_EQUIV_PAGE:
self.file_equiv_source_entry.set_text(self.unmarkup(self.file_equiv_liststore.get_value(iter, 0)))
self.file_equiv_dest_entry.set_text(self.unmarkup(self.file_equiv_liststore.get_value(iter, 1)))
self.file_equiv_label.set_text((_("Modify File Equivalency Mapping. Mapping will be created when update is applied.")))
self.file_equiv_popup_window.set_title(_("Modify SELinux File Equivalency"))
self.clear_entry = True
self.show_popup(self.file_equiv_popup_window)
def populate_type_combo(self, tree, loc, *args):
iter = self.more_types_files_liststore.get_iter(loc)
ftype = self.more_types_files_liststore.get_value(iter, 0)
self.combo_set_active_text(self.files_type_combobox, ftype)
self.show_popup(self.files_popup_window)
self.moreTypes_window_files.hide()
def strip_domain(self, domain):
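        # Return the domain name with its "_t" (or "_script_t") suffix removed.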
if domain == None:
return
if domain.endswith("_script_t"):
split_char = "_script_t"
else:
split_char = "_t"
return domain.split(split_char)[0]
def exclude_type(self, type, exclude_list):
for e in exclude_list:
if type.startswith(e):
return True
return False
def init_files_dialog(self, *args):
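        # Populate the file class and file type combo boxes for the Add/Modify file
        # labeling dialog, restricted to types related to the selected application
        # and the active files tab.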
exclude_list = []
self.files_class_combobox.set_sensitive(True)
self.show_popup(self.files_popup_window)
ipage = self.inner_notebook_files.get_current_page()
self.files_type_combolist.clear()
self.files_class_combolist.clear()
compare = self.strip_domain(self.application)
for d in self.application_liststore:
if d[0].startswith(compare) and d[0] != self.application and not d[0].startswith("httpd_sys"):
exclude_list.append(self.strip_domain(d[0]))
self.more_types_files_liststore.clear()
try:
for files in sepolicy.file_type_str:
iter = self.files_class_combolist.append()
self.files_class_combolist.set_value(iter, 0, sepolicy.file_type_str[files])
if ipage == EXE_PAGE and self.entrypoints != None:
for exe in self.entrypoints.keys():
if exe.startswith(compare):
iter = self.files_type_combolist.append()
self.files_type_combolist.set_value(iter, 0, exe)
iter = self.more_types_files_liststore.append()
self.more_types_files_liststore.set_value(iter, 0, exe)
self.files_class_combobox.set_active(4)
self.files_class_combobox.set_sensitive(False)
elif ipage == WRITABLE_PAGE and self.writable_files != None:
for write in self.writable_files.keys():
if write.startswith(compare) and not self.exclude_type(write, exclude_list) and write in self.file_types:
iter = self.files_type_combolist.append()
self.files_type_combolist.set_value(iter, 0, write)
iter = self.more_types_files_liststore.append()
self.more_types_files_liststore.set_value(iter, 0, write)
self.files_class_combobox.set_active(0)
elif ipage == APP_PAGE and self.file_types != None:
for app in sepolicy.get_all_file_types():
                    if app.startswith(compare) and not self.exclude_type(app, exclude_list):
                        iter = self.files_type_combolist.append()
                        self.files_type_combolist.set_value(iter, 0, app)
                        iter = self.more_types_files_liststore.append()
                        self.more_types_files_liststore.set_value(iter, 0, app)
self.files_class_combobox.set_active(0)
except AttributeError:
print("error")
pass
self.files_type_combobox.set_active(0)
self.files_mls_entry.set_text("s0")
iter = self.files_type_combolist.append()
self.files_type_combolist.set_value(iter, 0, _('More...'))
def modify_button_network_clicked(self, *args):
iter = self.get_selected_iter()
if not iter:
self.modify_button.set_sensitive(False)
return
self.popup_network_label.set_text((_("Modify Network Port for %s. Ports will be created when update is applied.")) % self.application)
self.network_popup_window.set_title((_("Modify Network Port for %s")) % self.application)
self.delete_old_item = None
self.init_network_dialog(args)
operation = "Modify"
mls = 1
self.modify = True
iter = self.get_selected_iter()
port = self.liststore.get_value(iter, 0)
self.network_ports_entry.set_text(port)
protocol = self.liststore.get_value(iter, 1)
if protocol == "tcp":
self.network_tcp_button.set_active(True)
elif protocol == "udp":
self.network_udp_button.set_active(True)
type = self.liststore.get_value(iter, 2)
if type != None:
self.combo_set_active_text(self.network_port_type_combobox, type)
self.delete_old_item = iter
def init_network_dialog(self, *args):
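        # Populate the port type combo box for the Add/Modify network port dialog,
        # preselecting a type that shares the application's name prefix when one exists.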
self.show_popup(self.network_popup_window)
ipage = self.inner_notebook_network.get_current_page()
self.network_port_type_combolist.clear()
self.network_ports_entry.set_text("")
try:
if ipage == OUTBOUND_PAGE:
netd = sepolicy.network.get_network_connect(self.application, "tcp", "name_connect", check_bools=True)
elif ipage == INBOUND_PAGE:
netd = sepolicy.network.get_network_connect(self.application, "tcp", "name_bind", check_bools=True)
netd += sepolicy.network.get_network_connect(self.application, "udp", "name_bind", check_bools=True)
port_types = []
for k in netd.keys():
for t, ports in netd[k]:
if t not in port_types + ["port_t", "unreserved_port_t"]:
if t.endswith("_type"):
continue
port_types.append(t)
port_types.sort()
short_domain = self.strip_domain(self.application)
if short_domain[-1] == "d":
short_domain = short_domain[:-1]
short_domain = short_domain + "_"
ctr = 0
found = 0
for t in port_types:
if t.startswith(short_domain):
found = ctr
iter = self.network_port_type_combolist.append()
self.network_port_type_combolist.set_value(iter, 0, t)
ctr += 1
self.network_port_type_combobox.set_active(found)
except AttributeError:
pass
self.network_tcp_button.set_active(True)
self.network_mls_entry.set_text("s0")
def login_seuser_combobox_change(self, combo, *args):
seuser = self.combo_get_active_text(combo)
if self.login_mls_entry.get_text() == "":
for u in sepolicy.get_selinux_users():
if seuser == u['name']:
self.login_mls_entry.set_text(u['range'])
def user_roles_combobox_change(self, combo, *args):
serole = self.combo_get_active_text(combo)
if self.user_mls_entry.get_text() == "":
for u in sepolicy.get_all_roles():
if serole == u['name']:
self.user_mls_entry.set_text(u['range'])
def get_selected_iter(self):
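        # Translate the treeview selection back through the sort and filter models to an
        # iter on the underlying liststore.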
iter = None
if not self.treeview:
return None
row = self.treeview.get_selection()
if not row:
return None
treesort, iter = row.get_selected()
if iter:
iter = treesort.convert_iter_to_child_iter(iter)
if iter:
iter = self.treefilter.convert_iter_to_child_iter(iter)
return iter
def cursor_changed(self, *args):
self.modify_button.set_sensitive(False)
iter = self.get_selected_iter()
if iter == None:
self.modify_button.set_sensitive(False)
return
if not self.liststore[iter] or not self.liststore[iter][-1]:
return
self.modify_button.set_sensitive(self.liststore[iter][-1])
def login_init_dialog(self, *args):
self.show_popup(self.login_popup_window)
self.login_seuser_combolist.clear()
users = sepolicy.get_all_users()
users.sort()
for u in users:
iter = self.login_seuser_combolist.append()
self.login_seuser_combolist.set_value(iter, 0, str(u))
self.login_name_entry.set_text("")
self.login_mls_entry.set_text("")
def user_init_dialog(self, *args):
self.show_popup(self.user_popup_window)
self.user_roles_combolist.clear()
roles = sepolicy.get_all_roles()
roles.sort()
for r in roles:
iter = self.user_roles_combolist.append()
self.user_roles_combolist.set_value(iter, 0, str(r))
self.user_name_entry.set_text("")
self.user_mls_entry.set_text("")
def on_disable_ptrace(self, checkbutton):
if self.finish_init:
update_buffer = "boolean -m -%d deny_ptrace" % checkbutton.get_active()
self.wait_mouse()
try:
self.dbus.semanage(update_buffer)
except dbus.exceptions.DBusException as e:
self.error(e)
self.ready_mouse()
def on_show_modified_only(self, checkbutton):
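        # When checked, rebuild the current list so it only shows rows that have a matching
        # entry in the customization dictionary; when unchecked, repopulate the full list.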
length = self.liststore.get_n_columns()
def dup_row(row):
l = []
for i in range(0, length):
l.append(row[i])
return l
append_list = []
if self.opage == BOOLEANS_PAGE:
if not checkbutton.get_active():
return self.boolean_initialize(self.application)
for row in self.liststore:
if row[2] in self.cust_dict["boolean"]:
append_list.append(dup_row(row))
if self.opage == FILES_PAGE:
ipage = self.inner_notebook_files.get_current_page()
if not checkbutton.get_active():
if ipage == EXE_PAGE:
return self.executable_files_initialize(self.application)
if ipage == WRITABLE_PAGE:
return self.writable_files_initialize(self.application)
if ipage == APP_PAGE:
return self.application_files_initialize(self.application)
for row in self.liststore:
if (row[0], row[2]) in self.cust_dict["fcontext"]:
append_list.append(row)
if self.opage == NETWORK_PAGE:
if not checkbutton.get_active():
return self.network_initialize(self.application)
for row in self.liststore:
if (row[0], row[1]) in self.cust_dict["port"]:
append_list.append(dup_row(row))
if self.opage == FILE_EQUIV_PAGE:
            if not checkbutton.get_active():
return self.file_equiv_initialize()
for row in self.liststore:
if row[0] in self.cust_dict["fcontext-equiv"]:
append_list.append(dup_row(row))
if self.opage == USER_PAGE:
if not checkbutton.get_active():
return self.user_initialize()
for row in self.liststore:
if row[0] in self.cust_dict["user"]:
append_list.append(dup_row(row))
if self.opage == LOGIN_PAGE:
            if not checkbutton.get_active():
return self.login_initialize()
for row in self.liststore:
if row[0] in self.cust_dict["login"]:
append_list.append(dup_row(row))
self.liststore.clear()
for row in append_list:
iter = self.liststore.append()
for i in range(0, length):
self.liststore.set_value(iter, i, row[i])
def init_modified_files_liststore(self, tree, app, ipage, operation, path, fclass, ftype):
iter = tree.append(None)
tree.set_value(iter, 0, path)
tree.set_value(iter, 1, ftype)
tree.set_value(iter, 2, fclass)
def restore_to_default(self, *args):
print("restore to defualt clicked...")
def invalid_entry_retry(self, *args):
self.closewindow(self.error_check_window)
self.files_popup_window.set_sensitive(True)
self.network_popup_window.set_sensitive(True)
def error_check_files(self, insert_txt):
if len(insert_txt) == 0 or insert_txt[0] != '/':
self.error_check_window.show()
self.files_popup_window.set_sensitive(False)
self.network_popup_window.set_sensitive(False)
self.error_check_label.set_text((_("The entry '%s' is not a valid path. Paths must begin with a '/'.")) % insert_txt)
return True
return False
def error_check_network(self, port):
try:
pnum = int(port)
            if pnum < 1 or pnum > 65535:
raise ValueError
except ValueError:
self.error_check_window.show()
self.files_popup_window.set_sensitive(False)
self.network_popup_window.set_sensitive(False)
self.error_check_label.set_text((_("Port number must be between 1 and 65536")))
return True
return False
def show_more_types(self, *args):
if self.finish_init:
if self.combo_get_active_text(self.files_type_combobox) == _('More...'):
self.files_popup_window.hide()
self.moreTypes_window_files.show()
def update_to_login(self, *args):
self.close_popup()
seuser = self.combo_get_active_text(self.login_seuser_combobox)
mls_range = self.login_mls_entry.get_text()
name = self.login_name_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
oldname = self.login_liststore.get_value(iter, 0)
oldseuser = self.login_liststore.get_value(iter, 1)
oldrange = self.login_liststore.get_value(iter, 2)
self.liststore.set_value(iter, 0, oldname)
self.liststore.set_value(iter, 1, oldseuser)
self.liststore.set_value(iter, 2, oldrange)
self.cur_dict["login"][name] = {"action": "-m", "range": mls_range, "seuser": seuser, "oldrange": oldrange, "oldseuser": oldseuser, "oldname": oldname}
else:
iter = self.liststore.append(None)
self.cur_dict["login"][name] = {"action": "-a", "range": mls_range, "seuser": seuser}
self.liststore.set_value(iter, 0, name)
self.liststore.set_value(iter, 1, seuser)
self.liststore.set_value(iter, 2, mls_range)
self.new_updates()
def update_to_user(self, *args):
self.close_popup()
roles = self.combo_get_active_text(self.user_roles_combobox)
level = self.user_mls_level_entry.get_text()
mls_range = self.user_mls_entry.get_text()
name = self.user_name_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
oldname = self.user_liststore.get_value(iter, 0)
oldroles = self.user_liststore.get_value(iter, 1)
            oldlevel = self.user_liststore.get_value(iter, 2)
oldrange = self.user_liststore.get_value(iter, 3)
self.liststore.set_value(iter, 0, oldname)
self.liststore.set_value(iter, 1, oldroles)
self.liststore.set_value(iter, 2, oldlevel)
self.liststore.set_value(iter, 3, oldrange)
self.cur_dict["user"][name] = {"action": "-m", "range": mls_range, "level": level, "role": roles, "oldrange": oldrange, "oldlevel": oldlevel, "oldroles": oldroles, "oldname": oldname}
else:
iter = self.liststore.append(None)
self.cur_dict["user"][name] = {"action": "-a", "range": mls_range, "level": level, "role": roles}
self.liststore.set_value(iter, 0, name)
self.liststore.set_value(iter, 1, roles)
self.liststore.set_value(iter, 2, level)
self.liststore.set_value(iter, 3, mls_range)
self.new_updates()
def update_to_file_equiv(self, *args):
self.close_popup()
dest = self.file_equiv_dest_entry.get_text()
src = self.file_equiv_source_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
            olddest = self.unmarkup(self.liststore.get_value(iter, 0))
            oldsrc = self.unmarkup(self.liststore.get_value(iter, 1))
self.cur_dict["fcontext-equiv"][dest] = {"action": "-m", "src": src, "oldsrc": oldsrc, "olddest": olddest}
else:
iter = self.liststore.append(None)
self.cur_dict["fcontext-equiv"][dest] = {"action": "-a", "src": src}
self.liststore.set_value(iter, 0, self.markup(dest))
self.liststore.set_value(iter, 1, self.markup(src))
def update_to_files(self, *args):
self.close_popup()
self.files_add = True
# Insert Function will be used in the future
path = self.files_path_entry.get_text()
if self.error_check_files(path):
return
setype = self.combo_get_active_text(self.files_type_combobox)
mls = self.files_mls_entry.get_text()
tclass = self.combo_get_active_text(self.files_class_combobox)
if self.modify:
iter = self.get_selected_iter()
            oldpath = self.unmarkup(self.liststore.get_value(iter, 0))
            oldsetype = self.unmarkup(self.liststore.get_value(iter, 1))
            oldtclass = self.liststore.get_value(iter, 2)
            # The old MLS value is not stored in the view, so reuse the current entry here (assumption)
            self.cur_dict["fcontext"][(path, tclass)] = {"action": "-m", "type": setype, "oldtype": oldsetype, "oldmls": mls, "oldclass": oldtclass}
else:
iter = self.liststore.append(None)
self.cur_dict["fcontext"][(path, tclass)] = {"action": "-a", "type": setype}
self.liststore.set_value(iter, 0, self.markup(path))
self.liststore.set_value(iter, 1, self.markup(setype))
self.liststore.set_value(iter, 2, self.markup(tclass))
self.files_add = False
self.recursive_path_toggle.set_active(False)
self.new_updates()
def update_to_network(self, *args):
self.network_add = True
ports = self.network_ports_entry.get_text()
if self.error_check_network(ports):
return
if self.network_tcp_button.get_active():
protocol = "tcp"
else:
protocol = "udp"
setype = self.combo_get_active_text(self.network_port_type_combobox)
mls = self.network_mls_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
            oldports = self.unmarkup(self.liststore.get_value(iter, 0))
            oldprotocol = self.unmarkup(self.liststore.get_value(iter, 1))
            oldsetype = self.unmarkup(self.liststore.get_value(iter, 2))
            # The old MLS value is not stored in the view, so reuse the current entry here (assumption)
            self.cur_dict["port"][(ports, protocol)] = {"action": "-m", "type": setype, "mls": mls, "oldtype": oldsetype, "oldmls": mls, "oldprotocol": oldprotocol, "oldports": oldports}
else:
iter = self.liststore.append(None)
self.cur_dict["port"][(ports, protocol)] = {"action": "-a", "type": setype, "mls": mls}
self.liststore.set_value(iter, 0, ports)
self.liststore.set_value(iter, 1, protocol)
self.liststore.set_value(iter, 2, setype)
self.network_add = False
self.network_popup_window.hide()
self.window.set_sensitive(True)
self.new_updates()
def delete_button_clicked(self, *args):
operation = "Add"
self.window.set_sensitive(False)
if self.opage == NETWORK_PAGE:
self.network_delete_liststore.clear()
port_dict = self.cust_dict["port"]
for ports, protocol in port_dict:
setype = port_dict[(ports, protocol)]["type"]
iter = self.network_delete_liststore.append()
self.network_delete_liststore.set_value(iter, 1, ports)
self.network_delete_liststore.set_value(iter, 2, protocol)
self.network_delete_liststore.set_value(iter, 3, setype)
self.show_popup(self.network_delete_window)
return
if self.opage == FILES_PAGE:
self.files_delete_liststore.clear()
fcontext_dict = self.cust_dict["fcontext"]
for path, tclass in fcontext_dict:
setype = fcontext_dict[(path, tclass)]["type"]
iter = self.files_delete_liststore.append()
self.files_delete_liststore.set_value(iter, 1, path)
self.files_delete_liststore.set_value(iter, 2, setype)
self.files_delete_liststore.set_value(iter, 3, sepolicy.file_type_str[tclass])
self.show_popup(self.files_delete_window)
return
if self.opage == USER_PAGE:
self.user_delete_liststore.clear()
user_dict = self.cust_dict["user"]
for user in user_dict:
roles = user_dict[user]["role"]
mls = user_dict[user]["range"]
level = user_dict[user]["level"]
iter = self.user_delete_liststore.append()
self.user_delete_liststore.set_value(iter, 1, user)
self.user_delete_liststore.set_value(iter, 2, roles)
self.user_delete_liststore.set_value(iter, 3, level)
self.user_delete_liststore.set_value(iter, 4, mls)
self.show_popup(self.user_delete_window)
return
if self.opage == LOGIN_PAGE:
self.login_delete_liststore.clear()
login_dict = self.cust_dict["login"]
for login in login_dict:
seuser = login_dict[login]["seuser"]
mls = login_dict[login]["range"]
iter = self.login_delete_liststore.append()
self.login_delete_liststore.set_value(iter, 1, seuser)
self.login_delete_liststore.set_value(iter, 2, login)
self.login_delete_liststore.set_value(iter, 3, mls)
self.show_popup(self.login_delete_window)
return
if self.opage == FILE_EQUIV_PAGE:
self.file_equiv_delete_liststore.clear()
for items in self.file_equiv_liststore:
if items[2]:
iter = self.file_equiv_delete_liststore.append()
self.file_equiv_delete_liststore.set_value(iter, 1, self.unmarkup(items[0]))
self.file_equiv_delete_liststore.set_value(iter, 2, self.unmarkup(items[1]))
self.show_popup(self.file_equiv_delete_window)
return
def on_save_delete_clicked(self, *args):
self.close_popup()
if self.opage == NETWORK_PAGE:
for delete in self.network_delete_liststore:
if delete[0]:
self.cur_dict["port"][(delete[1], delete[2])] = {"action": "-d", "type": delete[3]}
if self.opage == FILES_PAGE:
for delete in self.files_delete_liststore:
if delete[0]:
self.cur_dict["fcontext"][(delete[1], reverse_file_type_str[delete[3]])] = {"action": "-d", "type": delete[2]}
if self.opage == USER_PAGE:
for delete in self.user_delete_liststore:
if delete[0]:
self.cur_dict["user"][delete[1]] = {"action": "-d", "role": delete[2], "range": delete[4]}
if self.opage == LOGIN_PAGE:
for delete in self.login_delete_liststore:
if delete[0]:
self.cur_dict["login"][delete[2]] = {"action": "-d", "login": delete[2], "seuser": delete[1], "range": delete[3]}
if self.opage == FILE_EQUIV_PAGE:
for delete in self.file_equiv_delete_liststore:
if delete[0]:
self.cur_dict["fcontext-equiv"][delete[1]] = {"action": "-d", "src": delete[2]}
self.new_updates()
def on_save_delete_file_equiv_clicked(self, *args):
for delete in self.files_delete_liststore:
print(delete[0], delete[1], delete[2],)
def on_toggle_update(self, cell, path, model):
model[path][0] = not model[path][0]
def ipage_delete(self, liststore, key):
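        # Remove the first row whose column 0 and column 2 values match the given key pair.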
ctr = 0
for items in liststore:
if items[0] == key[0] and items[2] == key[1]:
iter = liststore.get_iter(ctr)
liststore.remove(iter)
return
ctr += 1
def on_toggle(self, cell, path, model):
if not path:
return
iter = self.boolean_filter.get_iter(path)
iter = self.boolean_filter.convert_iter_to_child_iter(iter)
name = model.get_value(iter, 2)
model.set_value(iter, 0, not model.get_value(iter, 0))
active = model.get_value(iter, 0)
if name in self.cur_dict["boolean"]:
del(self.cur_dict["boolean"][name])
else:
self.cur_dict["boolean"][name] = {"active": active}
self.new_updates()
def get_advanced_filter_data(self, entry, *args):
self.filter_txt = entry.get_text()
self.advanced_search_filter.refilter()
def get_filter_data(self, windows, *args):
        # Search for the desired item
        # The text that the user types into the filter is stored in filter_txt
self.filter_txt = windows.get_text()
self.treefilter.refilter()
def update_gui(self, *args):
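        # Build the confirmation tree shown in the update/revert dialog from the pending
        # changes recorded in cur_dict.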
self.update = True
self.update_treestore.clear()
for bools in self.cur_dict["boolean"]:
operation = self.cur_dict["boolean"][bools]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 1, sepolicy.boolean_desc(bools))
self.update_treestore.set_value(iter, 2, action[self.cur_dict["boolean"][bools]['active']])
self.update_treestore.set_value(iter, 3, True)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("SELinux name: %s")) % bools)
self.update_treestore.set_value(niter, 3, False)
for path, tclass in self.cur_dict["fcontext"]:
operation = self.cur_dict["fcontext"][(path, tclass)]["action"]
setype = self.cur_dict["fcontext"][(path, tclass)]["type"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 0, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, (_("Add file labeling for %s")) % self.application)
if operation == "-d":
self.update_treestore.set_value(iter, 1, (_("Delete file labeling for %s")) % self.application)
if operation == "-m":
self.update_treestore.set_value(iter, 1, (_("Modify file labeling for %s")) % self.application)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("File path: %s")) % path)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("File class: %s")) % sepolicy.file_type_str[tclass])
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("SELinux file type: %s")) % setype)
for port, protocol in self.cur_dict["port"]:
operation = self.cur_dict["port"][(port, protocol)]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 3, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, (_("Add ports for %s")) % self.application)
if operation == "-d":
self.update_treestore.set_value(iter, 1, (_("Delete ports for %s")) % self.application)
if operation == "-m":
self.update_treestore.set_value(iter, 1, (_("Modify ports for %s")) % self.application)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("Network ports: %s")) % port)
self.update_treestore.set_value(niter, 3, False)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("Network protocol: %s")) % protocol)
self.update_treestore.set_value(niter, 3, False)
setype = self.cur_dict["port"][(port, protocol)]["type"]
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("SELinux file type: %s")) % setype)
for user in self.cur_dict["user"]:
operation = self.cur_dict["user"][user]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 0, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, _("Add user"))
if operation == "-d":
self.update_treestore.set_value(iter, 1, _("Delete user"))
if operation == "-m":
self.update_treestore.set_value(iter, 1, _("Modify user"))
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("SELinux User : %s")) % user)
self.update_treestore.set_value(niter, 3, False)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
roles = self.cur_dict["user"][user]["role"]
self.update_treestore.set_value(niter, 1, (_("Roles: %s")) % roles)
mls = self.cur_dict["user"][user]["range"]
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, _("MLS/MCS Range: %s") % mls)
for login in self.cur_dict["login"]:
operation = self.cur_dict["login"][login]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 0, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, _("Add login mapping"))
if operation == "-d":
self.update_treestore.set_value(iter, 1, _("Delete login mapping"))
if operation == "-m":
self.update_treestore.set_value(iter, 1, _("Modify login mapping"))
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("Login Name : %s")) % login)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
seuser = self.cur_dict["login"][login]["seuser"]
self.update_treestore.set_value(niter, 1, (_("SELinux User: %s")) % seuser)
mls = self.cur_dict["login"][login]["range"]
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, _("MLS/MCS Range: %s") % mls)
for path in self.cur_dict["fcontext-equiv"]:
operation = self.cur_dict["fcontext-equiv"][path]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 0, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, (_("Add file equiv labeling.")))
if operation == "-d":
self.update_treestore.set_value(iter, 1, (_("Delete file equiv labeling.")))
if operation == "-m":
self.update_treestore.set_value(iter, 1, (_("Modify file equiv labeling.")))
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("File path : %s")) % path)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
src = self.cur_dict["fcontext-equiv"][path]["src"]
self.update_treestore.set_value(niter, 1, (_("Equivalence: %s")) % src)
self.show_popup(self.update_window)
def set_active_application_button(self):
if self.boolean_radio_button.get_active():
self.active_button = self.boolean_radio_button
if self.files_radio_button.get_active():
self.active_button = self.files_radio_button
if self.transitions_radio_button.get_active():
self.active_button = self.transitions_radio_button
if self.network_radio_button.get_active():
self.active_button = self.network_radio_button
def clearbuttons(self, clear=True):
self.main_selection_window.hide()
self.boolean_radio_button.set_visible(False)
self.files_radio_button.set_visible(False)
self.network_radio_button.set_visible(False)
self.transitions_radio_button.set_visible(False)
self.system_radio_button.set_visible(False)
self.lockdown_radio_button.set_visible(False)
self.user_radio_button.set_visible(False)
self.login_radio_button.set_visible(False)
if clear:
self.completion_entry.set_text("")
def show_system_page(self):
self.clearbuttons()
self.system_radio_button.set_visible(True)
self.lockdown_radio_button.set_visible(True)
self.applications_selection_button.set_label(_("System"))
self.system_radio_button.set_active(True)
self.tab_change()
self.idle_func()
def show_file_equiv_page(self, *args):
self.clearbuttons()
self.file_equiv_initialize()
self.file_equiv_radio_button.set_active(True)
self.applications_selection_button.set_label(_("File Equivalence"))
self.tab_change()
self.idle_func()
self.add_button.set_sensitive(True)
self.delete_button.set_sensitive(True)
def show_users_page(self):
self.clearbuttons()
self.login_radio_button.set_visible(True)
self.user_radio_button.set_visible(True)
self.applications_selection_button.set_label(_("Users"))
self.login_radio_button.set_active(True)
self.tab_change()
self.user_initialize()
self.login_initialize()
self.idle_func()
self.add_button.set_sensitive(True)
self.delete_button.set_sensitive(True)
def show_applications_page(self):
self.clearbuttons(False)
self.boolean_radio_button.set_visible(True)
self.files_radio_button.set_visible(True)
self.network_radio_button.set_visible(True)
self.transitions_radio_button.set_visible(True)
self.boolean_radio_button.set_active(True)
self.tab_change()
self.idle_func()
def system_interface(self, *args):
self.show_system_page()
def users_interface(self, *args):
self.show_users_page()
def show_mislabeled_files(self, checkbutton, *args):
iterlist = []
ctr = 0
ipage = self.inner_notebook_files.get_current_page()
if checkbutton.get_active() == True:
for items in self.liststore:
iter = self.treesort.get_iter(ctr)
iter = self.treesort.convert_iter_to_child_iter(iter)
iter = self.treefilter.convert_iter_to_child_iter(iter)
if iter != None:
if self.liststore.get_value(iter, 4) == False:
iterlist.append(iter)
ctr += 1
for iters in iterlist:
self.liststore.remove(iters)
elif self.application != None:
self.liststore.clear()
if ipage == EXE_PAGE:
self.executable_files_initialize(self.application)
elif ipage == WRITABLE_PAGE:
self.writable_files_initialize(self.application)
elif ipage == APP_PAGE:
self.application_files_initialize(self.application)
def fix_mislabeled(self, path):
cur = selinux.getfilecon(path)[1].split(":")[2]
con = selinux.matchpathcon(path, 0)[1].split(":")[2]
if self.verify(_("Run restorecon on %(PATH)s to change its type from %(CUR_CONTEXT)s to the default %(DEF_CONTEXT)s?") % {"PATH": path, "CUR_CONTEXT": cur, "DEF_CONTEXT": con}, title="restorecon dialog") == Gtk.ResponseType.YES:
self.dbus.restorecon(path)
self.application_selected()
def new_updates(self, *args):
self.update_button.set_sensitive(self.modified())
self.revert_button.set_sensitive(self.modified())
def update_or_revert_changes(self, button, *args):
self.update_gui()
self.update = (button.get_label() == _("Update"))
if self.update:
self.update_window.set_title(_("Update Changes"))
else:
self.update_window.set_title(_("Revert Changes"))
def apply_changes_button_press(self, *args):
self.close_popup()
if self.update:
self.update_the_system()
else:
self.revert_data()
self.finish_init = False
self.previously_modified_initialize(self.dbus.customized())
self.finish_init = True
self.clear_filters()
self.application_selected()
self.new_updates()
self.update_treestore.clear()
def update_the_system(self, *args):
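        """Apply all pending changes by sending the formatted semanage command buffer to the D-Bus backend."""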
self.close_popup()
update_buffer = self.format_update()
self.wait_mouse()
try:
self.dbus.semanage(update_buffer)
except dbus.exceptions.DBusException as e:
print(e)
self.ready_mouse()
self.init_cur()
def ipage_value_lookup(self, lookup):
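        """Map an inner-notebook tab label to its page index; the string "Booleans" is returned for unknown labels."""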
ipage_values = {"Executable Files": 0, "Writable Files": 1, "Application File Type": 2, "Inbound": 1, "Outbound": 0}
for value in ipage_values:
if value == lookup:
return ipage_values[value]
return "Booleans"
def get_attributes_update(self, attribute):
attribute = attribute.split(": ")[1]
bool_id = attribute.split(": ")[0]
if bool_id == "SELinux name":
self.bool_revert = attribute
else:
return attribute
def format_update(self):
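        """Serialize every pending change in self.cur_dict into a newline-separated semanage command buffer.

        Illustrative output (example values only):
            boolean -m -1 httpd_can_network_connect
            fcontext -a -t httpd_sys_content_t -f a /srv/www(/.*)?
            port -a -t http_port_t -p tcp 8080
        """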
self.revert_data()
update_buffer = ""
for k in self.cur_dict:
if k in "boolean":
for b in self.cur_dict[k]:
update_buffer += "boolean -m -%d %s\n" % (self.cur_dict[k][b]["active"], b)
if k in "login":
for l in self.cur_dict[k]:
if self.cur_dict[k][l]["action"] == "-d":
update_buffer += "login -d %s\n" % l
else:
update_buffer += "login %s -s %s -r %s %s\n" % (self.cur_dict[k][l]["action"], self.cur_dict[k][l]["seuser"], self.cur_dict[k][l]["range"], l)
if k in "user":
for u in self.cur_dict[k]:
if self.cur_dict[k][u]["action"] == "-d":
update_buffer += "user -d %s\n" % u
else:
update_buffer += "user %s -L %s -r %s -R %s %s\n" % (self.cur_dict[k][u]["action"], self.cur_dict[k][u]["level"], self.cur_dict[k][u]["range"], self.cur_dict[k][u]["role"], u)
if k in "fcontext-equiv":
for f in self.cur_dict[k]:
if self.cur_dict[k][f]["action"] == "-d":
update_buffer += "fcontext -d %s\n" % f
else:
update_buffer += "fcontext %s -e %s %s\n" % (self.cur_dict[k][f]["action"], self.cur_dict[k][f]["src"], f)
if k in "fcontext":
for f in self.cur_dict[k]:
if self.cur_dict[k][f]["action"] == "-d":
update_buffer += "fcontext -d %s\n" % f
else:
update_buffer += "fcontext %s -t %s -f %s %s\n" % (self.cur_dict[k][f]["action"], self.cur_dict[k][f]["type"], self.cur_dict[k][f]["class"], f)
if k in "port":
for port, protocol in self.cur_dict[k]:
if self.cur_dict[k][(port, protocol)]["action"] == "-d":
update_buffer += "port -d -p %s %s\n" % (protocol, port)
else:
update_buffer += "port %s -t %s -p %s %s\n" % (self.cur_dict[k][f]["action"], self.cur_dict[k][f]["type"], procotol, port)
return update_buffer
def revert_data(self):
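        """Drop pending changes whose row is deselected in the update treestore, removing indexes in reverse so they stay valid."""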
ctr = 0
remove_list = []
update_buffer = ""
for items in self.update_treestore:
if not self.update_treestore[ctr][0]:
remove_list.append(ctr)
ctr += 1
remove_list.reverse()
for ctr in remove_list:
self.remove_cur(ctr)
def reveal_advanced_system(self, label, *args):
advanced = label.get_text() == ADVANCED_LABEL[0]
if advanced:
label.set_text(ADVANCED_LABEL[1])
else:
label.set_text(ADVANCED_LABEL[0])
self.system_policy_label.set_visible(advanced)
self.system_policy_type_combobox.set_visible(advanced)
def reveal_advanced(self, label, *args):
advanced = label.get_text() == ADVANCED_LABEL[0]
if advanced:
label.set_text(ADVANCED_LABEL[1])
else:
label.set_text(ADVANCED_LABEL[0])
self.files_mls_label.set_visible(advanced)
self.files_mls_entry.set_visible(advanced)
self.network_mls_label.set_visible(advanced)
self.network_mls_entry.set_visible(advanced)
def on_show_advanced_search_window(self, label, *args):
if label.get_text() == ADVANCED_SEARCH_LABEL[1]:
label.set_text(ADVANCED_SEARCH_LABEL[0])
self.close_popup()
else:
label.set_text(ADVANCED_SEARCH_LABEL[1])
self.show_popup(self.advanced_search_window)
def set_enforce_text(self, value):
if value:
self.status_bar.push(self.context_id, _("System Status: Enforcing"))
self.current_status_enforcing.set_active(True)
else:
self.status_bar.push(self.context_id, _("System Status: Permissive"))
self.current_status_permissive.set_active(True)
def set_enforce(self, button):
if not self.finish_init:
return
self.dbus.setenforce(button.get_active())
self.set_enforce_text(button.get_active())
def on_browse_select(self, *args):
filename = self.file_dialog.get_filename()
        if filename is None:
return
self.clear_entry = False
self.file_dialog.hide()
self.files_path_entry.set_text(filename)
if self.import_export == 'Import':
self.import_config(filename)
elif self.import_export == 'Export':
self.export_config(filename)
def recursive_path(self, *args):
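        """Keep the files path entry in sync with the recursive toggle by appending or stripping the "(/.*)?" suffix."""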
path = self.files_path_entry.get_text()
if self.recursive_path_toggle.get_active():
if not path.endswith("(/.*)?"):
self.files_path_entry.set_text(path + "(/.*)?")
elif path.endswith("(/.*)?"):
path = path.split("(/.*)?")[0]
self.files_path_entry.set_text(path)
def highlight_entry_text(self, entry_obj, *args):
txt = entry_obj.get_text()
if self.clear_entry:
entry_obj.set_text('')
self.clear_entry = False
def autofill_add_files_entry(self, entry):
text = entry.get_text()
if text == '':
return
if text.endswith("(/.*)?"):
self.recursive_path_toggle.set_active(True)
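        # Pre-select the matching file type in the combo box when the path starts with one of the standard directories known to sepolicy (DEFAULT_DIRS)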
for d in sepolicy.DEFAULT_DIRS:
if text.startswith(d):
for t in self.files_type_combolist:
if t[0].endswith(sepolicy.DEFAULT_DIRS[d]):
self.combo_set_active_text(self.files_type_combobox, t[0])
def resize_columns(self, *args):
        # Query the boolean column's current width and cell renderers (sizing information only)
        self.boolean_column_1 = self.boolean_treeview.get_column(1)
        width = self.boolean_column_1.get_width()
        renderer = self.boolean_column_1.get_cells()
def browse_for_files(self, *args):
self.file_dialog.show()
def close_config_window(self, *args):
self.file_dialog.hide()
def change_default_policy(self, *args):
if self.typeHistory == self.system_policy_type_combobox.get_active():
return
if self.verify(_("Changing the policy type will cause a relabel of the entire file system on the next boot. Relabeling takes a long time depending on the size of the file system. Do you wish to continue?")) == Gtk.ResponseType.NO:
self.system_policy_type_combobox.set_active(self.typeHistory)
return None
self.dbus.change_default_policy(self.combo_get_active_text(self.system_policy_type_combobox))
self.dbus.relabel_on_boot(True)
self.typeHistory = self.system_policy_type_combobox.get_active()
def change_default_mode(self, button):
if not self.finish_init:
return
self.enabled_changed(button)
if button.get_active():
self.dbus.change_default_mode(button.get_label().lower())
def import_config_show(self, *args):
self.file_dialog.set_action(Gtk.FileChooserAction.OPEN)
self.file_dialog.set_title("Import Configuration")
self.file_dialog.show()
#self.file_dialog.set_uri('/tmp')
self.import_export = 'Import'
def export_config_show(self, *args):
self.file_dialog.set_action(Gtk.FileChooserAction.SAVE)
self.file_dialog.set_title("Export Configuration")
self.file_dialog.show()
self.import_export = 'Export'
def export_config(self, filename):
self.wait_mouse()
buf = self.dbus.customized()
fd = open(filename, 'w')
fd.write(buf)
fd.close()
self.ready_mouse()
def import_config(self, filename):
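        """Read a previously exported customization file and replay it through the D-Bus semanage helper."""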
fd = open(filename, "r")
buf = fd.read()
fd.close()
self.wait_mouse()
try:
self.dbus.semanage(buf)
except OSError:
pass
self.ready_mouse()
def init_dictionary(self, dic, app, ipage, operation, p, q, ftype, mls, changed, old):
if (app, ipage, operation) not in dic:
dic[app, ipage, operation] = {}
if (p, q) not in dic[app, ipage, operation]:
dic[app, ipage, operation][p, q] = {'type': ftype, 'mls': mls, 'changed': changed, 'old': old}
def translate_bool(self, b):
b = b.split('-')[1]
if b == '0':
return False
if b == '1':
return True
def relabel_on_reboot(self, *args):
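        """Toggle relabel-on-boot over D-Bus, but only when the requested state differs from the presence of /.autorelabel."""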
active = self.relabel_button.get_active()
exists = os.path.exists("/.autorelabel")
if active and exists:
return
if not active and not exists:
return
try:
self.dbus.relabel_on_boot(active)
except dbus.exceptions.DBusException as e:
self.error(e)
def closewindow(self, window, *args):
window.hide()
self.recursive_path_toggle.set_active(False)
self.window.set_sensitive(True)
if self.moreTypes_window_files == window:
self.show_popup(self.files_popup_window)
if self.combo_get_active_text(self.files_type_combobox) == _('More...'):
self.files_type_combobox.set_active(0)
if self.error_check_window == window:
if self.files_add:
self.show_popup(self.files_popup_window)
elif self.network_add:
self.show_popup(self.network_popup_window)
if self.files_mls_label.get_visible() or self.network_mls_label.get_visible():
self.advanced_text_files.set_visible(True)
self.files_mls_label.set_visible(False)
self.files_mls_entry.set_visible(False)
self.advanced_text_network.set_visible(True)
self.network_mls_label.set_visible(False)
self.network_mls_entry.set_visible(False)
if self.main_advanced_label.get_text() == ADVANCED_SEARCH_LABEL[1]:
self.main_advanced_label.set_text(ADVANCED_SEARCH_LABEL[0])
return True
def wait_mouse(self):
self.window.get_window().set_cursor(self.busy_cursor)
self.idle_func()
def ready_mouse(self):
self.window.get_window().set_cursor(self.ready_cursor)
self.idle_func()
def verify(self, message, title=""):
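        """Show a modal Yes/No message dialog and return the Gtk response code chosen by the user."""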
dlg = Gtk.MessageDialog(None, 0, Gtk.MessageType.INFO,
Gtk.ButtonsType.YES_NO,
message)
dlg.set_title(title)
dlg.set_position(Gtk.WindowPosition.MOUSE)
dlg.show_all()
rc = dlg.run()
dlg.destroy()
return rc
def error(self, message):
dlg = Gtk.MessageDialog(None, 0, Gtk.MessageType.ERROR,
Gtk.ButtonsType.CLOSE,
message)
dlg.set_position(Gtk.WindowPosition.MOUSE)
dlg.show_all()
dlg.run()
dlg.destroy()
def enabled_changed(self, radio):
if not radio.get_active():
return
label = radio.get_label()
if label == 'Disabled' and self.enforce_mode != DISABLED:
if self.verify(_("Changing to SELinux disabled requires a reboot. It is not recommended. If you later decide to turn SELinux back on, the system will be required to relabel. If you just want to see if SELinux is causing a problem on your system, you can go to permissive mode which will only log errors and not enforce SELinux policy. Permissive mode does not require a reboot. Do you wish to continue?")) == Gtk.ResponseType.NO:
self.enforce_button.set_active(True)
if label != 'Disabled' and self.enforce_mode == DISABLED:
if self.verify(_("Changing to SELinux enabled will cause a relabel of the entire file system on the next boot. Relabeling takes a long time depending on the size of the file system. Do you wish to continue?")) == Gtk.ResponseType.NO:
self.enforce_button.set_active(True)
self.enforce_button = radio
def clear_filters(self, *args):
self.filter_entry.set_text('')
self.show_modified_only.set_active(False)
def unconfined_toggle(self, *args):
if not self.finish_init:
return
self.wait_mouse()
if self.enable_unconfined_button.get_active():
self.dbus.semanage("module -e unconfined")
else:
self.dbus.semanage("module -d unconfined")
self.ready_mouse()
def permissive_toggle(self, *args):
if not self.finish_init:
return
self.wait_mouse()
if self.enable_permissive_button.get_active():
self.dbus.semanage("module -e permissivedomains")
else:
self.dbus.semanage("module -d permissivedomains")
self.ready_mouse()
def confirmation_close(self, button, *args):
if len(self.update_treestore) > 0:
if self.verify(_("You are attempting to close the application without applying your changes.\n * To apply changes you have made during this session, click No and click Update.\n * To leave the application without applying your changes, click Yes. All changes that you have made during this session will be lost."), _("Loss of data Dialog")) == Gtk.ResponseType.NO:
return True
self.quit()
def quit(self, *args):
sys.exit(0)
if __name__ == '__main__':
start = SELinuxGui()
| jpacg/su-binary | jni/selinux/python/sepolicy/sepolicy/gui.py | Python | gpl-2.0 | 133,535 |
import json
from urllib.parse import urljoin
class WebPage:
"""Models what's important to us about a web page"""
title = ""
links = []
images = []
scripts = []
def __init__(self, title="", links=None, images=None, scripts=None):
self.title = title
self.links = [] if links is None else links
self.images = [] if images is None else images
self.scripts = [] if scripts is None else scripts
def __str__(self):
return str(self.__dict__)
def to_dict(self):
return self.__dict__
@classmethod
def from_soup(cls, soup, url):
"""Return a WebPage from a BeautifulSoup Object"""
links = [urljoin(url, l["href"]) for l in soup.find_all('a', href=True)]
images = [urljoin(url, i["src"]) for i in soup.find_all('img', src=True)]
scripts = [urljoin(url, s["src"]) for s in soup.find_all('script', src=True)]
try:
title = soup.title.string
except AttributeError:
title = ""
return cls(title=title, links=links, images=images, scripts=scripts)
| TransactCharlie/dembones | src/dembones/webpage.py | Python | mit | 1,103 |
__version__=''' $Id'''
__doc__='''basic tests.'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, printLocation
setOutDir(__name__)
import unittest
def getrc(defns,depth=1):
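    """Return space-separated refcounts for each name in defns, evaluated in the calling frame's globals and locals."""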
from sys import getrefcount, _getframe
f = _getframe(depth)
G0 = f.f_globals
L = f.f_locals
if L is not G0:
LL = [L]
while 1:
f = f.f_back
G = f.f_globals
L = f.f_locals
if G is not G0 or G is L: break
LL.append(L)
L = {}
LL.reverse()
for l in LL:
L.update(l)
else:
L = L.copy()
G0 = G0.copy()
return ' '.join([str(getrefcount(eval(x,L,G0))-1) for x in defns.split()])
def checkrc(defns,rcv0):
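    """Compare current refcounts against rcv0 and describe any names whose count changed."""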
rcv1 = getrc(defns,2)
return ' '.join(["%s %s-->%s" % (x,v,w) for x,v,w in zip(defns.split(),rcv0.split(),rcv1.split()) if v!=w])
class RlAccelTestCase(unittest.TestCase):
def testFpStr(self):
        # should give six decimal places if less than 1;
        # if more, give up to seven significant figures
from _rl_accel import fp_str
assert fp_str(1,2,3)=='1 2 3'
assert fp_str(1) == '1'
assert fp_str(595.275574) == '595.2756'
assert fp_str(59.5275574) == '59.52756'
assert fp_str(5.95275574) == '5.952756'
def test_AsciiBase85Encode(self):
from _rl_accel import _AsciiBase85Encode
assert _AsciiBase85Encode('Dragan Andric')=='6ul^K@;[2RDIdd%@f~>'
def test_AsciiBase85Decode(self):
from _rl_accel import _AsciiBase85Decode
assert _AsciiBase85Decode('6ul^K@;[2RDIdd%@f~>')=='Dragan Andric'
def testEscapePDF(self):
from _rl_accel import escapePDF
assert escapePDF('(test)')=='\\(test\\)'
def test_instanceEscapePDF(self):
from _rl_accel import _instanceEscapePDF
assert _instanceEscapePDF('', '(test)')=='\\(test\\)'
def testCalcChecksum(self):
from _rl_accel import calcChecksum
assert calcChecksum('test')==1952805748
def test_instanceStringWidth(self):
from reportlab.pdfbase.pdfmetrics import registerFont, getFont, _fonts, unicode2T1
from reportlab.pdfbase.ttfonts import TTFont
ttfn = 'Vera'
t1fn = 'Times-Roman'
registerFont(TTFont(ttfn, "Vera.ttf"))
ttf = getFont(ttfn)
t1f = getFont(t1fn)
testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
enc='cp1252'
senc = 'utf8'
ts = 'ABCDEF\xce\x91\xce\xb2G'
utext = 'ABCDEF\xce\x91\xce\xb2G'.decode(senc)
fontSize = 12
defns="ttfn t1fn ttf t1f testCp1252 enc senc ts utext fontSize ttf.face ttf.face.charWidths ttf.face.defaultWidth t1f.widths t1f.encName t1f.substitutionFonts _fonts"
rcv = getrc(defns)
def tfunc(f,ts,fontSize,enc):
w1 = f.stringWidth(ts,fontSize,enc)
w2 = f._py_stringWidth(ts,fontSize,enc)
assert abs(w1-w2)<1e-10,"f(%r).stringWidthU(%r,%s,%r)-->%r != f._py_stringWidth(...)-->%r" % (f,ts,fontSize,enc,w1,w2)
tfunc(t1f,testCp1252,fontSize,enc)
tfunc(t1f,ts,fontSize,senc)
tfunc(t1f,utext,fontSize,senc)
tfunc(ttf,ts,fontSize,senc)
tfunc(ttf,testCp1252,fontSize,enc)
tfunc(ttf,utext,fontSize,senc)
rcc = checkrc(defns,rcv)
assert not rcc, "rc diffs (%s)" % rcc
def test_unicode2T1(self):
from reportlab.pdfbase.pdfmetrics import _py_unicode2T1, getFont, _fonts
from _rl_accel import unicode2T1
t1fn = 'Times-Roman'
t1f = getFont(t1fn)
enc = 'cp1252'
senc = 'utf8'
testCp1252 = ('copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))).decode(enc)
utext = 'This is the end of the \xce\x91\xce\xb2 world. This is the end of the \xce\x91\xce\xb2 world jap=\xe3\x83\x9b\xe3\x83\x86. This is the end of the \xce\x91\xce\xb2 world. This is the end of the \xce\x91\xce\xb2 world jap=\xe3\x83\x9b\xe3\x83\x86'.decode('utf8')
def tfunc(f,ts):
w1 = unicode2T1(ts,[f]+f.substitutionFonts)
w2 = _py_unicode2T1(ts,[f]+f.substitutionFonts)
assert w1==w2,"%r != %r" % (w1,w2)
defns="t1fn t1f testCp1252 enc senc utext t1f.widths t1f.encName t1f.substitutionFonts _fonts"
rcv = getrc(defns)
tfunc(t1f,testCp1252)
tfunc(t1f,utext)
rcc = checkrc(defns,rcv)
assert not rcc, "rc diffs (%s)" % rcc
def test_sameFrag(self):
from _rl_accel import _sameFrag
class ABag:
def __init__(self,**kwd):
self.__dict__.update(kwd)
def __str__(self):
V=['%s=%r' % v for v in self.__dict__.items()]
V.sort()
return 'ABag(%s)' % ','.join(V)
a=ABag(fontName='Helvetica',fontSize=12, textColor="red", rise=0, underline=0, strike=0, link="aaaa")
b=ABag(fontName='Helvetica',fontSize=12, textColor="red", rise=0, underline=0, strike=0, link="aaaa")
for name in ("fontName", "fontSize", "textColor", "rise", "underline", "strike", "link"):
old = getattr(a,name)
assert _sameFrag(a,b)==1, "_sameFrag(%s,%s)!=1" % (a,b)
assert _sameFrag(b,a)==1, "_sameFrag(%s,%s)!=1" % (b,a)
setattr(a,name,None)
assert _sameFrag(a,b)==0, "_sameFrag(%s,%s)!=0" % (a,b)
assert _sameFrag(b,a)==0, "_sameFrag(%s,%s)!=0" % (b,a)
delattr(a,name)
assert _sameFrag(a,b)==0, "_sameFrag(%s,%s)!=0" % (a,b)
assert _sameFrag(b,a)==0, "_sameFrag(%s,%s)!=0" % (b,a)
delattr(b,name)
assert _sameFrag(a,b)==1, "_sameFrag(%s,%s)!=1" % (a,b)
assert _sameFrag(b,a)==1, "_sameFrag(%s,%s)!=1" % (b,a)
setattr(a,name,old)
setattr(b,name,old)
def makeSuite():
# only run the tests if _rl_accel is present
try:
import _rl_accel
Klass = RlAccelTestCase
except:
class Klass(unittest.TestCase):
pass
return makeSuiteForClasses(Klass)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| mattjmorrison/ReportLab | tests/test_rl_accel.py | Python | bsd-3-clause | 6,284 |
##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Boost, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
@author: Petar Forai (IMP/IMBA)
@author: Luca Marsella (CSCS)
@author: Guilherme Peretti-Pezzi (CSCS)
@author: Joachim Hein (Lund University)
@author: Michele Dolfi (ETH Zurich)
@author: Simon Branford (University of Birmingham)
"""
from distutils.version import LooseVersion
import fileinput
import glob
import os
import re
import sys
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import copy, mkdir, write_file
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import AARCH64, POWER, UNKNOWN
from easybuild.tools.systemtools import get_cpu_architecture, get_glibc_version, get_shared_lib_ext
class EB_Boost(EasyBlock):
"""Support for building Boost."""
def __init__(self, *args, **kwargs):
"""Initialize Boost-specific variables."""
super(EB_Boost, self).__init__(*args, **kwargs)
self.objdir = None
self.pyvers = []
if LooseVersion(self.version) >= LooseVersion("1.71.0"):
self.bjamcmd = 'b2'
else:
self.bjamcmd = 'bjam'
@staticmethod
def extra_options():
"""Add extra easyconfig parameters for Boost."""
extra_vars = {
'boost_mpi': [False, "Build mpi boost module", CUSTOM],
'boost_multi_thread': [False, "Build boost with multi-thread option", CUSTOM],
'toolset': [None, "Toolset to use for Boost configuration ('--with-toolset for bootstrap.sh')", CUSTOM],
'mpi_launcher': [None, "Launcher to use when running MPI regression tests", CUSTOM],
'only_python_bindings': [False, "Only install Boost.Python library providing Python bindings", CUSTOM],
'use_glibcxx11_abi': [None, "Use the GLIBCXX11 ABI", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def patch_step(self):
"""Patch Boost source code before building."""
super(EB_Boost, self).patch_step()
# TIME_UTC is also defined in recent glibc versions, so we need to rename it for old Boost versions (<= 1.49)
glibc_version = get_glibc_version()
        new_glibc = glibc_version is not UNKNOWN and LooseVersion(glibc_version) > LooseVersion("2.15")
        if new_glibc and LooseVersion(self.version) <= LooseVersion("1.49.0"):
self.log.info("Patching because the glibc version is too new")
files_to_patch = ["boost/thread/xtime.hpp"] + glob.glob("libs/interprocess/test/*.hpp")
files_to_patch += glob.glob("libs/spirit/classic/test/*.cpp") + glob.glob("libs/spirit/classic/test/*.inl")
for patchfile in files_to_patch:
try:
for line in fileinput.input("%s" % patchfile, inplace=1, backup='.orig'):
line = re.sub(r"TIME_UTC", r"TIME_UTC_", line)
sys.stdout.write(line)
except IOError as err:
raise EasyBuildError("Failed to patch %s: %s", patchfile, err)
def prepare_step(self, *args, **kwargs):
"""Prepare build environment."""
super(EB_Boost, self).prepare_step(*args, **kwargs)
# keep track of Python version(s) used during installation,
# so we can perform a complete sanity check
if get_software_root('Python'):
self.pyvers.append(get_software_version('Python'))
def configure_step(self):
"""Configure Boost build using custom tools"""
# mpi sanity check
if self.cfg['boost_mpi'] and not self.toolchain.options.get('usempi', None):
raise EasyBuildError("When enabling building boost_mpi, also enable the 'usempi' toolchain option.")
# create build directory (Boost doesn't like being built in source dir)
self.objdir = os.path.join(self.builddir, 'obj')
mkdir(self.objdir)
# generate config depending on compiler used
toolset = self.cfg['toolset']
if toolset is None:
if self.toolchain.comp_family() == toolchain.INTELCOMP:
toolset = 'intel-linux'
elif self.toolchain.comp_family() == toolchain.GCC:
toolset = 'gcc'
else:
raise EasyBuildError("Unknown compiler used, don't know what to specify to --with-toolset, aborting.")
cmd = "%s ./bootstrap.sh --with-toolset=%s --prefix=%s %s"
tup = (self.cfg['preconfigopts'], toolset, self.objdir, self.cfg['configopts'])
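        # Resulting command is along the lines of (illustrative): ./bootstrap.sh --with-toolset=gcc --prefix=<builddir>/obj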
run_cmd(cmd % tup, log_all=True, simple=True)
if self.cfg['boost_mpi']:
self.toolchain.options['usempi'] = True
# configure the boost mpi module
# http://www.boost.org/doc/libs/1_47_0/doc/html/mpi/getting_started.html
# let Boost.Build know to look here for the config file
txt = ''
# Check if using a Cray toolchain and configure MPI accordingly
if self.toolchain.toolchain_family() == toolchain.CRAYPE:
if self.toolchain.PRGENV_MODULE_NAME_SUFFIX == 'gnu':
craympichdir = os.getenv('CRAY_MPICH2_DIR')
craygccversion = os.getenv('GCC_VERSION')
txt = '\n'.join([
'local CRAY_MPICH2_DIR = %s ;' % craympichdir,
'using gcc ',
': %s' % craygccversion,
': CC ',
': <compileflags>-I$(CRAY_MPICH2_DIR)/include ',
r' <linkflags>-L$(CRAY_MPICH2_DIR)/lib \ ',
'; ',
'using mpi ',
': CC ',
': <find-shared-library>mpich ',
': %s' % self.cfg['mpi_launcher'],
';',
'',
])
else:
raise EasyBuildError("Bailing out: only PrgEnv-gnu supported for now")
else:
txt = "using mpi : %s ;" % os.getenv("MPICXX")
write_file('user-config.jam', txt, append=True)
def build_boost_variant(self, bjamoptions, paracmd):
"""Build Boost library with specified options for bjam."""
# build with specified options
cmd = "%s ./%s %s %s %s" % (self.cfg['prebuildopts'], self.bjamcmd, bjamoptions, paracmd, self.cfg['buildopts'])
run_cmd(cmd, log_all=True, simple=True)
# install built Boost library
cmd = "%s ./%s %s install %s %s" % (
self.cfg['preinstallopts'], self.bjamcmd, bjamoptions, paracmd, self.cfg['installopts'])
run_cmd(cmd, log_all=True, simple=True)
# clean up before proceeding with next build
run_cmd("./%s --clean-all" % self.bjamcmd, log_all=True, simple=True)
def build_step(self):
"""Build Boost with bjam tool."""
bjamoptions = " --prefix=%s" % self.objdir
cxxflags = os.getenv('CXXFLAGS')
# only disable -D_GLIBCXX_USE_CXX11_ABI if use_glibcxx11_abi was explicitly set to False
# None value is the default, which corresponds to default setting (=1 since GCC 5.x)
if self.cfg['use_glibcxx11_abi'] is not None:
cxxflags += ' -D_GLIBCXX_USE_CXX11_ABI='
if self.cfg['use_glibcxx11_abi']:
cxxflags += '1'
else:
cxxflags += '0'
if cxxflags is not None:
bjamoptions += " cxxflags='%s'" % cxxflags
ldflags = os.getenv('LDFLAGS')
if ldflags is not None:
bjamoptions += " linkflags='%s'" % ldflags
# specify path for bzip2/zlib if module is loaded
for lib in ["bzip2", "zlib"]:
libroot = get_software_root(lib)
if libroot:
bjamoptions += " -s%s_INCLUDE=%s/include" % (lib.upper(), libroot)
bjamoptions += " -s%s_LIBPATH=%s/lib" % (lib.upper(), libroot)
paracmd = ''
if self.cfg['parallel']:
paracmd = "-j %s" % self.cfg['parallel']
if self.cfg['only_python_bindings']:
# magic incantation to only install Boost Python bindings is... --with-python
# see http://boostorg.github.io/python/doc/html/building/installing_boost_python_on_your_.html
bjamoptions += " --with-python"
if self.cfg['boost_mpi']:
self.log.info("Building boost_mpi library")
self.build_boost_variant(bjamoptions + " --user-config=user-config.jam --with-mpi", paracmd)
if self.cfg['boost_multi_thread']:
self.log.info("Building boost with multi threading")
self.build_boost_variant(bjamoptions + " threading=multi --layout=tagged", paracmd)
# if both boost_mpi and boost_multi_thread are enabled, build boost mpi with multi-thread support
if self.cfg['boost_multi_thread'] and self.cfg['boost_mpi']:
self.log.info("Building boost_mpi with multi threading")
extra_bjamoptions = " --user-config=user-config.jam --with-mpi threading=multi --layout=tagged"
self.build_boost_variant(bjamoptions + extra_bjamoptions, paracmd)
# install remainder of boost libraries
self.log.info("Installing boost libraries")
cmd = "%s ./%s %s install %s %s" % (
self.cfg['preinstallopts'], self.bjamcmd, bjamoptions, paracmd, self.cfg['installopts'])
run_cmd(cmd, log_all=True, simple=True)
def install_step(self):
"""Install Boost by copying files to install dir."""
self.log.info("Copying %s to installation dir %s", self.objdir, self.installdir)
if self.cfg['only_python_bindings'] and 'Python' in self.cfg['multi_deps'] and self.iter_idx > 0:
self.log.info("Main installation should already exist, only copying over missing Python libraries.")
copy(glob.glob(os.path.join(self.objdir, 'lib', 'libboost_python*')), os.path.join(self.installdir, 'lib'))
else:
copy(glob.glob(os.path.join(self.objdir, '*')), self.installdir)
def sanity_check_step(self):
"""Custom sanity check for Boost."""
shlib_ext = get_shared_lib_ext()
custom_paths = {
'files': [],
'dirs': ['include/boost']
}
if not self.cfg['only_python_bindings']:
custom_paths['files'].append(os.path.join('lib', 'libboost_system.%s' % shlib_ext))
if self.cfg['boost_mpi']:
custom_paths['files'].append(os.path.join('lib', 'libboost_mpi.%s' % shlib_ext))
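            # Boost >= 1.67 tags the Python library with <major><minor> (e.g. libboost_python36); older releases use just the major version for Python 3 and no suffix for Python 2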
for pyver in self.pyvers:
pymajorver = pyver.split('.')[0]
pyminorver = pyver.split('.')[1]
if LooseVersion(self.version) >= LooseVersion("1.67.0"):
suffix = '%s%s' % (pymajorver, pyminorver)
elif int(pymajorver) >= 3:
suffix = pymajorver
else:
suffix = ''
custom_paths['files'].append(os.path.join('lib', 'libboost_python%s.%s' % (suffix, shlib_ext)))
lib_mt_suffix = '-mt'
# MT libraries gained an extra suffix from v1.69.0 onwards
if LooseVersion(self.version) >= LooseVersion("1.69.0"):
if get_cpu_architecture() == AARCH64:
lib_mt_suffix += '-a64'
elif get_cpu_architecture() == POWER:
lib_mt_suffix += '-p64'
else:
lib_mt_suffix += '-x64'
if self.cfg['boost_multi_thread']:
custom_paths['files'].append(os.path.join('lib', 'libboost_thread%s.%s' % (lib_mt_suffix, shlib_ext)))
if self.cfg['boost_mpi'] and self.cfg['boost_multi_thread']:
custom_paths['files'].append(os.path.join('lib', 'libboost_mpi%s.%s' % (lib_mt_suffix, shlib_ext)))
super(EB_Boost, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Set up a BOOST_ROOT environment variable to e.g. ease Boost handling by cmake"""
txt = super(EB_Boost, self).make_module_extra()
if not self.cfg['only_python_bindings']:
txt += self.module_generator.set_environment('BOOST_ROOT', self.installdir)
return txt
| hpcugent/easybuild-easyblocks | easybuild/easyblocks/b/boost.py | Python | gpl-2.0 | 13,762 |