from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from django.forms import ValidationError, ModelForm
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User as AuthUser
from mezzanine.conf import settings
from mezzanine.core.forms import DynamicInlineAdminForm
from mezzanine.core.models import (Orderable, SitePermission,
CONTENT_STATUS_PUBLISHED)
from mezzanine.utils.static import static_lazy as static
if settings.USE_MODELTRANSLATION:
from collections import OrderedDict
from django.utils.translation import activate, get_language
from modeltranslation.admin import (TranslationAdmin,
TranslationInlineModelAdmin)
class BaseTranslationModelAdmin(TranslationAdmin):
"""
Mimics modeltranslation's TabbedTranslationAdmin but uses a
custom tabbed_translation_fields.js
"""
class Media:
js = (
static("modeltranslation/js/force_jquery.js"),
static("mezzanine/js/%s" % settings.JQUERY_UI_FILENAME),
static("mezzanine/js/admin/tabbed_translation_fields.js"),
)
css = {
"all": (static(
"mezzanine/css/admin/tabbed_translation_fields.css"),),
}
else:
class BaseTranslationModelAdmin(admin.ModelAdmin):
"""
Abstract class used to handle the switch between translation
and no-translation class logic. We define the basic structure
for the Media class so we can extend it consistently regardless
of whether or not modeltranslation is used.
"""
class Media:
js = ()
css = {"all": ()}
User = get_user_model()
class DisplayableAdminForm(ModelForm):
def clean_content(self):
status = self.cleaned_data.get("status")
content = self.cleaned_data.get("content")
if status == CONTENT_STATUS_PUBLISHED and not content:
raise ValidationError(_("This field is required if status "
"is set to published."))
return content
class DisplayableAdmin(BaseTranslationModelAdmin):
"""
Admin class for subclasses of the abstract ``Displayable`` model.
"""
list_display = ("title", "status", "admin_link")
list_display_links = ("title",)
list_editable = ("status",)
list_filter = ("status", "keywords__keyword")
# modeltranslation breaks date hierarchy links, see:
# https://github.com/deschler/django-modeltranslation/issues/324
# Once that's resolved we can restore this.
date_hierarchy = None if settings.USE_MODELTRANSLATION else "publish_date"
radio_fields = {"status": admin.HORIZONTAL}
fieldsets = (
(None, {
"fields": ["title", "status", ("publish_date", "expiry_date")],
}),
(_("Meta data"), {
"fields": ["_meta_title", "slug",
("description", "gen_description"),
"keywords", "in_sitemap"],
"classes": ("collapse-closed",)
}),
)
form = DisplayableAdminForm
def __init__(self, *args, **kwargs):
super(DisplayableAdmin, self).__init__(*args, **kwargs)
try:
self.search_fields = list(set(list(self.search_fields) + list(
self.model.objects.get_search_fields().keys())))
except AttributeError:
pass
def save_model(self, request, obj, form, change):
"""
Save the model for every language so that field auto-population
is done for each of them.
"""
super(DisplayableAdmin, self).save_model(request, obj, form, change)
if settings.USE_MODELTRANSLATION:
lang = get_language()
for code in OrderedDict(settings.LANGUAGES):
if code != lang: # Already done
try:
activate(code)
except Exception:
pass
else:
obj.save()
activate(lang)
class BaseDynamicInlineAdmin(object):
"""
Admin inline that uses JS to inject an "Add another" link which
when clicked, dynamically reveals another fieldset. Also handles
adding the ``_order`` field and its widget for models that
subclass ``Orderable``.
"""
form = DynamicInlineAdminForm
extra = 1
def get_fields(self, request, obj=None):
fields = super(BaseDynamicInlineAdmin, self).get_fields(request, obj)
if issubclass(self.model, Orderable):
fields = list(fields)
try:
fields.remove("_order")
except ValueError:
pass
fields.append("_order")
return fields
def get_fieldsets(self, request, obj=None):
fieldsets = super(BaseDynamicInlineAdmin, self).get_fieldsets(
request, obj)
if issubclass(self.model, Orderable):
for fieldset in fieldsets:
fields = [f for f in list(fieldset[1]["fields"])
if not hasattr(f, "translated_field")]
try:
fields.remove("_order")
except ValueError:
pass
fieldset[1]["fields"] = fields
fieldsets[-1][1]["fields"].append("_order")
return fieldsets
def get_inline_base_class(cls):
if settings.USE_MODELTRANSLATION:
class InlineBase(TranslationInlineModelAdmin, cls):
"""
Abstract class that mimics django-modeltranslation's
Translation{Tabular,Stacked}Inline. Used as a placeholder
for future improvement.
"""
pass
return InlineBase
return cls
class TabularDynamicInlineAdmin(BaseDynamicInlineAdmin,
get_inline_base_class(admin.TabularInline)):
pass
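# A minimal usage sketch (hypothetical model name, not part of this module):
# an inline subclasses TabularDynamicInlineAdmin to get the dynamic
# "Add another" link and, for ``Orderable`` subclasses, automatic handling
# of the ``_order`` field, e.g.:
#
#     class GalleryImageInline(TabularDynamicInlineAdmin):
#         model = GalleryImage  # hypothetical Orderable subclass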
class StackedDynamicInlineAdmin(BaseDynamicInlineAdmin,
get_inline_base_class(admin.StackedInline)):
def __init__(self, *args, **kwargs):
"""
Stacked dynamic inlines won't work without grappelli
installed, as the JavaScript in dynamic_inline.js isn't
able to target each of the inlines to set the value of
the order field.
"""
grappelli_name = getattr(settings, "PACKAGE_NAME_GRAPPELLI")
if grappelli_name not in settings.INSTALLED_APPS:
error = "StackedDynamicInlineAdmin requires Grappelli to be installed."
raise Exception(error)
super(StackedDynamicInlineAdmin, self).__init__(*args, **kwargs)
class OwnableAdmin(admin.ModelAdmin):
"""
Admin class for models that subclass the abstract ``Ownable``
model. Handles limiting the change list to objects owned by the
logged in user, as well as setting the owner of newly created
objects to the logged in user.
Remember that this will include the ``user`` field in the required
fields for the admin change form which may not be desirable. The
best approach to solve this is to define a ``fieldsets`` attribute
that excludes the ``user`` field, or simply add ``user`` to your
admin excludes: ``exclude = ('user',)``
"""
def save_form(self, request, form, change):
"""
Set the object's owner as the logged in user.
"""
obj = form.save(commit=False)
if obj.user_id is None:
obj.user = request.user
return super(OwnableAdmin, self).save_form(request, form, change)
def get_queryset(self, request):
"""
Filter the change list by currently logged in user if not a
superuser. We also skip filtering if the model for this admin
class has been added to the sequence in the setting
``OWNABLE_MODELS_ALL_EDITABLE``, which contains models in the
format ``app_label.object_name``, and allows models subclassing
``Ownable`` to be excluded from filtering, e.g. ownership should
not imply permission to edit.
"""
opts = self.model._meta
model_name = ("%s.%s" % (opts.app_label, opts.object_name)).lower()
models_all_editable = settings.OWNABLE_MODELS_ALL_EDITABLE
models_all_editable = [m.lower() for m in models_all_editable]
qs = super(OwnableAdmin, self).get_queryset(request)
if request.user.is_superuser or model_name in models_all_editable:
return qs
return qs.filter(user__id=request.user.id)
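# A minimal usage sketch (hypothetical model names, not part of this module):
# hide the ``user`` field from the change form as the docstring recommends,
# and optionally exempt the model from ownership filtering via settings, e.g.:
#
#     class BlogPostAdmin(OwnableAdmin):
#         exclude = ("user",)
#
#     # settings.py (optional): let any staff user edit all BlogPost objects
#     # OWNABLE_MODELS_ALL_EDITABLE = ("blog.BlogPost",)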
###########################################
# Site Permissions Inlines for User Admin #
###########################################
class SitePermissionInline(admin.TabularInline):
model = SitePermission
max_num = 1
can_delete = False
class SitePermissionUserAdmin(UserAdmin):
inlines = [SitePermissionInline]
# only register if User hasn't been overridden
if User == AuthUser:
if User in admin.site._registry:
admin.site.unregister(User)
admin.site.register(User, SitePermissionUserAdmin)
import os
import re
import unittest
from socket import AF_INET, AF_INET6
from unittest.mock import Mock, call, patch
from requests.packages.urllib3.util.connection import allowed_gai_family
from streamlink import NoPluginError, Streamlink
from streamlink.plugin import HIGH_PRIORITY, LOW_PRIORITY, NORMAL_PRIORITY, NO_PRIORITY, Plugin, pluginmatcher
from streamlink.stream import AkamaiHDStream, HLSStream, HTTPStream, RTMPStream
class EmptyPlugin(Plugin):
def _get_streams(self):
pass # pragma: no cover
class TestSession(unittest.TestCase):
plugin_path = os.path.join(os.path.dirname(__file__), "plugin")
def subject(self, load_plugins=True):
session = Streamlink()
if load_plugins:
session.load_plugins(self.plugin_path)
return session
def test_exceptions(self):
session = self.subject()
self.assertRaises(NoPluginError, session.resolve_url, "invalid url", follow_redirect=False)
def test_load_plugins(self):
session = self.subject()
plugins = session.get_plugins()
self.assertIn("testplugin", plugins)
self.assertNotIn("testplugin_missing", plugins)
self.assertNotIn("testplugin_invalid", plugins)
def test_load_plugins_builtin(self):
session = self.subject()
plugins = session.get_plugins()
self.assertIn("twitch", plugins)
self.assertEqual(plugins["twitch"].__module__, "streamlink.plugins.twitch")
@patch("streamlink.session.log")
def test_load_plugins_override(self, mock_log):
session = self.subject()
plugins = session.get_plugins()
file = os.path.join(os.path.dirname(__file__), "plugin", "testplugin_override.py")
self.assertIn("testplugin", plugins)
self.assertNotIn("testplugin_override", plugins)
self.assertEqual(plugins["testplugin"].__name__, "TestPluginOverride")
self.assertEqual(plugins["testplugin"].__module__, "streamlink.plugins.testplugin_override")
self.assertEqual(mock_log.debug.mock_calls, [call(f"Plugin testplugin is being overridden by {file}")])
@patch("streamlink.session.load_module")
@patch("streamlink.session.log")
def test_load_plugins_importerror(self, mock_log, mock_load_module):
mock_load_module.side_effect = ImportError()
session = self.subject()
plugins = session.get_plugins()
self.assertGreater(len(mock_log.exception.mock_calls), 0)
self.assertEqual(len(plugins.keys()), 0)
@patch("streamlink.session.load_module")
@patch("streamlink.session.log")
def test_load_plugins_syntaxerror(self, mock_log, mock_load_module):
mock_load_module.side_effect = SyntaxError()
with self.assertRaises(SyntaxError):
self.subject()
def test_resolve_url(self):
session = self.subject()
plugins = session.get_plugins()
plugin = session.resolve_url("http://test.se/channel")
self.assertTrue(isinstance(plugin, Plugin))
self.assertTrue(isinstance(plugin, plugins["testplugin"]))
self.assertTrue(hasattr(session.resolve_url, "cache_info"), "resolve_url has a lookup cache")
def test_resolve_url_priority(self):
@pluginmatcher(priority=HIGH_PRIORITY, pattern=re.compile(
"http://(high|normal|low|no)$"
))
class HighPriority(EmptyPlugin):
pass
@pluginmatcher(priority=NORMAL_PRIORITY, pattern=re.compile(
"http://(normal|low|no)$"
))
class NormalPriority(EmptyPlugin):
pass
@pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(
"http://(low|no)$"
))
class LowPriority(EmptyPlugin):
pass
@pluginmatcher(priority=NO_PRIORITY, pattern=re.compile(
"http://(no)$"
))
class NoPriority(EmptyPlugin):
pass
session = self.subject(load_plugins=False)
session.plugins = {
"high": HighPriority,
"normal": NormalPriority,
"low": LowPriority,
"no": NoPriority,
}
no = session.resolve_url_no_redirect("no")
low = session.resolve_url_no_redirect("low")
normal = session.resolve_url_no_redirect("normal")
high = session.resolve_url_no_redirect("high")
self.assertIsInstance(no, HighPriority)
self.assertIsInstance(low, HighPriority)
self.assertIsInstance(normal, HighPriority)
self.assertIsInstance(high, HighPriority)
session.resolve_url.cache_clear()
session.plugins = {
"no": NoPriority,
}
with self.assertRaises(NoPluginError):
session.resolve_url_no_redirect("no")
@patch("streamlink.session.log")
def test_resolve_deprecated(self, mock_log: Mock):
@pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(
"http://low"
))
class LowPriority(EmptyPlugin):
pass
class DeprecatedNormalPriority(EmptyPlugin):
# noinspection PyUnusedLocal
@classmethod
def can_handle_url(cls, url):
return True
class DeprecatedHighPriority(DeprecatedNormalPriority):
# noinspection PyUnusedLocal
@classmethod
def priority(cls, url):
return HIGH_PRIORITY
session = self.subject(load_plugins=False)
session.plugins = {
"empty": EmptyPlugin,
"low": LowPriority,
"dep-normal-one": DeprecatedNormalPriority,
"dep-normal-two": DeprecatedNormalPriority,
"dep-high": DeprecatedHighPriority,
}
self.assertIsInstance(session.resolve_url_no_redirect("low"), DeprecatedHighPriority)
self.assertEqual(mock_log.info.mock_calls, [
call("Resolved plugin dep-normal-one with deprecated can_handle_url API"),
call("Resolved plugin dep-high with deprecated can_handle_url API")
])
def test_resolve_url_no_redirect(self):
session = self.subject()
plugin = session.resolve_url_no_redirect("http://test.se/channel")
plugins = session.get_plugins()
self.assertTrue(isinstance(plugin, Plugin))
self.assertTrue(isinstance(plugin, plugins["testplugin"]))
def test_options(self):
session = self.subject()
session.set_option("test_option", "option")
self.assertEqual(session.get_option("test_option"), "option")
self.assertEqual(session.get_option("non_existing"), None)
self.assertEqual(session.get_plugin_option("testplugin", "a_option"), "default")
session.set_plugin_option("testplugin", "another_option", "test")
self.assertEqual(session.get_plugin_option("testplugin", "another_option"), "test")
self.assertEqual(session.get_plugin_option("non_existing", "non_existing"), None)
self.assertEqual(session.get_plugin_option("testplugin", "non_existing"), None)
def test_plugin(self):
session = self.subject()
plugin = session.resolve_url("http://test.se/channel")
streams = plugin.streams()
self.assertTrue("best" in streams)
self.assertTrue("worst" in streams)
self.assertTrue(streams["best"] is streams["1080p"])
self.assertTrue(streams["worst"] is streams["350k"])
self.assertTrue(isinstance(streams["rtmp"], RTMPStream))
self.assertTrue(isinstance(streams["http"], HTTPStream))
self.assertTrue(isinstance(streams["hls"], HLSStream))
self.assertTrue(isinstance(streams["akamaihd"], AkamaiHDStream))
def test_plugin_stream_types(self):
session = self.subject()
plugin = session.resolve_url("http://test.se/channel")
streams = plugin.streams(stream_types=["http", "rtmp"])
self.assertTrue(isinstance(streams["480p"], HTTPStream))
self.assertTrue(isinstance(streams["480p_rtmp"], RTMPStream))
streams = plugin.streams(stream_types=["rtmp", "http"])
self.assertTrue(isinstance(streams["480p"], RTMPStream))
self.assertTrue(isinstance(streams["480p_http"], HTTPStream))
def test_plugin_stream_sorting_excludes(self):
session = self.subject()
plugin = session.resolve_url("http://test.se/channel")
streams = plugin.streams(sorting_excludes=[])
self.assertTrue("best" in streams)
self.assertTrue("worst" in streams)
self.assertFalse("best-unfiltered" in streams)
self.assertFalse("worst-unfiltered" in streams)
self.assertTrue(streams["worst"] is streams["350k"])
self.assertTrue(streams["best"] is streams["1080p"])
streams = plugin.streams(sorting_excludes=["1080p", "3000k"])
self.assertTrue("best" in streams)
self.assertTrue("worst" in streams)
self.assertFalse("best-unfiltered" in streams)
self.assertFalse("worst-unfiltered" in streams)
self.assertTrue(streams["worst"] is streams["350k"])
self.assertTrue(streams["best"] is streams["1500k"])
streams = plugin.streams(sorting_excludes=[">=1080p", ">1500k"])
self.assertTrue(streams["best"] is streams["1500k"])
streams = plugin.streams(sorting_excludes=lambda q: not q.endswith("p"))
self.assertTrue(streams["best"] is streams["3000k"])
streams = plugin.streams(sorting_excludes=lambda q: False)
self.assertFalse("best" in streams)
self.assertFalse("worst" in streams)
self.assertTrue("best-unfiltered" in streams)
self.assertTrue("worst-unfiltered" in streams)
self.assertTrue(streams["worst-unfiltered"] is streams["350k"])
self.assertTrue(streams["best-unfiltered"] is streams["1080p"])
plugin = session.resolve_url("http://test.se/UnsortableStreamNames")
streams = plugin.streams()
self.assertFalse("best" in streams)
self.assertFalse("worst" in streams)
self.assertFalse("best-unfiltered" in streams)
self.assertFalse("worst-unfiltered" in streams)
self.assertTrue("vod" in streams)
self.assertTrue("vod_alt" in streams)
self.assertTrue("vod_alt2" in streams)
def test_set_and_get_locale(self):
session = Streamlink()
session.set_option("locale", "en_US")
self.assertEqual(session.localization.country.alpha2, "US")
self.assertEqual(session.localization.language.alpha2, "en")
self.assertEqual(session.localization.language_code, "en_US")
@patch("streamlink.session.api")
def test_interface(self, mock_api):
adapter_http = Mock(poolmanager=Mock(connection_pool_kw={}))
adapter_https = Mock(poolmanager=Mock(connection_pool_kw={}))
adapter_foo = Mock(poolmanager=Mock(connection_pool_kw={}))
mock_api.HTTPSession.return_value = Mock(adapters={
"http://": adapter_http,
"https://": adapter_https,
"foo://": adapter_foo
})
session = self.subject(load_plugins=False)
self.assertEqual(session.get_option("interface"), None)
session.set_option("interface", "my-interface")
self.assertEqual(adapter_http.poolmanager.connection_pool_kw, {"source_address": ("my-interface", 0)})
self.assertEqual(adapter_https.poolmanager.connection_pool_kw, {"source_address": ("my-interface", 0)})
self.assertEqual(adapter_foo.poolmanager.connection_pool_kw, {})
self.assertEqual(session.get_option("interface"), "my-interface")
session.set_option("interface", None)
self.assertEqual(adapter_http.poolmanager.connection_pool_kw, {})
self.assertEqual(adapter_https.poolmanager.connection_pool_kw, {})
self.assertEqual(adapter_foo.poolmanager.connection_pool_kw, {})
self.assertEqual(session.get_option("interface"), None)
@patch("streamlink.session.urllib3_connection", allowed_gai_family=allowed_gai_family)
def test_ipv4_ipv6(self, mock_urllib3_connection):
session = self.subject(load_plugins=False)
self.assertEqual(session.get_option("ipv4"), False)
self.assertEqual(session.get_option("ipv6"), False)
self.assertEqual(mock_urllib3_connection.allowed_gai_family, allowed_gai_family)
session.set_option("ipv4", True)
self.assertEqual(session.get_option("ipv4"), True)
self.assertEqual(session.get_option("ipv6"), False)
self.assertNotEqual(mock_urllib3_connection.allowed_gai_family, allowed_gai_family)
self.assertEqual(mock_urllib3_connection.allowed_gai_family(), AF_INET)
session.set_option("ipv4", False)
self.assertEqual(session.get_option("ipv4"), False)
self.assertEqual(session.get_option("ipv6"), False)
self.assertEqual(mock_urllib3_connection.allowed_gai_family, allowed_gai_family)
session.set_option("ipv6", True)
self.assertEqual(session.get_option("ipv4"), False)
self.assertEqual(session.get_option("ipv6"), True)
self.assertNotEqual(mock_urllib3_connection.allowed_gai_family, allowed_gai_family)
self.assertEqual(mock_urllib3_connection.allowed_gai_family(), AF_INET6)
session.set_option("ipv6", False)
self.assertEqual(session.get_option("ipv4"), False)
self.assertEqual(session.get_option("ipv6"), False)
self.assertEqual(mock_urllib3_connection.allowed_gai_family, allowed_gai_family)
session.set_option("ipv4", True)
session.set_option("ipv6", False)
self.assertEqual(session.get_option("ipv4"), True)
self.assertEqual(session.get_option("ipv6"), False)
self.assertEqual(mock_urllib3_connection.allowed_gai_family, allowed_gai_family)
def test_https_proxy_default(self):
session = self.subject(load_plugins=False)
session.set_option("http-proxy", "http://testproxy.com")
self.assertEqual("http://testproxy.com", session.http.proxies['http'])
self.assertEqual("http://testproxy.com", session.http.proxies['https'])
def test_https_proxy_set_first(self):
session = self.subject(load_plugins=False)
session.set_option("https-proxy", "https://testhttpsproxy.com")
session.set_option("http-proxy", "http://testproxy.com")
self.assertEqual("http://testproxy.com", session.http.proxies['http'])
self.assertEqual("https://testhttpsproxy.com", session.http.proxies['https'])
def test_https_proxy_default_override(self):
session = self.subject(load_plugins=False)
session.set_option("http-proxy", "http://testproxy.com")
session.set_option("https-proxy", "https://testhttpsproxy.com")
self.assertEqual("http://testproxy.com", session.http.proxies['http'])
self.assertEqual("https://testhttpsproxy.com", session.http.proxies['https'])
def test_https_proxy_set_only(self):
session = self.subject(load_plugins=False)
session.set_option("https-proxy", "https://testhttpsproxy.com")
self.assertFalse("http" in session.http.proxies)
self.assertEqual("https://testhttpsproxy.com", session.http.proxies['https'])
# Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test for vops.py."""
import functools
import operator
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
import pytest
from distla_core.linalg.utils import testutils
from distla_core.utils import pops
from distla_core.utils import vops
DTYPE = jnp.float32
dtypes = [jnp.float32]
AXIS_NAME = pops.AXIS_NAME
precisions = [lax.Precision.HIGHEST]
shapes = [(8, 1), (16, 16), (8, 128), (128, 8)]
flags = [True, False]
seeds = [0, 1]
###############################################################################
# REPLICATEDTHINMATRIX
###############################################################################
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
def test_replicatedthinmatrix_unary(shape, column_replicate, seed):
"""
Test methods of ReplicatedThinMatrix that only operate on a single matrix.
"""
A_jnp = jax.random.normal(jax.random.PRNGKey(seed), shape)
A = vops.distribute(A_jnp, column_replicate)
np.testing.assert_allclose(A_jnp + 2, vops.undistribute(A + 2))
np.testing.assert_allclose(2 + A_jnp, vops.undistribute(2 + A))
np.testing.assert_allclose(A_jnp - 2, vops.undistribute(A - 2))
np.testing.assert_allclose(2 - A_jnp, vops.undistribute(2 - A))
np.testing.assert_allclose(A_jnp * 2, vops.undistribute(A * 2))
np.testing.assert_allclose(2 * A_jnp, vops.undistribute(2 * A))
np.testing.assert_allclose(A_jnp / 2, vops.undistribute(A / 2))
np.testing.assert_allclose(2 / A_jnp, vops.undistribute(2 / A))
np.testing.assert_allclose(A_jnp // 2, vops.undistribute(A // 2))
np.testing.assert_allclose(2 // A_jnp, vops.undistribute(2 // A))
np.testing.assert_allclose(A_jnp % 2, vops.undistribute(A % 2))
np.testing.assert_allclose(2 % A_jnp, vops.undistribute(2 % A))
np.testing.assert_allclose(A_jnp**2, vops.undistribute(A**2))
np.testing.assert_allclose(2**A_jnp, vops.undistribute(2**A))
np.testing.assert_allclose(A_jnp == 0, vops.undistribute(A == 0))
np.testing.assert_allclose(0 == A_jnp, vops.undistribute(0 == A))
np.testing.assert_allclose(A_jnp != 0, vops.undistribute(A != 0))
np.testing.assert_allclose(0 != A_jnp, vops.undistribute(0 != A))
np.testing.assert_allclose(A_jnp < 0, vops.undistribute(A < 0))
np.testing.assert_allclose(0 < A_jnp, vops.undistribute(0 < A))
np.testing.assert_allclose(A_jnp > 0, vops.undistribute(A > 0))
np.testing.assert_allclose(0 > A_jnp, vops.undistribute(0 > A))
np.testing.assert_allclose(A_jnp <= 0, vops.undistribute(A <= 0))
np.testing.assert_allclose(0 <= A_jnp, vops.undistribute(0 <= A))
np.testing.assert_allclose(A_jnp >= 0, vops.undistribute(A >= 0))
np.testing.assert_allclose(0 >= A_jnp, vops.undistribute(0 >= A))
np.testing.assert_allclose(
operator.neg(A_jnp),
vops.undistribute(operator.neg(A)),
)
np.testing.assert_allclose(
operator.pos(A_jnp),
vops.undistribute(operator.pos(A)),
)
np.testing.assert_allclose(abs(A_jnp), vops.undistribute(abs(A)))
np.testing.assert_allclose(jnp.conj(A_jnp), vops.undistribute(A.conj()))
np.testing.assert_allclose(jnp.sqrt(A_jnp), vops.undistribute(A.sqrt()))
np.testing.assert_allclose(jnp.sign(A_jnp), vops.undistribute(A.sign()))
np.testing.assert_allclose(jnp.log(A_jnp), vops.undistribute(A.log()))
np.testing.assert_allclose(jnp.exp(A_jnp), vops.undistribute(A.exp()))
np.testing.assert_allclose(jnp.imag(A_jnp), vops.undistribute(A.imag()))
np.testing.assert_allclose(jnp.real(A_jnp), vops.undistribute(A.real()))
np.testing.assert_allclose(jnp.min(A_jnp), A.min())
np.testing.assert_allclose(jnp.max(A_jnp), A.max())
np.testing.assert_allclose(
jnp.zeros(shape),
vops.undistribute(A.zeros_like()),
)
np.testing.assert_allclose(
jnp.ones(shape),
vops.undistribute(A.ones_like()),
)
np.testing.assert_allclose(
jnp.full(shape, 3),
vops.undistribute(A.full_like(3)),
)
np.testing.assert_allclose(jnp.all(A_jnp > 0), (A > 0).all())
np.testing.assert_allclose(jnp.any(A_jnp > 0), (A > 0).any())
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate_A", flags)
@pytest.mark.parametrize("column_replicate_B", flags)
def test_replicatedthinmatrix_binary(
seed,
shape,
column_replicate_A,
column_replicate_B,
):
"""
Test methods of ReplicatedThinMatrix that operate on two matrices.
"""
A_jnp = jax.random.normal(jax.random.PRNGKey(seed), shape)
A = vops.distribute(A_jnp, column_replicate_A)
B_jnp = jax.random.normal(jax.random.PRNGKey(seed + 1), shape)
B = vops.distribute(B_jnp, column_replicate_B)
np.testing.assert_allclose(A_jnp + B_jnp, vops.undistribute(A + B))
np.testing.assert_allclose(A_jnp - B_jnp, vops.undistribute(A - B))
np.testing.assert_allclose(A_jnp * B_jnp, vops.undistribute(A * B))
np.testing.assert_allclose(A_jnp / B_jnp, vops.undistribute(A / B))
np.testing.assert_allclose(A_jnp // B_jnp, vops.undistribute(A // B))
np.testing.assert_allclose(A_jnp % B_jnp, vops.undistribute(A % B))
np.testing.assert_allclose(A_jnp**B_jnp, vops.undistribute(A**B))
np.testing.assert_allclose(A_jnp == B_jnp, vops.undistribute(A == B))
np.testing.assert_allclose(A_jnp != B_jnp, vops.undistribute(A != B))
np.testing.assert_allclose(A_jnp < B_jnp, vops.undistribute(A < B))
np.testing.assert_allclose(A_jnp > B_jnp, vops.undistribute(A > B))
np.testing.assert_allclose(A_jnp <= B_jnp, vops.undistribute(A <= B))
np.testing.assert_allclose(A_jnp >= B_jnp, vops.undistribute(A >= B))
np.testing.assert_allclose(jnp.allclose(A_jnp, B_jnp), A.allclose(B))
###############################################################################
# INITIALIZATION
###############################################################################
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
def test_random(seed, shape, column_replicate):
random = vops.random(
shape,
column_replicated=column_replicate,
key_seed=seed,
)
data = random.array.reshape((*pops.DGRID, *random.array.shape[1:]))
eps = jnp.finfo(data.dtype).eps
if column_replicate:
for i in range(pops.DGRID[1]):
testutils.assert_allclose(data[:, i, :, :], data[:, 0, :, :], atol=eps)
else:
for i in range(pops.DGRID[0]):
testutils.assert_allclose(data[i, :, :, :], data[0, :, :, :], atol=eps)
assert random.is_column_replicated == column_replicate
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
def test_zeros(shape, column_replicate):
dtype = jnp.float32
ps = jnp.arange(pops.NDPROCS)
@functools.partial(pops.pmap, static_broadcasted_argnums=(1, 2, 3))
def test_f(ps, shape, dtype, column_rep):
return vops.zeros(shape, dtype, column_rep)
result = test_f(ps, shape, dtype, column_replicate)
assert result.is_column_replicated == column_replicate
result = vops.undistribute(result)
expected = jnp.zeros(shape, dtype=dtype)
testutils.assert_allclose(
expected, result, atol=jnp.finfo(result.dtype).eps)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
def test_ones(shape, column_replicate):
dtype = jnp.float32
ps = jnp.arange(pops.NDPROCS)
@functools.partial(pops.pmap, static_broadcasted_argnums=(1, 2, 3))
def test_f(ps, shape, dtype, column_rep):
return vops.ones(shape, dtype, column_rep)
result = test_f(ps, shape, dtype, column_replicate)
assert result.is_column_replicated == column_replicate
result = vops.undistribute(result)
expected = jnp.ones(shape, dtype=dtype)
testutils.assert_allclose(
expected, result, atol=jnp.finfo(result.dtype).eps)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
def test_full(shape, column_replicate):
val = 3.0
dtype = jnp.float32
ps = jnp.arange(pops.NDPROCS)
@functools.partial(
pops.pmap,
static_broadcasted_argnums=(1, 3, 4),
in_axes=(0, None, None, None, None))
def test_f(ps, shape, val, dtype, column_rep):
return vops.full(shape, val, dtype, column_rep)
result = test_f(ps, shape, val, dtype, column_replicate)
assert result.is_column_replicated == column_replicate
result = vops.undistribute(result)
expected = jnp.full(shape, val, dtype=dtype)
testutils.assert_allclose(
expected, result, atol=jnp.finfo(result.dtype).eps)
@pytest.mark.parametrize("shape", ([16, 16], [128, 128], [16, 128], [128, 16]))
@pytest.mark.parametrize("dtype", [np.float32, ])
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("trim_columns_to", [None, 3])
def test_big_to_thin(shape, dtype, seed, trim_columns_to):
np.random.seed(seed)
expected = np.random.randn(*shape).astype(dtype)
A_d = pops.distribute(expected)
if trim_columns_to is not None:
expected = expected[:, :trim_columns_to]
@pops.pmap
def _big_to_thin_f(A):
return vops.big_to_thin(A, trim_columns_to=trim_columns_to)
A_v = _big_to_thin_f(A_d)
result = vops.undistribute(A_v)
testutils.assert_allclose(expected, result, atol=0.)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
@pytest.mark.parametrize("dtype", [np.float32, ])
@pytest.mark.parametrize("seed", seeds)
def test_frobnorm(shape, column_replicate, dtype, seed):
np.random.seed(seed)
matrix = np.random.randn(*shape).astype(dtype)
expected = np.linalg.norm(matrix)
vec_d = vops.distribute(matrix, column_replicated=column_replicate)
@functools.partial(pops.pmap, out_axes=None)
def test_f(vec):
return vops.frobnorm(vec)
result = test_f(vec_d)
assert (expected - result) / expected < jnp.finfo(dtype).eps
@pytest.mark.parametrize("shape", [[4, 8]])
@pytest.mark.parametrize("column_replicate", flags)
@pytest.mark.parametrize("n_cols", [1, 3])
@pytest.mark.parametrize("offset", [0, 1])
@pytest.mark.parametrize("dtype", [np.float32, ])
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("big", flags)
def test_get_columns(
shape, column_replicate, n_cols, offset, dtype, seed, big):
np.random.seed(seed)
matrix = np.random.randn(*shape).astype(dtype)
expected = matrix[:, offset:offset + n_cols]
if big:
matrix_d = pops.distribute(matrix)
else:
matrix_d = vops.distribute(matrix, column_replicated=column_replicate)
@functools.partial(
pops.pmap, static_broadcasted_argnums=(2,), in_axes=(0, None, None))
def test_f(matrix, offset, n_cols):
return vops.get_columns(matrix, offset, n_cols)
result = test_f(matrix_d, offset, n_cols)
result = vops.undistribute(result)
testutils.assert_allclose(expected, result, atol=np.finfo(dtype).eps)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate_l", flags)
@pytest.mark.parametrize("column_replicate_r", flags)
@pytest.mark.parametrize("n_cols", [1, 3])
@pytest.mark.parametrize("offset", [0, 1])
@pytest.mark.parametrize("dtype", [np.float32, ])
@pytest.mark.parametrize("seed", seeds)
def test_set_columns_vec(
shape, column_replicate_l, column_replicate_r, n_cols, offset, dtype, seed):
shape = [shape[0], shape[1] + offset + n_cols]
np.random.seed(seed)
matrix = np.random.randn(*shape).astype(dtype)
new_cols = np.random.randn(shape[0], n_cols).astype(dtype)
matrix_d = vops.distribute(matrix, column_replicated=column_replicate_l)
new_cols_d = vops.distribute(new_cols, column_replicated=column_replicate_r)
@functools.partial(pops.pmap, in_axes=(0, 0, None))
def test_f(matrix, new_vecs, offset):
return vops.set_columns(matrix, new_vecs, offset)
result = test_f(matrix_d, new_cols_d, offset)
result = vops.undistribute(result)
matrix[:, offset:offset + n_cols] = new_cols
testutils.assert_allclose(matrix, result, atol=np.finfo(dtype).eps)
@pytest.mark.parametrize("shape", [[4, 6]])
@pytest.mark.parametrize("column_replicate_r", flags)
@pytest.mark.parametrize("n_cols", [1, 2, 3, 4])
@pytest.mark.parametrize("offset", [0, 2])
@pytest.mark.parametrize("dtype", [
np.float32,
])
@pytest.mark.parametrize("seed", seeds)
def test_set_columns_mat(
shape, column_replicate_r, n_cols, offset, dtype, seed):
np.random.seed(seed)
matrix = np.random.randn(*shape).astype(dtype)
new_cols = np.random.randn(shape[0], n_cols).astype(dtype)
matrix_d = pops.distribute(matrix)
new_cols_d = vops.distribute(new_cols, column_replicated=column_replicate_r)
@functools.partial(pops.pmap, in_axes=(0, 0, None))
def test_f(matrix, new_vecs, offset):
return vops.set_columns(matrix, new_vecs, offset)
result = test_f(matrix_d, new_cols_d, offset)
result = pops.undistribute(result)
matrix[:, offset:offset + n_cols] = new_cols
testutils.assert_allclose(matrix, result, atol=np.finfo(dtype).eps)
@pytest.mark.parametrize("shapeA", [[2, 3], [4, 8], [8, 12]])
@pytest.mark.parametrize("shapeB", [[2, 3], [8, 12]])
@pytest.mark.parametrize("dtype", [np.float32, ])
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("pmap", flags)
@pytest.mark.parametrize("col_rep_A", flags)
@pytest.mark.parametrize("col_rep_B", flags)
def test_hstack_pair(shapeA, shapeB, dtype, seed, pmap, col_rep_A, col_rep_B):
np.random.seed(seed)
shapeA = (shapeA[0] * pops.NROWS, shapeA[1])
shapeB = (shapeB[0] * pops.NROWS, shapeB[1])
vec_l = np.random.randn(*shapeA).astype(dtype)
vec_r = np.random.randn(*shapeB).astype(dtype)
vec_ld = vops.distribute(vec_l, column_replicated=col_rep_A)
vec_rd = vops.distribute(vec_r, column_replicated=col_rep_B)
if pmap:
test_f = pops.pmap(vops.hstack_pair)
else:
test_f = vops.hstack_pair
if shapeA[0] != shapeB[0]:
with pytest.raises(TypeError):
result = test_f(vec_ld, vec_rd)
return
result = vops.undistribute(test_f(vec_ld, vec_rd))
expected = np.hstack([vec_l, vec_r])
testutils.assert_allclose(result, expected, atol=0.)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
@pytest.mark.parametrize("dtype", [np.float32, ])
@pytest.mark.parametrize("seed", seeds)
def test_indices_vec(shape, column_replicate, dtype, seed):
np.random.seed(seed)
vec = np.random.randn(*shape).astype(dtype)
vec_d = vops.distribute(vec, column_replicated=column_replicate)
@pops.pmap
def test_f(vec):
rows, cols = vops._indices_vec(vec)
rows = vops.ReplicatedThinMatrix(rows, vec.is_column_replicated)
cols = vops.ReplicatedThinMatrix(cols, vec.is_column_replicated)
prow = pops.my_prow()
pcol = pops.my_pcol()
pname = pops.my_name()
return rows, cols, prow, pcol, pname
all_rows = np.arange(shape[0])
expected_cols = np.arange(shape[1])
local_rows = vec_d.shape[1]
rows, cols, prows, pcols, pnames = test_f(vec_d)
for p in pnames:
these_rows = rows.array[p, :]
these_cols = cols.array[p, :]
if vec_d.is_column_replicated:
pidx = prows[p]
else:
pidx = pcols[p]
expected_rows = all_rows[pidx * local_rows:(pidx + 1) * local_rows]
np.testing.assert_array_equal(these_rows, expected_rows)
np.testing.assert_array_equal(these_cols, expected_cols)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
@pytest.mark.parametrize("dtype", [np.float32, ])
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("val", [1., -35.4])
@pytest.mark.parametrize("k", [-2, 0, 1])
def test_add_to_diagonal(shape, column_replicate, dtype, seed, val, k):
np.random.seed(seed)
vec = np.random.randn(*shape).astype(dtype)
identity = np.eye(*shape, k=k, dtype=dtype)
expected = vec + val * identity
vec_d = vops.distribute(vec, column_replicated=column_replicate)
@pops.pmap
def test_f(vec):
return vops.add_to_diagonal(vec, val, k=k)
result = test_f(vec_d)
result = vops.undistribute(result)
np.testing.assert_array_equal(result, expected)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate_l", flags)
@pytest.mark.parametrize("column_replicate_r", flags)
@pytest.mark.parametrize("dtype", [np.float32, ])
@pytest.mark.parametrize("seed", seeds)
def test_vecvec(shape, column_replicate_l, column_replicate_r, dtype, seed):
np.random.seed(seed)
matrix_l = np.random.randn(*shape).astype(dtype)
matrix_r = np.random.randn(*shape).astype(dtype)
expected = np.dot(matrix_l.conj().T, matrix_r)
matrix_ld = vops.distribute(matrix_l, column_replicated=column_replicate_l)
matrix_rd = vops.distribute(matrix_r, column_replicated=column_replicate_r)
@functools.partial(pops.pmap, out_axes=None)
def test_f(matrix_ld, matrix_rd):
return vops.vecvec(matrix_ld, matrix_rd)
result = test_f(matrix_ld, matrix_rd)
tol = np.finfo(dtype).eps * np.linalg.norm(expected) ** 2
testutils.assert_allclose(expected, result, atol=tol)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
@pytest.mark.parametrize("dtype", [np.float32, ])
@pytest.mark.parametrize("seed", seeds)
def test_vecsmall(shape, column_replicate, dtype, seed):
np.random.seed(seed)
matrix_l = np.random.randn(*shape).astype(dtype)
matrix_r = np.random.randn(*shape[::-1]).astype(dtype)
expected = np.dot(matrix_l, matrix_r)
matrix_ld = vops.distribute(matrix_l, column_replicated=column_replicate)
matrix_r = jnp.array(matrix_r)
@functools.partial(pops.pmap, in_axes=(0, None))
def test_f(matrix_ld, matrix_r):
return vops.vecsmall(matrix_ld, matrix_r)
result = test_f(matrix_ld, matrix_r)
result = vops.undistribute(result)
tol = np.finfo(dtype).eps * np.linalg.norm(expected) ** 2
testutils.assert_allclose(expected, result, atol=tol)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("pmap", flags)
@pytest.mark.parametrize("seed", seeds)
def test_distribute_column_undistribute(shape, pmap, seed):
np.random.seed(seed)
v = np.random.rand(*shape)
vp = vops.distribute(v, pmap=pmap, column_replicated=True,
host_replicated_input=True)
vo = vops.undistribute(vp)
np.testing.assert_allclose(v, vo)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("pmap", flags)
@pytest.mark.parametrize("seed", seeds)
def test_distribute_row_undistribute(shape, pmap, seed):
np.random.seed(seed)
v = np.random.rand(*shape)
vp = vops.distribute(v, pmap=pmap, column_replicated=False,
host_replicated_input=True)
vo = vops.undistribute(vp)
np.testing.assert_allclose(v, vo)
###############################################################################
# REDISTRIBUTION
###############################################################################
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
@pytest.mark.parametrize("seed", seeds)
def test_column_replicated(shape, column_replicate, seed):
np.random.seed(seed)
v = np.random.rand(*shape)
vp = vops.distribute(v, column_replicated=column_replicate)
expected = vops.distribute(v, column_replicated=True)
@pops.pmap
def test_f(v):
return vops.to_column_replicated(v)
result = test_f(vp)
np.testing.assert_allclose(expected.array, result.array)
assert result.is_column_replicated == expected.is_column_replicated
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("row_replicate", flags)
@pytest.mark.parametrize("seed", seeds)
def test_row_replicated(shape, row_replicate, seed):
np.random.seed(seed)
v = np.random.rand(*shape)
vp = vops.distribute(v, column_replicated=(not row_replicate))
expected = vops.distribute(v, column_replicated=False)
@pops.pmap
def test_f(v):
return vops.to_row_replicated(v)
result = test_f(vp)
np.testing.assert_allclose(expected.array, result.array)
assert result.is_column_replicated == expected.is_column_replicated
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
@pytest.mark.parametrize("transpose", flags)
@pytest.mark.parametrize("seed", seeds)
def test_matvec(shape, column_replicate, transpose, seed):
np.random.seed(seed)
A = np.random.randn(shape[0], shape[0]).astype(DTYPE)
Ap = pops.distribute(A)
x = np.random.randn(*shape).astype(DTYPE)
if transpose:
expected = np.dot(A.T, x)
else:
expected = np.dot(A, x)
xp = vops.distribute(x, column_replicated=column_replicate)
@pops.pmap
def _matvec_f(A, x):
return vops.matvec(A, x, transpose_A=transpose)
xp = _matvec_f(Ap, xp)
result = vops.undistribute(xp)
eps = 10 * np.finfo(DTYPE).eps * np.linalg.norm(x)
np.testing.assert_allclose(expected, result, atol=eps)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("precision", precisions)
def test_vec_t_mat(shape, column_replicate, seed, precision):
np.random.seed(seed)
A = np.random.randn(shape[0], shape[0]).astype(DTYPE)
vec = np.random.randn(shape[0], shape[1]).astype(DTYPE)
A_d = pops.distribute(A)
vec_d = vops.distribute(vec, column_replicated=column_replicate)
expected = jnp.dot(vec.T, A, precision=precision)
@pops.pmap
def _vectmat_f(vec, mat):
return vops.vec_t_mat(vec, mat, precision=precision)
result = _vectmat_f(vec_d, A_d)
result = vops.undistribute(result).T
eps = 10 * np.finfo(DTYPE).eps * np.linalg.norm(vec) * np.linalg.norm(A)
testutils.assert_allclose(result, expected, atol=eps)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate_l", flags)
@pytest.mark.parametrize("column_replicate_r", flags)
@pytest.mark.parametrize("seed", seeds)
@pytest.mark.parametrize("precision", precisions)
def test_outer(
shape, column_replicate_l, column_replicate_r, seed, precision):
np.random.seed(seed)
vec_l = np.random.randn(shape[0], shape[1]).astype(DTYPE)
vec_r = np.random.randn(shape[0], shape[1]).astype(DTYPE)
vec_ld = vops.distribute(vec_l, column_replicated=column_replicate_l)
vec_rd = vops.distribute(vec_r, column_replicated=column_replicate_r)
expected = jnp.dot(vec_l, vec_r.T, precision=precision)
@pops.pmap
def _outer_f(vec_l, vec_r):
return vops.outer(vec_l, vec_r, precision=precision)
result = _outer_f(vec_ld, vec_rd)
result = pops.undistribute(result)
eps = 10 * np.finfo(DTYPE).eps * np.linalg.norm(vec_l)
eps *= np.linalg.norm(vec_r)
testutils.assert_allclose(result, expected, atol=eps)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate", flags)
@pytest.mark.parametrize("right", flags)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("seed", seeds)
def test_diagmult(shape, column_replicate, right, dtype, seed):
np.random.seed(seed)
A = np.random.randn(shape[0], shape[0]).astype(dtype)
Ap = pops.distribute(A)
x = np.random.randn(shape[0], 1).astype(dtype)
if right:
expected = np.dot(A, np.diag(x.ravel()))
else:
expected = np.dot(np.diag(x.ravel()), A)
xp = vops.distribute(x, column_replicated=column_replicate)
@pops.pmap
def _diagmult_f(A, x):
return vops.diagmult(A, x, vector_on_right=right)
result_1 = _diagmult_f(Ap, xp)
result_1 = pops.undistribute(result_1)
result_2 = vops.diagmult(Ap, xp, vector_on_right=right)
result_2 = pops.undistribute(result_2)
eps = 10 * np.linalg.norm(x) * np.linalg.norm(A)
eps *= testutils.eps(lax.Precision.HIGHEST, dtype)
testutils.assert_allclose(result_1, result_2, eps)
testutils.assert_allclose(expected, result_1, eps)
@pytest.mark.parametrize("shape", shapes)
@pytest.mark.parametrize("column_replicate_l", flags)
@pytest.mark.parametrize("column_replicate_r", flags)
@pytest.mark.parametrize("seed", seeds)
def test_align(shape, column_replicate_l, column_replicate_r, seed):
np.random.seed(seed)
vec1 = np.zeros(shape, dtype=DTYPE)
vec2 = np.zeros(shape, dtype=DTYPE)
vec1 = vops.distribute(vec1, column_replicate_l)
vec2 = vops.distribute(vec2, column_replicate_r)
vec2 = vops._align(vec2, vec1)
assert vec2.is_column_replicated == vec1.is_column_replicated
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2012 the MansOS team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import wx
from os.path import exists, join, split, realpath
from os import chdir, getcwd
from wx.lib.agw import aui
from globals import * #@UnusedWildImports
from editor_manager import EditorManager
from generate_makefile import GenerateMakefile
from Translater import localize
from src.Settings import Settings
class TabManager(aui.AuiNotebook):
def __init__(self, parent, API):
aui.AuiNotebook.__init__(self, parent, style = aui.AUI_NB_CLOSE_ON_ACTIVE_TAB | aui.AUI_NB_SMART_TABS)
self.API = API
self.API.tabManager = self
# Need to set because next statement uses it
self.nextPageNr = 1
self.AddPage(EditorManager(self, self.API), localize("Untitled"))
self.Bind(aui.EVT_AUINOTEBOOK_PAGE_CHANGED, self.onPageChanged)
self.Bind(aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.doPopupClose)
#self.Bind(aui.EVT_AUINOTEBOOK_BUTTON, self.onCloseCheck)
self.Bind(aui.EVT_AUINOTEBOOK_TAB_RIGHT_DOWN, self.showPopupMenu)
def onPageChanged(self, event):
if event is not None:
event.Skip()
if self.getPageObject().projectType == SEAL_PROJECT:
self.API.frame.enableAdders()
else:
self.API.frame.disableAdders()
# Clear last dialog line, otherwise same line can't trigger new dialog
# until other line triggers it.
self.getPageObject().code.lastLine = -1
# Remove any Helper windows
self.API.checkForDeletedEditors()
wx.YieldIfNeeded()
self.getPageObject().parseConfigFile()
def showPopupMenu(self, event):
# Make clicked tab active, so all actions target this tab.
self.SetSelection(event.GetSelection(), True)
self._rmenu = wx.Menu()
self.openConfig = self._rmenu.Append(wx.ID_ANY, '&' + localize("Open config file") +
'', localize("Open config file"))
self.openMakefile = self._rmenu.Append(wx.ID_ANY, '&' + localize("Open makefile") +
'', localize("Open makefile"))
self.doClean = self._rmenu.Append(wx.ID_ANY, '&' + localize("Clean target") +
'', localize("Clean target"))
self.popupReload = self._rmenu.Append(wx.ID_REPLACE, '&' + localize("Reload") +
'\tCtrl+R', localize("Reload"))
self.popupSave = self._rmenu.Append(wx.ID_SAVE, '&' + localize('Save') +
'\tCtrl+S', localize("Save"))
self.popupSaveAs = self._rmenu.Append(wx.ID_SAVEAS, '&' + localize("Save as") +
'\tCtrl+A', localize("Save as"))
self.popupClose = self._rmenu.Append(wx.ID_CLOSE, '&' + localize('Close') +
'\tCtrl+W', localize("Close"))
self.Bind(wx.EVT_MENU, self.doPopupConfig, self.openConfig)
self.Bind(wx.EVT_MENU, self.doPopupMakefile, self.openMakefile)
self.Bind(wx.EVT_MENU, self.doPopupReload, self.popupReload)
self.Bind(wx.EVT_MENU, self.doPopupSave, self.popupSave)
self.Bind(wx.EVT_MENU, self.doPopupSaveAs, self.popupSaveAs)
self.Bind(wx.EVT_MENU, self.doPopupClose, self.popupClose)
self.Bind(wx.EVT_MENU, self.API.compiler.clean, self.doClean)
#Disable control if needed
if self.getPageObject().saveState:
self.popupSave.Enable(False)
if self.getPageObject().fileName[-3:] != ".sl" and \
self.getPageObject().fileName[-2:] != ".c":
self.openConfig.Enable(False)
self.openMakefile.Enable(False)
# Popup the menu. If an item is selected then its handler
# will be called before PopupMenu returns.
self.PopupMenu(self._rmenu)
self._rmenu.Destroy()
def doPopupConfig(self, event):
pathToOpenedFile = split(realpath(self.getPageObject().filePath))[0]
if not exists(join(pathToOpenedFile, 'config')):
open(join(pathToOpenedFile, 'config'), "wb")
self.addPage(join(pathToOpenedFile, 'config'))
def doPopupMakefile(self, event):
pathToOpenedFile = split(realpath(self.getPageObject().filePath))[0]
if not exists(join(pathToOpenedFile, 'Makefile')):
curPath = getcwd()
chdir(pathToOpenedFile)
GenerateMakefile().generate(self.getPageObject().fileName,
self.getPageObject().projectType,
self.API.pathToMansos)
chdir(curPath)
self.addPage(join(pathToOpenedFile, 'Makefile'))
def doPopupReload(self, event):
self.getPageObject().update()
self.onPageChanged(None)
def doPopupSave(self, event):
if self.getPageObject() is None:
return
if self.getPageObject().hasAFile:
self.getPageObject().save()
else:
save = wx.FileDialog(self,
localize("Save") + " \"" +
str(self.GetPageText(self.GetSelection())) + '"',
wildcard = 'Seal ' + localize('files') + ' (*.sl)|*.sl|' +
localize('All files') + '|*',
style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
defaultFile = self.getPageObject().fileName)
if save.ShowModal() == wx.ID_OK:
if not save.GetPath().endswith(".sl") and self.getPageObject().projectType == SEAL_PROJECT:
self.getPageObject().updateInfo(path = save.GetPath() + '.sl')
else:
self.getPageObject().updateInfo(path = save.GetPath())
self.getPageObject().save()
self.getPageObject().hasAFile = True
save.Destroy()
return self.getPageObject().hasAFile
def doPopupSaveAs(self, event):
save = wx.FileDialog(self,
localize("Save as") + " \"" +
str(self.GetPageText(self.GetSelection())) + '"',
wildcard = 'Seal ' + localize('files') + ' (*.sl)|*.sl|' +
localize('All files') + '|*',
style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
defaultFile = self.getPageObject().fileName)
if save.ShowModal() == wx.ID_OK:
if not save.GetPath().endswith(".sl") and self.getPageObject().projectType == SEAL_PROJECT:
self.getPageObject().updateInfo(path = save.GetPath() + '.sl')
else:
self.getPageObject().updateInfo(path = save.GetPath())
self.getPageObject().save()
self.getPageObject().hasAFile = True
save.Destroy()
def doPopupClose(self, event, checkConsequences = True):
if not self.onCloseCheck():
return False
# Remove selected page.
self.DeletePage(self.GetSelection())
if self.GetPageCount() == 0:
self.API.frame.activateNoEditorMode()
self.API.checkForDeletedEditors()
if event:
event.Veto()
self.Layout()
return True
def titleChange(self, newName):
self.SetPageText(self.GetSelection(), newName)
def markAsUnsaved(self):
self.titleChange('* ' + self.getPageObject().caption)
def markAsSaved(self):
if self.getPageObject() is not None:
self.titleChange(self.getPageObject().caption)
def getPageObject(self):
if self.GetSelection() != -1:
return self.GetPage(self.GetSelection())
else:
return None
def addPage(self, newFile = ''):
if newFile == '':
self.AddPage(EditorManager(self, self.API),
localize("Untitled") + ' ' + str(self.nextPageNr) + '.sl')
else:
self.AddPage(EditorManager(self, self.API), newFile)
self.nextPageNr += 1
self.SetSelection(self.GetPageCount() - 1)
# Add any file associated with it
self.getPageObject().update(newFile)
self.Layout()
self.API.frame.deactivateNoEditorMode()
self.onPageChanged(None)
def GetCurrentPage(self):
return self.getPageObject()
def onCloseCheck(self, event = None):
if self.getPageObject() is None:
# Nothing to check
return True
if not self.getPageObject().saveState:
# Initiate DialogBox
dialog = wx.MessageDialog(self,
localize('Save changes to') + ' "' +
self.getPageObject().fileName + '" ' +
localize('before close it?'),
localize('Unsaved file') + ' "' +
self.getPageObject().fileName + '"',
wx.YES_NO | wx.CANCEL | wx.ICON_EXCLAMATION)
retVal = dialog.ShowModal()
if retVal == wx.ID_YES:
# Create save dialog
self.doPopupSave(None)
# Recursion to make sure it's really saved.
return self.onCloseCheck()
elif retVal == wx.ID_CANCEL:
# Stop action if there is any
if event:
event.Veto()
return False
# It's ok to close
return True
def onQuitCheck(self):
while self.GetPageCount() > 0:
# Select first page and try to close it
self.SetSelection(0)
if not self.doPopupClose(None, False):
return False
return True
def rememberOpenedTabs(self):
result = ''
for x in self.API.editors:
if type(x) is EditorManager:
result += x.filePath + ";"
Settings.set('openedTabs', result.strip(";"))
def loadRememberedTabs(self):
tabs = Settings.get('openedTabs').split(';')
# Remove automatically created first page
self.DeletePage(0)
if tabs != ['']:
# Add all Tabs
for x in tabs:
self.addPage(x)
return
# Open default files if no tabs were saved
path = join(self.API.path, "../../apps/seal/Empty/")
if exists(path):
filename = self.API.frame.findFirstSourceFile(path)
if filename:
self.addPage(filename)
return
self.addPage('sampleCode.sl')
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.config_parser import Settings, default_decay_rate, \
default_gradient_clipping_threshold, default_momentum
from .default_decorators import wrap_param_default
__all__ = [
'Optimizer', 'BaseSGDOptimizer', 'MomentumOptimizer', 'AdamaxOptimizer',
'AdamOptimizer', 'AdaGradOptimizer', 'RMSPropOptimizer',
'DecayedAdaGradOptimizer', 'AdaDeltaOptimizer', 'BaseRegularization',
'L2Regularization', 'settings', 'ModelAverage'
]
class Optimizer(object):
def to_setting_kwargs(self):
raise NotImplementedError()
def extra_settings(self):
pass
@property
def is_support_sparse(self):
return True
class BaseSGDOptimizer(Optimizer):
"""
SGD Optimizer.
SGD is an iterative optimization method that searches for network weights
that minimize the network's cost/error. In Paddle's implementation the SGD
optimizer is synchronized: all gradients are waited for and reduced into a
single gradient before the optimization step is applied.
The neural network treats learning as the problem of minimizing an objective
function that has the form of a sum
.. math::
Q(w) = \\sum_{i}^{n} Q_i(w)
The value of Q is typically the cost of the neural network (for example, the
mean squared error between prediction and label). Q is parametrised by w,
the weights/biases of the network, which are what is learned. The index i
denotes the i-th observation in the training data.
The SGD method then updates the weights by
.. math::
w = w - \\eta \\nabla Q(w) = w - \\eta \\sum_{i}^{n} \\nabla Q_i(w)
where :math:`\\eta` is the learning rate and :math:`n` is the batch size.
"""
def to_setting_kwargs(self):
raise NotImplementedError()
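# Illustrative sketch only (not part of the Paddle API): a single plain SGD
# step over a mini-batch, matching the update rule in the docstring above,
# i.e. w <- w - eta * sum_i grad Q_i(w).
#
#     import numpy as np
#
#     def sgd_step(w, grads, eta):
#         """w: weights, grads: per-example gradients, eta: learning rate."""
#         return w - eta * np.sum(grads, axis=0)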
class MomentumOptimizer(BaseSGDOptimizer):
"""
MomentumOptimizer.
When sparse=True, the update scheme is:
.. math::
\\alpha_t &= \\alpha_{t-1} / k \\\\
\\beta_t &= \\beta_{t-1} / (1 + \\lambda \\gamma_t) \\\\
u_t &= u_{t-1} - \\alpha_t \\gamma_t g_t \\\\
v_t &= v_{t-1} + \\tau_{t-1} \\alpha_t \\gamma_t g_t \\\\
\\tau_t &= \\tau_{t-1} + \\beta_t / \\alpha_t
where :math:`k` is momentum, :math:`\\lambda` is decay rate,
:math:`\\gamma_t` is learning rate at the t'th step.
:param sparse: with sparse support or not.
:type sparse: bool
"""
def extra_settings(self):
default_momentum(self.momentum)
def to_setting_kwargs(self):
if self.sparse:
return {'learning_method': 'sparse_momentum'}
else:
return {'learning_method': 'momentum'}
def __init__(self, momentum=None, sparse=False):
self.momentum = momentum
self.sparse = sparse
class AdamOptimizer(BaseSGDOptimizer):
"""
Adam optimizer.
For details, please refer to `Adam: A Method for Stochastic Optimization
<https://arxiv.org/abs/1412.6980>`_
.. math::
m(w, t) & = \\beta_1 m(w, t-1) + (1 - \\beta_1) \\nabla Q_i(w) \\\\
v(w, t) & = \\beta_2 v(w, t-1) + (1 - \\beta_2)(\\nabla Q_i(w)) ^2 \\\\
w & = w - \\frac{\\eta m(w, t)}{\\sqrt{v(w,t) + \\epsilon}}
:param beta1: the :math:`\\beta_1` in equation.
:type beta1: float
:param beta2: the :math:`\\beta_2` in equation.
:type beta2: float
:param epsilon: the :math:`\\epsilon` in the equation. It is used to prevent
division by zero.
:type epsilon: float
"""
@property
def is_support_sparse(self):
return False
def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-8):
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
def to_setting_kwargs(self):
return {
'learning_method': 'adam',
'adam_beta1': self.beta1,
'adam_beta2': self.beta2,
'adam_epsilon': self.epsilon
}
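# Illustrative sketch (not part of Paddle's API): the Adam update from the
# docstring above for a single scalar parameter, without bias correction,
# matching the equations as written. Names are hypothetical.
def _adam_update_example(w, grad, m, v, eta=1e-3,
                         beta1=0.9, beta2=0.999, epsilon=1e-8):
    """Return the updated (w, m, v) state for one scalar parameter."""
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    w = w - eta * m / (v + epsilon) ** 0.5
    return w, m, v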
class AdamaxOptimizer(BaseSGDOptimizer):
"""
Adamax optimizer.
For details please refer to `Adam: A Method for Stochastic Optimization
<https://arxiv.org/abs/1412.6980>`_
.. math::
m_t & = \\beta_1 * m_{t-1} + (1-\\beta_1)* \\nabla Q_i(w) \\\\
u_t & = max(\\beta_2*u_{t-1}, abs(\\nabla Q_i(w))) \\\\
w_t & = w_{t-1} - (\\eta/(1-\\beta_1^t))*m_t/u_t
:param beta1: the :math:`\\beta_1` in the equation.
:type beta1: float
:param beta2: the :math:`\\beta_2` in the equation.
:type beta2: float
"""
def __init__(self, beta1, beta2):
self.beta1 = beta1
self.beta2 = beta2
def to_setting_kwargs(self):
return {
'learning_method': 'adamax',
'adam_beta1': self.beta1,
'adam_beta2': self.beta2
}
@property
def is_support_sparse(self):
return False
class AdaGradOptimizer(BaseSGDOptimizer):
"""
AdaGrad (for ADAptive GRAdient algorithm) optimizer.
For details please refer to `Adaptive Subgradient Methods for
Online Learning and Stochastic Optimization
<http://www.magicbroom.info/Papers/DuchiHaSi10.pdf>`_.
.. math::
G &= \\sum_{\\tau=1}^{t} g_{\\tau} g_{\\tau}^T \\\\
w & = w - \\eta diag(G)^{-\\frac{1}{2}} \\circ g
"""
def to_setting_kwargs(self):
return {'learning_method': 'adagrad'}
def __init__(self):
pass
class RMSPropOptimizer(BaseSGDOptimizer):
"""
RMSProp (for Root Mean Square Propagation) optimizer. For details please
refer to this `slide
<http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
The equations of this method are as follows:
.. math::
v(w, t) & = \\rho v(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\
w & = w - \\frac{\\eta} {\\sqrt{v(w,t) + \\epsilon}} \\nabla Q_{i}(w)
:param rho: the :math:`\\rho` in the equation. The forgetting factor.
:type rho: float
:param epsilon: the :math:`\\epsilon` in the equation.
:type epsilon: float
"""
def to_setting_kwargs(self):
return {
'learning_method': 'rmsprop',
'ada_rou': self.rho,
'ada_epsilon': self.epsilon
}
def __init__(self, rho=0.95, epsilon=1e-6):
self.rho = rho
self.epsilon = epsilon
class DecayedAdaGradOptimizer(BaseSGDOptimizer):
"""
AdaGrad method with a decayed sum of gradients. The equations of this method
are as follows.
.. math::
E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\
learning\\_rate &= 1/sqrt( E(g_t^2) + \\epsilon )
:param rho: The :math:`\\rho` parameter in that equation
:type rho: float
:param epsilon: The :math:`\\epsilon` parameter in that equation.
:type epsilon: float
"""
def to_setting_kwargs(self):
return {
'learning_method': 'decayed_adagrad',
'ada_rou': self.rho,
'ada_epsilon': self.epsilon
}
def __init__(self, rho=0.95, epsilon=1e-6):
self.rho = rho
self.epsilon = epsilon
class AdaDeltaOptimizer(BaseSGDOptimizer):
"""
AdaDelta method. For details please refer to
`ADADELTA: AN ADAPTIVE LEARNING RATE METHOD
<http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf>`_.
.. math::
E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\
learning\\_rate &= sqrt( ( E(dx_{t-1}^2) + \\epsilon ) / ( \\
E(g_t^2) + \\epsilon ) ) \\\\
E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\\_rate)^2
:param rho: :math:`\\rho` in equation
:type rho: float
:param epsilon: :math:`\\epsilon` in equation
:type epsilon: float
"""
def to_setting_kwargs(self):
return {
'learning_method': 'adadelta',
'ada_rou': self.rho,
'ada_epsilon': self.epsilon
}
def __init__(self, rho=0.95, epsilon=1e-6):
self.rho = rho
self.epsilon = epsilon
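# Illustrative sketch (not part of Paddle's API): the AdaDelta recurrences
# from the docstring above for a single scalar parameter. The running
# averages are passed in and returned explicitly; names are hypothetical.
def _adadelta_update_example(w, grad, avg_sq_grad, avg_sq_dx,
                             rho=0.95, epsilon=1e-6):
    """Return the updated (w, avg_sq_grad, avg_sq_dx) state."""
    avg_sq_grad = rho * avg_sq_grad + (1 - rho) * grad * grad
    rate = ((avg_sq_dx + epsilon) / (avg_sq_grad + epsilon)) ** 0.5
    dx = -grad * rate
    avg_sq_dx = rho * avg_sq_dx + (1 - rho) * dx * dx
    return w + dx, avg_sq_grad, avg_sq_dx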
class BaseRegularization(Optimizer):
def __init__(self):
self.algorithm = ""
self.learning_method = ""
def to_setting_kwargs(self):
return {}
class L2Regularization(BaseRegularization):
def __init__(self, rate):
super(L2Regularization, self).__init__()
self.decay_rate = rate
def to_setting_kwargs(self):
if self.algorithm == 'owlqn':
return {'l2weight': self.decay_rate}
else:
return dict()
def extra_settings(self):
if self.algorithm == 'sgd' or self.algorithm == 'async_sgd':
default_decay_rate(self.decay_rate)
class ModelAverage(Optimizer):
def to_setting_kwargs(self):
return {
'average_window': self.average_window,
'max_average_window': self.max_average_window,
'do_average_in_cpu': self.do_average_in_cpu
}
def __init__(self,
average_window,
max_average_window=None,
do_average_in_cpu=False):
self.average_window = average_window
self.max_average_window = max_average_window
self.do_average_in_cpu = do_average_in_cpu
class GradientClippingThreshold(Optimizer):
def extra_settings(self):
default_gradient_clipping_threshold(self.threshold)
def __init__(self, threshold):
self.threshold = threshold
def to_setting_kwargs(self):
return dict()
def __extends__(dict1, dict2):
for key in dict2:
assert key not in dict1
dict1[key] = dict2[key]
return dict1
@wrap_param_default(
['learning_method'], default_factory=lambda _: MomentumOptimizer())
@wrap_param_default(
['regularization'], default_factory=lambda _: BaseRegularization())
def settings(batch_size,
learning_rate=1e-3,
learning_rate_decay_a=0.,
learning_rate_decay_b=0.,
learning_rate_schedule='poly',
learning_rate_args='',
async_lagged_grad_discard_ratio=1.5,
learning_method=None,
regularization=None,
is_async=False,
model_average=None,
gradient_clipping_threshold=None):
"""
Set the optimization method, learning rate, batch size, and other training
settings. The currently supported algorithms are SGD and Async-SGD.
.. warning::
Note that 'batch_size' in PaddlePaddle is not the global training batch
size. It is the batch size of a single training process. If you use N
processes to train one model (for example, three GPU machines), the
global batch size is N * 'batch_size'.
:param batch_size: batch size for one training process.
:type batch_size: int
:param learning_rate: learning rate for SGD
:type learning_rate: float
:param learning_method: The gradient-descent-based optimization algorithm,
                        such as momentum, adagrad, rmsprop, etc. Note that
                        it should be an instance of BaseSGDOptimizer.
:type learning_method: BaseSGDOptimizer
:param regularization: The regularization method.
:type regularization: BaseRegularization
:param is_async: Is Async-SGD or not. Default value is False.
:type is_async: bool
:param model_average: Model Average Settings.
:type model_average: ModelAverage
:param gradient_clipping_threshold: gradient clipping threshold. Gradient
                                    values larger than the threshold will
                                    be clipped.
:type gradient_clipping_threshold: float
:param async_lagged_grad_discard_ratio: async SGD gradient commit control;
when more than async_lagged_grad_discard_ratio * num_gradient_servers
commits have passed, the current async SGD gradient is discarded.
:type async_lagged_grad_discard_ratio: float
"""
if isinstance(regularization, BaseRegularization):
regularization = [regularization]
assert isinstance(learning_method, Optimizer)
if isinstance(learning_method, BaseSGDOptimizer):
algorithm = 'async_sgd' if is_async else 'sgd'
else:
algorithm = 'owlqn'
args = [
'batch_size', 'learning_rate', 'learning_rate_decay_a',
'learning_rate_decay_b', 'learning_rate_schedule', 'learning_rate_args',
'gradient_clipping_threshold', 'async_lagged_grad_discard_ratio'
]
kwargs = dict()
kwargs['algorithm'] = algorithm
for arg in args:
kwargs[arg] = locals()[arg]
kwargs = __extends__(kwargs, learning_method.to_setting_kwargs())
learning_method.extra_settings()
for regular in regularization:
assert isinstance(regular, BaseRegularization)
regular.algorithm = algorithm
regular.learning_method = kwargs['learning_method']
kwargs = __extends__(kwargs, regular.to_setting_kwargs())
regular.extra_settings()
if gradient_clipping_threshold is not None:
gradient_clipping_threshold = GradientClippingThreshold(
threshold=gradient_clipping_threshold)
for each in [model_average, gradient_clipping_threshold]:
if each is not None:
assert isinstance(each, Optimizer)
each.algorithm = algorithm
each.learning_method = kwargs['learning_method']
kwargs = __extends__(kwargs, each.to_setting_kwargs())
each.extra_settings()
# Do Check?
Settings(**kwargs)
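# Illustrative usage sketch: how settings() is typically called from a
# trainer configuration file. The hyperparameter values below are
# placeholders, not recommendations, and the function is never invoked here.
def _example_trainer_config():
    settings(
        batch_size=128,
        learning_rate=1e-3,
        learning_method=AdamOptimizer(beta1=0.9, beta2=0.999, epsilon=1e-8),
        regularization=L2Regularization(rate=8e-4),
        gradient_clipping_threshold=25,
        model_average=ModelAverage(average_window=0.5))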
|
|
# Copyright 2020-2021 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe Objectron."""
import enum
from typing import List, Tuple, NamedTuple, Optional
import attr
import numpy as np
# pylint: disable=unused-import
from mediapipe.calculators.core import constant_side_packet_calculator_pb2
from mediapipe.calculators.core import gate_calculator_pb2
from mediapipe.calculators.core import split_vector_calculator_pb2
from mediapipe.calculators.tensor import image_to_tensor_calculator_pb2
from mediapipe.calculators.tensor import inference_calculator_pb2
from mediapipe.calculators.tensor import tensors_to_detections_calculator_pb2
from mediapipe.calculators.tensor import tensors_to_floats_calculator_pb2
from mediapipe.calculators.tensor import tensors_to_landmarks_calculator_pb2
from mediapipe.calculators.tflite import ssd_anchors_calculator_pb2
from mediapipe.calculators.util import association_calculator_pb2
from mediapipe.calculators.util import collection_has_min_size_calculator_pb2
from mediapipe.calculators.util import detection_label_id_to_text_calculator_pb2
from mediapipe.calculators.util import detections_to_rects_calculator_pb2
from mediapipe.calculators.util import landmark_projection_calculator_pb2
from mediapipe.calculators.util import local_file_contents_calculator_pb2
from mediapipe.calculators.util import non_max_suppression_calculator_pb2
from mediapipe.calculators.util import rect_transformation_calculator_pb2
from mediapipe.calculators.util import thresholding_calculator_pb2
from mediapipe.framework.formats import landmark_pb2
from mediapipe.modules.objectron.calculators import annotation_data_pb2
from mediapipe.modules.objectron.calculators import frame_annotation_to_rect_calculator_pb2
from mediapipe.modules.objectron.calculators import lift_2d_frame_annotation_to_3d_calculator_pb2
# pylint: enable=unused-import
from mediapipe.python.solution_base import SolutionBase
from mediapipe.python.solutions import download_utils
class BoxLandmark(enum.IntEnum):
"""The 9 3D box landmarks."""
#
# 3 + + + + + + + + 7
# +\ +\ UP
# + \ + \
# + \ + \ |
# + 4 + + + + + + + + 8 | y
# + + + + |
# + + + + |
# + + (0) + + .------- x
# + + + + \
# 1 + + + + + + + + 5 + \
# \ + \ + \ z
# \ + \ + \
# \+ \+
# 2 + + + + + + + + 6
CENTER = 0
BACK_BOTTOM_LEFT = 1
FRONT_BOTTOM_LEFT = 2
BACK_TOP_LEFT = 3
FRONT_TOP_LEFT = 4
BACK_BOTTOM_RIGHT = 5
FRONT_BOTTOM_RIGHT = 6
BACK_TOP_RIGHT = 7
FRONT_TOP_RIGHT = 8
_BINARYPB_FILE_PATH = 'mediapipe/modules/objectron/objectron_cpu.binarypb'
BOX_CONNECTIONS = frozenset([
(BoxLandmark.BACK_BOTTOM_LEFT, BoxLandmark.FRONT_BOTTOM_LEFT),
(BoxLandmark.BACK_BOTTOM_LEFT, BoxLandmark.BACK_TOP_LEFT),
(BoxLandmark.BACK_BOTTOM_LEFT, BoxLandmark.BACK_BOTTOM_RIGHT),
(BoxLandmark.FRONT_BOTTOM_LEFT, BoxLandmark.FRONT_TOP_LEFT),
(BoxLandmark.FRONT_BOTTOM_LEFT, BoxLandmark.FRONT_BOTTOM_RIGHT),
(BoxLandmark.BACK_TOP_LEFT, BoxLandmark.FRONT_TOP_LEFT),
(BoxLandmark.BACK_TOP_LEFT, BoxLandmark.BACK_TOP_RIGHT),
(BoxLandmark.FRONT_TOP_LEFT, BoxLandmark.FRONT_TOP_RIGHT),
(BoxLandmark.BACK_BOTTOM_RIGHT, BoxLandmark.FRONT_BOTTOM_RIGHT),
(BoxLandmark.BACK_BOTTOM_RIGHT, BoxLandmark.BACK_TOP_RIGHT),
(BoxLandmark.FRONT_BOTTOM_RIGHT, BoxLandmark.FRONT_TOP_RIGHT),
(BoxLandmark.BACK_TOP_RIGHT, BoxLandmark.FRONT_TOP_RIGHT),
])
@attr.s(auto_attribs=True)
class ObjectronModel(object):
model_path: str
label_name: str
@attr.s(auto_attribs=True, frozen=True)
class ShoeModel(ObjectronModel):
model_path: str = ('mediapipe/modules/objectron/'
'object_detection_3d_sneakers.tflite')
label_name: str = 'Footwear'
@attr.s(auto_attribs=True, frozen=True)
class ChairModel(ObjectronModel):
model_path: str = ('mediapipe/modules/objectron/'
'object_detection_3d_chair.tflite')
label_name: str = 'Chair'
@attr.s(auto_attribs=True, frozen=True)
class CameraModel(ObjectronModel):
model_path: str = ('mediapipe/modules/objectron/'
'object_detection_3d_camera.tflite')
label_name: str = 'Camera'
@attr.s(auto_attribs=True, frozen=True)
class CupModel(ObjectronModel):
model_path: str = ('mediapipe/modules/objectron/'
'object_detection_3d_cup.tflite')
label_name: str = 'Coffee cup, Mug'
_MODEL_DICT = {
'Shoe': ShoeModel(),
'Chair': ChairModel(),
'Cup': CupModel(),
'Camera': CameraModel()
}
def _download_oss_objectron_models(objectron_model: str):
"""Downloads the objectron models from the MediaPipe Github repo if they don't exist in the package."""
download_utils.download_oss_model(
'mediapipe/modules/objectron/object_detection_ssd_mobilenetv2_oidv4_fp16.tflite'
)
download_utils.download_oss_model(objectron_model)
def get_model_by_name(name: str) -> ObjectronModel:
if name not in _MODEL_DICT:
raise ValueError(f'{name} is not a valid model name for Objectron.')
_download_oss_objectron_models(_MODEL_DICT[name].model_path)
return _MODEL_DICT[name]
@attr.s(auto_attribs=True)
class ObjectronOutputs(object):
landmarks_2d: landmark_pb2.NormalizedLandmarkList
landmarks_3d: landmark_pb2.LandmarkList
rotation: np.ndarray
translation: np.ndarray
scale: np.ndarray
class Objectron(SolutionBase):
"""MediaPipe Objectron.
MediaPipe Objectron processes an RGB image and returns the 3D box landmarks
and 2D rectangular bounding box of each detected object.
"""
def __init__(self,
static_image_mode: bool = False,
max_num_objects: int = 5,
min_detection_confidence: float = 0.5,
min_tracking_confidence: float = 0.99,
model_name: str = 'Shoe',
focal_length: Tuple[float, float] = (1.0, 1.0),
principal_point: Tuple[float, float] = (0.0, 0.0),
image_size: Optional[Tuple[int, int]] = None,
):
"""Initializes a MediaPipe Objectron class.
Args:
static_image_mode: Whether to treat the input images as a batch of static
and possibly unrelated images, or a video stream.
max_num_objects: Maximum number of objects to detect.
min_detection_confidence: Minimum confidence value ([0.0, 1.0]) for object
detection to be considered successful.
min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) for the
box landmarks to be considered tracked successfully.
model_name: Name of the model to use for predicting box landmarks; the
currently supported models are {'Shoe', 'Chair', 'Cup', 'Camera'}.
focal_length: Camera focal length `(fx, fy)`, by default is defined in NDC
space. To use focal length (fx_pixel, fy_pixel) in pixel space, users
should provide image_size = (image_width, image_height) to enable
conversions inside the API.
principal_point: Camera principal point (px, py), by default is defined in
NDC space. To use principal point (px_pixel, py_pixel) in pixel space,
users should provide image_size = (image_width, image_height) to enable
conversions inside the API.
image_size (Optional): size (image_width, image_height) of the input
image; ONLY needed when focal_length and principal_point are given in
pixel space.
Raises:
ConnectionError: If the objectron open source model can't be downloaded
from the MediaPipe Github repo.
"""
# Get Camera parameters.
fx, fy = focal_length
px, py = principal_point
if image_size is not None:
half_width = image_size[0] / 2.0
half_height = image_size[1] / 2.0
fx = fx / half_width
fy = fy / half_height
px = - (px - half_width) / half_width
py = - (py - half_height) / half_height
# Create and init model.
model = get_model_by_name(model_name)
super().__init__(
binary_graph_path=_BINARYPB_FILE_PATH,
side_inputs={
'box_landmark_model_path': model.model_path,
'allowed_labels': model.label_name,
'max_num_objects': max_num_objects,
'use_prev_landmarks': not static_image_mode,
},
calculator_params={
('objectdetectionoidv4subgraph'
'__TensorsToDetectionsCalculator.min_score_thresh'):
min_detection_confidence,
('boxlandmarksubgraph__ThresholdingCalculator'
'.threshold'):
min_tracking_confidence,
('Lift2DFrameAnnotationTo3DCalculator'
'.normalized_focal_x'): fx,
('Lift2DFrameAnnotationTo3DCalculator'
'.normalized_focal_y'): fy,
('Lift2DFrameAnnotationTo3DCalculator'
'.normalized_principal_point_x'): px,
('Lift2DFrameAnnotationTo3DCalculator'
'.normalized_principal_point_y'): py,
},
outputs=['detected_objects'])
def process(self, image: np.ndarray) -> NamedTuple:
"""Processes an RGB image and returns the box landmarks and rectangular bounding box of each detected object.
Args:
image: An RGB image represented as a numpy ndarray.
Raises:
RuntimeError: If the underlying graph throws any error.
ValueError: If the input image is not three channel RGB.
Returns:
A NamedTuple object with a "detected_objects" field that contains a list
of detected 3D bounding boxes. Each detected box is represented as an
"ObjectronOutputs" instance.
"""
results = super().process(input_data={'image': image})
if results.detected_objects:
results.detected_objects = self._convert_format(results.detected_objects)
else:
results.detected_objects = None
return results
def _convert_format(
self,
inputs: annotation_data_pb2.FrameAnnotation) -> List[ObjectronOutputs]:
new_outputs = list()
for annotation in inputs.annotations:
# Get 3d object pose.
rotation = np.reshape(np.array(annotation.rotation), (3, 3))
translation = np.array(annotation.translation)
scale = np.array(annotation.scale)
# Get 2D/3D landmarks.
landmarks_2d = landmark_pb2.NormalizedLandmarkList()
landmarks_3d = landmark_pb2.LandmarkList()
for keypoint in annotation.keypoints:
point_2d = keypoint.point_2d
landmarks_2d.landmark.add(x=point_2d.x, y=point_2d.y)
point_3d = keypoint.point_3d
landmarks_3d.landmark.add(x=point_3d.x, y=point_3d.y, z=point_3d.z)
# Add to objectron outputs.
new_outputs.append(ObjectronOutputs(landmarks_2d, landmarks_3d,
rotation, translation, scale=scale))
return new_outputs
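# Illustrative usage sketch (hedged): driving the Objectron solution defined
# above. The zero-filled array stands in for a real RGB frame (e.g. a video
# frame converted from BGR to RGB); model files are fetched on first use, so
# running this requires network access.
def _objectron_usage_example():
    image = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder RGB frame
    with Objectron(static_image_mode=True,
                   max_num_objects=5,
                   min_detection_confidence=0.5,
                   model_name='Shoe') as objectron:
        results = objectron.process(image)
        if results.detected_objects:
            for detected_object in results.detected_objects:
                print(detected_object.rotation, detected_object.translation)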
|
|
import pytest
from cedar import ParseError, parse
from cedar.ast import (
Enum, Tag,
Record, Attribute,
Function, Parameter,
Type, Union, Dict, List, Nullable
)
from .common import Module, table
def test_parse_errors_can_halt_execution():
with pytest.raises(ParseError) as e:
parse("record!")
with pytest.raises(SystemExit):
e.value.print_and_halt()
def test_tabs_are_not_allowed():
with pytest.raises(ParseError) as e:
parse("record A {\n\ta Int\n}")
assert e.value.message == r"unexpected '\t'"
def test_unexpected_tokens_raise_errors():
with pytest.raises(ParseError) as e:
parse("record A! {}")
assert e.value.message == "unexpected '!'"
def test_only_toplevel_declarations_are_allowed_in_a_module():
with pytest.raises(ParseError) as e:
parse("User")
assert e.value.line == 1
assert e.value.column == 4
def test_enums_cannot_have_many_dangling_commas():
with pytest.raises(ParseError) as e:
parse("enum A {B, C,,}")
assert e.value.line == 1
assert e.value.column == 13
def test_unions_cannot_be_empty():
with pytest.raises(ParseError):
parse("union A {}")
def test_comments_are_ignored():
table([
(
"""
// a comment
enum A {}
// another comment
""",
Module([Enum("A", [])])
),
(
"""
// an enum
enum A { B }
// a record
record C {
// a field
d Int
}
""",
Module([
Enum("A", [Tag("B")]),
Record("C", [Attribute("d", Type("Int"))])
])
),
])
def test_can_parse_modules():
table([
(
"",
Module([])
)
])
def test_can_parse_enums():
table([
("enum A {}", Enum("A", [])),
("enum A { B, C, D }", Enum("A", [Tag("B"), Tag("C"), Tag("D")])),
(
"""
enum A {
B,
C
}
""",
Enum("A", [Tag("B"), Tag("C")])
),
(
"""
enum A {
B,
C,
}
""",
Enum("A", [Tag("B"), Tag("C")])
)
])
def test_can_parse_unions():
table([
("union A { B, C }", Union("A", [Type("B"), Type("C")])),
(
"""
union A {
B,
C,
}
""",
Union("A", [Type("B"), Type("C")])
),
])
def test_can_parse_records():
table([
(
"record Unit {}",
Record("Unit", [])
),
(
"""
record User {
id Int
name String
email String
age Int?
friends [User]
}
""",
Record(
"User",
[
Attribute("id", Type("Int")),
Attribute("name", Type("String")),
Attribute("email", Type("String")),
Attribute("age", Nullable(Type("Int"))),
Attribute("friends", List(Type("User"))),
]
)
),
(
"record A { map {String: Int?} }",
Record(
"A",
[
Attribute("map", Dict(Type("String"), Nullable(Type("Int"))))
]
)
)
])
def test_can_parse_functions():
table([
(
"fn findAllUsers() [User]",
Function("findAllUsers", [], List(Type("User")))
),
(
"fn findUser(id Int) User?",
Function("findUser", [Parameter("id", Type("Int"))], Nullable(Type("User")))
),
(
"fn findAllResources(kind ResourceKind) Resource",
Function(
"findAllResources",
[Parameter("kind", Type("ResourceKind"))],
Type("Resource")
)
),
(
[
"fn findUsersByAge(comparator Comparator, age Int) [User]",
"fn findUsersByAge(comparator Comparator, age Int,) [User]",
"""
fn findUsersByAge(
comparator Comparator, age Int,
) [User]
""",
"""
fn findUsersByAge(
comparator Comparator,
age Int,
) [User]
""",
"""
fn findUsersByAge(
comparator Comparator,
age Int
) [User]
"""
],
Function(
"findUsersByAge",
[Parameter("comparator", Type("Comparator")), Parameter("age", Type("Int"))],
List(Type("User"))
)
)
])
|
|
# Copyright The IETF Trust 2007, All Rights Reserved
import codecs
import re
import os.path
import django.utils.html
from django.shortcuts import render_to_response as render
from django.template import RequestContext
from django.conf import settings
from django.http import Http404
from ietf.idtracker.models import IETFWG, InternetDraft, Rfc
from ietf.ipr.models import IprRfc, IprDraft, IprDetail
from ietf.ipr.related import related_docs
from ietf.utils import log, normalize_draftname
def mark_last_doc(iprs):
for item in iprs:
docs = item.docs()
count = len(docs)
if count > 1:
item.last_draft = docs[count-1]
def iprs_from_docs(docs):
iprs = []
for doc in docs:
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
from ietf.ipr.models import IprDocAlias
disclosures = [ x.ipr for x in IprDocAlias.objects.filter(doc_alias=doc, ipr__status__in=[1,3]) ]
elif isinstance(doc, InternetDraft):
disclosures = [ item.ipr for item in IprDraft.objects.filter(document=doc, ipr__status__in=[1,3]) ]
elif isinstance(doc, Rfc):
disclosures = [ item.ipr for item in IprRfc.objects.filter(document=doc, ipr__status__in=[1,3]) ]
else:
raise ValueError("Doc type is neither draft nor rfc: %s" % doc)
if disclosures:
doc.iprs = disclosures
iprs += disclosures
iprs = list(set(iprs))
return iprs, docs
def patent_file_search(url, q):
if url:
fname = url.split("/")[-1]
fpath = os.path.join(settings.IPR_DOCUMENT_PATH, fname)
#print "*** Checking file", fpath
if os.path.isfile(fpath):
#print "*** Found file", fpath
file = codecs.open(fpath, mode='r', encoding='utf-8', errors='replace')
text = file.read()
file.close()
return q in text
return False
def search(request, type="", q="", id=""):
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
from ietf.group.models import Group
wgs = Group.objects.filter(type="wg").exclude(acronym="2000").select_related().order_by("acronym")
else:
wgs = IETFWG.objects.filter(group_type__group_type_id=1).exclude(group_acronym__acronym='2000').select_related().order_by('acronym.acronym')
args = request.REQUEST.items()
if args:
for key, value in args:
if key == "option":
type = value
if re.match(".*search", key):
q = value
if re.match(".*id", key):
id = value
if type and q or id:
#log("Got query: type=%s, q=%s, id=%s" % (type, q, id))
# Search by RFC number or draft-identifier
# Document list with IPRs
if type in ["document_search", "rfc_search"]:
doc = q
if type == "document_search":
if q:
q = normalize_draftname(q)
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
from ietf.doc.proxy import DraftLikeDocAlias
start = DraftLikeDocAlias.objects.filter(name__contains=q, name__startswith="draft")
else:
start = InternetDraft.objects.filter(filename__contains=q)
if id:
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
from ietf.doc.proxy import DraftLikeDocAlias
start = DraftLikeDocAlias.objects.filter(name=id)
else:
try:
id = int(id,10)
except:
id = -1
start = InternetDraft.objects.filter(id_document_tag=id)
if type == "rfc_search":
if q:
try:
q = int(q, 10)
except:
q = -1
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
from ietf.doc.proxy import DraftLikeDocAlias
start = DraftLikeDocAlias.objects.filter(name__contains=q, name__startswith="rfc")
else:
start = Rfc.objects.filter(rfc_number=q)
if start.count() == 1:
first = start[0]
doc = str(first)
# get all related drafts, then search for IPRs on all
docs = related_docs(first, [])
#docs = get_doclist.get_doclist(first)
iprs, docs = iprs_from_docs(docs)
return render("ipr/search_doc_result.html", {"q": q, "first": first, "iprs": iprs, "docs": docs, "doc": doc },
context_instance=RequestContext(request) )
elif start.count():
return render("ipr/search_doc_list.html", {"q": q, "docs": start },
context_instance=RequestContext(request) )
else:
return render("ipr/search_doc_result.html", {"q": q, "first": {}, "iprs": {}, "docs": {}, "doc": doc },
context_instance=RequestContext(request) )
# Search by legal name
# IPR list with documents
elif type == "patent_search":
iprs = IprDetail.objects.filter(legal_name__icontains=q, status__in=[1,3]).order_by("-submitted_date", "-ipr_id")
count = iprs.count()
iprs = [ ipr for ipr in iprs if not ipr.updated_by.all() ]
# Some extra information, to help us render 'and' between the
# last two documents in a sequence
mark_last_doc(iprs)
return render("ipr/search_holder_result.html", {"q": q, "iprs": iprs, "count": count },
context_instance=RequestContext(request) )
# Search by content of email or patent_info field
# IPR list with documents
elif type == "patent_info_search":
if len(q) < 3:
return render("ipr/search_error.html", {"q": q, "error": "The search string must contain at least three characters" },
context_instance=RequestContext(request) )
digits = re.search("[0-9]", q)
if not digits:
return render("ipr/search_error.html", {"q": q, "error": "The search string must contain at least one digit" },
context_instance=RequestContext(request) )
iprs = []
for ipr in IprDetail.objects.filter(status__in=[1,3]):
if ((q in ipr.patents) |
patent_file_search(ipr.legacy_url_0, q) |
patent_file_search(ipr.legacy_url_1, q) |
patent_file_search(ipr.legacy_url_2, q) ):
iprs.append(ipr)
count = len(iprs)
iprs = [ ipr for ipr in iprs if not ipr.updated_by.all() ]
# Some extra information, to help us render 'and' between the
# last two documents in a sequence
iprs.sort(key=lambda x: x.ipr_id, reverse=True) # Reverse sort
mark_last_doc(iprs)
return render("ipr/search_patent_result.html", {"q": q, "iprs": iprs, "count": count },
context_instance=RequestContext(request) )
# Search by wg acronym
# Document list with IPRs
elif type == "wg_search":
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
from ietf.doc.proxy import DraftLikeDocAlias
try:
docs = list(DraftLikeDocAlias.objects.filter(document__group__acronym=q))
docs += list(DraftLikeDocAlias.objects.filter(document__relateddocument__target__in=docs, document__relateddocument__relationship="replaces"))
except:
docs = []
else:
try:
docs = list(InternetDraft.objects.filter(group__acronym=q))
except:
docs = []
docs += [ draft.replaced_by for draft in docs if draft.replaced_by_id ]
docs += list(Rfc.objects.filter(group_acronym=q))
docs = [ doc for doc in docs if doc.ipr.count() ]
iprs, docs = iprs_from_docs(docs)
count = len(iprs)
return render("ipr/search_wg_result.html", {"q": q, "docs": docs, "count": count },
context_instance=RequestContext(request) )
# Search by rfc and id title
# Document list with IPRs
elif type == "title_search":
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
from ietf.doc.proxy import DraftLikeDocAlias
try:
docs = list(DraftLikeDocAlias.objects.filter(document__title__icontains=q))
except:
docs = []
else:
try:
docs = list(InternetDraft.objects.filter(title__icontains=q))
except:
docs = []
docs += list(Rfc.objects.filter(title__icontains=q))
docs = [ doc for doc in docs if doc.ipr.count() ]
iprs, docs = iprs_from_docs(docs)
count = len(iprs)
return render("ipr/search_doctitle_result.html", {"q": q, "docs": docs, "count": count },
context_instance=RequestContext(request) )
# Search by title of IPR disclosure
# IPR list with documents
elif type == "ipr_title_search":
iprs = IprDetail.objects.filter(title__icontains=q, status__in=[1,3]).order_by("-submitted_date", "-ipr_id")
count = iprs.count()
iprs = [ ipr for ipr in iprs if not ipr.updated_by.all() ]
# Some extra information, to help us render 'and' between the
# last two documents in a sequence
mark_last_doc(iprs)
return render("ipr/search_iprtitle_result.html", {"q": q, "iprs": iprs, "count": count },
context_instance=RequestContext(request) )
else:
raise Http404("Unexpected search type in IPR query: %s" % type)
return django.http.HttpResponseRedirect(request.path)
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
for wg in wgs:
wg.group_acronym = wg # proxy group_acronym for select box
return render("ipr/search.html", {"wgs": wgs}, context_instance=RequestContext(request))
|
|
import time
import os
import os.path
from nose.tools import ok_, eq_, raises
from concurrent.futures import ThreadPoolExecutor
from carpool import Carpool
from carpool.hooks import Carhook
def create_temp_file(fname, msg = 'something', sleep_time=0.2):
if os.path.isfile(fname):
raise Exception("File already exists!")
with open(fname, 'w') as f:
f.write(msg + "\n")
time.sleep(sleep_time)
return 'done'
class SampleHook(Carhook):
def __init__(self):
self.ready_wait_counter = 0
self.ready_for_task_called = 0
self.task_started_called = 0
self.task_ended_called = 0
self.start_called = 0
self.stop_called = 0
def ready_for_task(self, args, kwargs):
self.ready_for_task_called += 1
while self.ready_wait_counter > 0:
self.ready_wait_counter -= 1
return False
return True
def task_started(self, future, args, kwargs):
self.task_started_called += 1
def task_ended(self, future, args, kwargs):
self.task_ended_called += 1
def start(self, fn):
self.fn = fn
self.start_called += 1
def stop(self):
self.stop_called += 1
class TestCarpool:
def setup(self):
self.carpool = Carpool(create_temp_file)
self.hook = SampleHook()
def teardown(self):
eq_(self.carpool._active_workers, [])
if os.path.isfile('temp-a.txt'):
os.remove('temp-a.txt')
if os.path.isfile('temp-b.txt'):
os.remove('temp-b.txt')
def test_simple_fn(self):
create_temp_file('temp-b.txt', msg = 'something2')
ok_(os.path.isfile('temp-b.txt'))
def test_with_one_worker(self):
t = time.time()
with self.carpool.workers(max_workers=1) as create_temp_file:
create_temp_file('temp-a.txt')
create_temp_file('temp-b.txt', msg = 'something2')
elapsed = time.time() - t
ok_(elapsed > 0.4)
ok_(os.path.isfile('temp-a.txt'))
ok_(os.path.isfile('temp-b.txt'))
def test_with_multiple_workers(self):
t = time.time()
with self.carpool.workers(max_workers=2) as create_temp_file:
create_temp_file('temp-a.txt')
create_temp_file('temp-b.txt', msg = 'something2')
elapsed = time.time() - t
ok_(elapsed < 0.3)
ok_(os.path.isfile('temp-a.txt'))
ok_(os.path.isfile('temp-b.txt'))
def test_with_too_many_workers(self):
t = time.time()
with self.carpool.workers(max_workers=5) as create_temp_file:
create_temp_file('temp-a.txt')
create_temp_file('temp-b.txt', msg = 'something2')
elapsed = time.time() - t
ok_(elapsed < 0.3)
ok_(os.path.isfile('temp-a.txt'))
ok_(os.path.isfile('temp-b.txt'))
def test_using_thread_pool_executor(self):
self.carpool.pool_executor = ThreadPoolExecutor
t = time.time()
with self.carpool.workers(max_workers=2) as create_temp_file:
create_temp_file('temp-a.txt')
create_temp_file('temp-b.txt', msg = 'something2')
elapsed = time.time() - t
ok_(elapsed < 0.3)
ok_(os.path.isfile('temp-a.txt'))
ok_(os.path.isfile('temp-b.txt'))
def test_using_wait_time(self):
t = time.time()
self.carpool.wait_time = 0
with self.carpool.workers(max_workers=1) as create_temp_file:
create_temp_file('temp-a.txt')
create_temp_file('temp-b.txt', msg = 'something2')
elapsed = time.time() - t
ok_(elapsed < 0.5)
ok_(elapsed > 0.4)
ok_(os.path.isfile('temp-a.txt'))
ok_(os.path.isfile('temp-b.txt'))
def test_using_default_workers(self):
with self.carpool.workers() as create_temp_file:
create_temp_file('temp-a.txt')
create_temp_file('temp-b.txt', msg = 'something2')
ok_(os.path.isfile('temp-a.txt'))
ok_(os.path.isfile('temp-b.txt'))
def test_exception_second_worker_fails(self):
with self.carpool.workers() as create_temp_file:
f1 = create_temp_file('temp-a.txt')
# Fails because file already exists
f2 = create_temp_file('temp-a.txt', msg = 'failed')
msg = open('temp-a.txt').read().strip()
eq_(msg, 'something')
ok_(not f1.exception())
ok_(f2.exception())
@raises(Exception)
def test_exception_within_block(self):
with self.carpool.workers() as fn:
raise(Exception("Fake Error"))
def test_canceling_futures(self):
with self.carpool.workers(max_workers=1) as create_temp_file:
f1 = create_temp_file('temp-a.txt')
f2 = create_temp_file('temp-b.txt')
ok_(f2.cancel())
ok_(not f2.running())
ok_(f2.done())
ok_(f2.cancel()) # check multiple cancels does not raise error.
ok_(not os.path.isfile('temp-b.txt'))
def test_hook(self):
self.carpool.hooks.append(self.hook)
self.carpool.wait_time = 0
with self.carpool.workers(max_workers=1) as create_temp_file:
eq_(self.hook.start_called, 1)
eq_(self.hook.ready_for_task_called, 0)
eq_(self.hook.task_started_called, 0)
eq_(self.hook.task_ended_called, 0)
eq_(self.hook.stop_called, 0)
f = create_temp_file('temp-a.txt')
eq_(self.hook.start_called, 1)
eq_(self.hook.ready_for_task_called, 1)
eq_(self.hook.task_started_called, 1)
eq_(self.hook.task_ended_called, 0)
eq_(self.hook.stop_called, 0)
self.hook.ready_wait_counter = 2
f = create_temp_file('temp-b.txt')
eq_(self.hook.start_called, 1)
eq_(self.hook.ready_for_task_called, 4) # waiting loop - multiple calls made
eq_(self.hook.task_started_called, 2)
eq_(self.hook.task_ended_called, 1)
eq_(self.hook.stop_called, 0)
eq_(self.hook.start_called, 1)
eq_(self.hook.ready_for_task_called, 4) # waiting loop - multiple calls
eq_(self.hook.task_started_called, 2)
eq_(self.hook.task_ended_called, 2)
eq_(self.hook.stop_called, 1)
def test_hook_exception_in_start_hook(self):
self.carpool.hooks.append(self.hook)
self.hook.start_called = None
self.carpool.wait_time = 0
try:
with self.carpool.workers(max_workers=1) as create_temp_file:
create_temp_file('temp-a.txt')
except:
pass
eq_(self.hook.start_called, None)
eq_(self.hook.ready_for_task_called, 0)
eq_(self.hook.task_started_called, 0)
eq_(self.hook.task_ended_called, 0)
eq_(self.hook.stop_called, 1)
def test_hook_exception_in_ready_for_task(self):
self.carpool.hooks.append(self.hook)
self.hook.ready_for_task_called = None
self.carpool.wait_time = 0
try:
with self.carpool.workers(max_workers=1) as create_temp_file:
create_temp_file('temp-a.txt')
except:
pass
eq_(self.hook.start_called, 1)
eq_(self.hook.ready_for_task_called, None)
eq_(self.hook.task_started_called, 0)
eq_(self.hook.task_ended_called, 0)
eq_(self.hook.stop_called, 1)
def test_hook_exception_in_task_started(self):
self.carpool.hooks.append(self.hook)
self.hook.task_started_called = None
self.carpool.wait_time = 0
try:
with self.carpool.workers(max_workers=1) as create_temp_file:
create_temp_file('temp-a.txt')
except:
pass
eq_(self.hook.start_called, 1)
eq_(self.hook.ready_for_task_called, 1)
eq_(self.hook.task_started_called, None)
eq_(self.hook.task_ended_called, 0)
eq_(self.hook.stop_called, 1)
def test_hook_exception_in_task_ended(self):
self.carpool.hooks.append(self.hook)
self.hook.task_ended_called = None
self.carpool.wait_time = 0
try:
with self.carpool.workers(max_workers=1) as create_temp_file:
create_temp_file('temp-a.txt')
except:
pass
eq_(self.hook.start_called, 1)
eq_(self.hook.ready_for_task_called, 1)
eq_(self.hook.task_started_called, 1)
eq_(self.hook.task_ended_called, None)
eq_(self.hook.stop_called, 1)
def test_hook_exception_in_stop_called(self):
self.carpool.hooks.append(self.hook)
self.hook.stop_called = None
self.carpool.wait_time = 0
try:
with self.carpool.workers(max_workers=1) as create_temp_file:
create_temp_file('temp-a.txt')
except:
pass
eq_(self.hook.start_called, 1)
eq_(self.hook.ready_for_task_called, 1)
eq_(self.hook.task_started_called, 1)
eq_(self.hook.task_ended_called, 1)
eq_(self.hook.stop_called, None)
def test_multiple_hooks_exception_in_stop_called(self):
hook2 = SampleHook()
self.carpool.hooks.append(self.hook)
self.carpool.hooks.append(hook2)
self.hook.stop_called = None
self.carpool.wait_time = 0
try:
with self.carpool.workers(max_workers=1) as create_temp_file:
create_temp_file('temp-a.txt')
except:
pass
eq_(self.hook.start_called, 1)
eq_(self.hook.ready_for_task_called, 1)
eq_(self.hook.task_started_called, 1)
eq_(self.hook.task_ended_called, 1)
eq_(self.hook.stop_called, None)
eq_(hook2.stop_called, 1)
# TODO
def test_hook_exception_timeout_future(self):
self.carpool.hooks.append(self.hook)
self.carpool.wait_time = 0
try:
with self.carpool.workers(max_workers=1) as create_temp_file:
f = create_temp_file('temp-a.txt')
except:
pass
ok_(f.done())
|
|
import copy
from django.db.models.aggregates import Aggregate
from django.db.models.base import ModelBase
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from .exceptions import APIInputError
def _validate_field_lookup_term(model, term):
"""Checks whether the term is a valid field_lookup for the model.
Args:
model(django.db.models.Model): a django model for which to check
whether the term is a valid field_lookup.
term(str): the term to check whether it is a valid field_lookup for the
model supplied.
Returns:
True if term corresponds to a valid field_lookup for the model.
Raises:
FieldError: If the term supplied is not a valid field lookup parameter
for the model.
"""
# TODO: Memoization for speed enhancements
terms = term.split('__')
model_fields = model._meta.get_all_field_names()
if terms[0] not in model_fields:
raise APIInputError("Field %r does not exist. Valid lookups are %s."
% (terms[0], ', '.join(model_fields)))
if len(terms) == 1:
return model._meta.get_field(terms[0]).verbose_name
else:
# DocString details for model._meta.get_field_by_name
#
# Returns a tuple (field_object, model, direct, m2m), where
# field_object is the Field instance for the given name,
# model is the model containing this field (None for
# local fields),
# direct is True if the field exists on this model,
# and m2m is True for many-to-many relations.
# When 'direct' is False, 'field_object' is the corresponding
# RelatedObject for this field (since the field doesn't have
# an instance associated with it).
field_details = model._meta.get_field_by_name(terms[0])
# if the field is direct field
if field_details[2]:
m = field_details[0].related.parent_model
else:
m = field_details[0].model
return _validate_field_lookup_term(m, '__'.join(terms[1:]))
def _clean_source(source):
if isinstance(source, ModelBase):
return source._base_manager.all()
elif isinstance(source, Manager):
return source.all()
elif isinstance(source, QuerySet):
return source
raise APIInputError("'source' must either be a QuerySet, Model or "
"Manager. Got %s of type %s instead."
%(source, type(source)))
def _validate_func(func):
if not isinstance(func, Aggregate):
raise APIInputError("'func' must an instance of django Aggregate. "
"Got %s of type %s instead" % (func, type(func)))
def _clean_categories(categories, source):
if isinstance(categories, basestring):
categories = [categories]
elif isinstance(categories, (tuple, list)):
if not categories:
raise APIInputError("'categories' tuple or list must contain at "
"least one valid model field. Got %s."
%categories)
else:
raise APIInputError("'categories' must be one of the following "
"types: basestring, tuple or list. Got %s of "
"type %s instead."
%(categories, type(categories)))
field_aliases = {}
for c in categories:
field_aliases[c] = _validate_field_lookup_term(source.model, c)
return categories, field_aliases
def _clean_legend_by(legend_by, source):
if isinstance(legend_by, basestring):
legend_by = [legend_by]
elif isinstance(legend_by, (tuple, list)):
pass
elif legend_by is None:
legend_by = ()
else:
raise APIInputError("'legend_by' must be one of the following "
"types: basestring, tuple or list. Got %s of "
"type %s instead."
%(legend_by, type(legend_by)))
field_aliases = {}
for lg in legend_by:
field_aliases[lg] = _validate_field_lookup_term(source.model, lg)
return legend_by, field_aliases
def _validate_top_n_per_cat(top_n_per_cat):
if not isinstance(top_n_per_cat, int):
raise APIInputError("'top_n_per_cat' must be an int. Got %s of type "
"%s instead."
%(top_n_per_cat, type(top_n_per_cat)))
def _clean_field_aliases(fa_actual, fa_cat, fa_lgby):
fa = copy.copy(fa_lgby)
fa.update(fa_cat)
fa.update(fa_actual)
return fa
def _convert_pdps_to_dict(series_list):
series_dict = {}
for sd in series_list:
try:
options = sd['options']
except KeyError:
raise APIInputError("%s is missing the 'options' key." %sd)
if not isinstance(options, dict):
raise APIInputError("Expecting a dict in place of: %s" %options)
try:
terms = sd['terms']
except KeyError:
raise APIInputError("%s is missing have the 'terms' key." %sd)
if isinstance(terms, dict):
if not terms:
raise APIInputError("'terms' cannot be empty.")
for tk, tv in terms.items():
if isinstance(tv, Aggregate):
tv = {'func': tv}
elif isinstance(tv, dict):
pass
else:
raise APIInputError("Expecting a dict or django Aggregate "
"in place of: %s" %tv)
opts = copy.deepcopy(options)
opts.update(tv)
series_dict.update({tk: opts})
else:
raise APIInputError("Expecting a dict in place of: %s"
%terms)
return series_dict
def clean_pdps(series):
"""Clean the PivotDataPool series input from the user.
"""
if isinstance(series, list):
series = _convert_pdps_to_dict(series)
clean_pdps(series)
elif isinstance(series, dict):
if not series:
raise APIInputError("'series' cannot be empty.")
for td in series.values():
# td is not a dict
if not isinstance(td, dict):
raise APIInputError("Expecting a dict in place of: %s" %td)
# source
try:
td['source'] = _clean_source(td['source'])
except KeyError:
raise APIInputError("Missing 'source': %s" % td)
# func
try:
_validate_func(td['func'])
except KeyError:
raise APIInputError("Missing 'func': %s" % td)
# categories
try:
td['categories'], fa_cat = _clean_categories(td['categories'],
td['source'])
except KeyError:
raise APIInputError("Missing 'categories': %s" % td)
# legend_by
try:
td['legend_by'], fa_lgby = _clean_legend_by(td['legend_by'],
td['source'])
except KeyError:
td['legend_by'], fa_lgby = (), {}
# top_n_per_cat
try:
_validate_top_n_per_cat(td['top_n_per_cat'])
except KeyError:
td['top_n_per_cat'] = 0
# field_aliases
try:
fa_actual = td['field_aliases']
except KeyError:
td['field_aliases'] = fa_actual = {}
td['field_aliases'] = _clean_field_aliases(fa_actual,
fa_cat,
fa_lgby)
else:
raise APIInputError("Expecting a dict or list in place of: %s" %series)
return series
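# Hedged illustration: one valid list-form input for clean_pdps(). The model
# and field names (sales_model, price, bookstore__city, bookstore__name) are
# hypothetical; what matters is the 'options'/'terms' structure expanded by
# _convert_pdps_to_dict above.
def _example_pivot_series(sales_model):
    from django.db.models import Avg, Max
    series = [{
        'options': {
            'source': sales_model.objects.all(),
            'categories': ['bookstore__city'],
            'legend_by': ['bookstore__name'],
        },
        'terms': {
            'avg_price': Avg('price'),
            'max_price': {'func': Max('price'), 'top_n_per_cat': 5},
        },
    }]
    return clean_pdps(series)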
def _convert_dps_to_dict(series_list):
series_list = copy.deepcopy(series_list)
series_dict = {}
if not series_list:
raise APIInputError("'series' cannot be empty.")
for sd in series_list:
try:
options = sd['options']
except KeyError:
raise APIInputError("%s is missing the 'options' key." %sd)
if not isinstance(options, dict):
raise APIInputError("Expecting a dict in place of: %s" %options)
try:
terms = sd['terms']
except KeyError:
raise APIInputError("%s is missing the 'terms' key." %sd)
if isinstance(terms, list):
for term in terms:
if isinstance(term, basestring):
series_dict[term] = copy.deepcopy(options)
elif isinstance(term, dict):
for tk, tv in term.items():
if isinstance(tv, basestring):
opts = copy.deepcopy(options)
opts['field'] = tv
series_dict[tk] = opts
elif isinstance(tv, dict):
opts = copy.deepcopy(options)
opts.update(tv)
series_dict[tk] = opts
else:
raise APIInputError("Expecting a basestring or "
" dict in place of: %s" %tv)
elif isinstance(term,tuple):
t,fn = term
opt = copy.deepcopy(options)
opt['fn'] = fn
series_dict[t] = opt
elif isinstance(terms, dict):
for tk, tv in terms.items():
if isinstance(tv, basestring):
opts = copy.deepcopy(options)
opts['field'] = tv
series_dict[tk] = opts
elif isinstance(tv, dict):
opts = copy.deepcopy(options)
opts.update(tv)
series_dict[tk] = opts
else:
raise APIInputError("Expecting a basestring or dict in "
"place of: %s" %tv)
else:
raise APIInputError("Expecting a list or dict in place of: %s."
%terms)
return series_dict
def clean_dps(series):
"""Clean the DataPool series input from the user.
"""
if isinstance(series, dict):
if not series:
raise APIInputError("'series' cannot be empty.")
for tk, td in series.items():
try:
td['source'] = _clean_source(td['source'])
except KeyError:
raise APIInputError("%s is missing the 'source' key." %td)
td.setdefault('field', tk)
fa = _validate_field_lookup_term(td['source'].model, td['field'])\
.title()
# If the user supplied term is not a field name, use it as an alias
if tk != td['field']:
fa = tk
td.setdefault('field_alias', fa)
elif isinstance(series, list):
series = _convert_dps_to_dict(series)
clean_dps(series)
else:
raise APIInputError("Expecting a dict or list in place of: %s" %series)
return series
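# Hedged illustration: one valid dict-form input for clean_dps(). The model
# and field names (month_model, month, sales) are hypothetical; each term key
# defaults to the field name unless 'field' is given explicitly.
def _example_data_series(month_model):
    series = {
        'month': {'source': month_model.objects.all()},
        'total_sales': {'source': month_model.objects.all(),
                        'field': 'sales'},
    }
    return clean_dps(series)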
def _convert_pcso_to_dict(series_options):
series_options_dict = {}
for stod in series_options:
try:
options = stod['options']
except KeyError:
raise APIInputError("%s is missing the 'options' key." %stod)
if not isinstance(options, dict):
raise APIInputError("Expecting a dict in place of: %s" %options)
try:
terms = stod['terms']
except KeyError:
raise APIInputError("%s is missing the 'terms' key." %stod)
if isinstance(terms, list):
for term in terms:
if isinstance(term, basestring):
opts = copy.deepcopy(options)
series_options_dict.update({term: opts})
elif isinstance(term, dict):
for tk, tv in term.items():
if not isinstance(tv, dict):
raise APIInputError("Expecting a dict in place "
"of: %s" %tv)
opts = copy.deepcopy(options)
opts.update(tv)
series_options_dict.update({tk: opts})
else:
raise APIInputError("Expecting a list in place of: %s" %terms)
return series_options_dict
def clean_pcso(series_options, ds):
"""Clean the PivotChart series_options input from the user.
"""
# todlist = term option dict list
if isinstance(series_options, dict):
for sok, sod in series_options.items():
if sok not in ds.series.keys():
raise APIInputError("All the series terms must be present "
"in the series dict of the "
"datasource. Got %s. Allowed values "
"are: %s"
%(sok, ', '.join(ds.series.keys())))
if not isinstance(sod, dict):
raise APIInputError("All the series options must be of the "
"type dict. Got %s of type %s instead."
%(sod, type(sod)))
elif isinstance(series_options, list):
series_options = _convert_pcso_to_dict(series_options)
clean_pcso(series_options, ds)
else:
raise APIInputError("Expecting a dict or list in place of: %s."
%series_options)
return series_options
def _convert_cso_to_dict(series_options):
series_options_dict = {}
#stod: series term and option dict
for stod in series_options:
try:
options = stod['options']
except KeyError:
raise APIInputError("%s is missing the 'options' key." %stod)
if not isinstance(options, dict):
raise APIInputError("Expecting a dict in place of: %s" %options)
try:
terms = stod['terms']
except KeyError:
raise APIInputError("%s is missing the 'terms' key." %stod)
if isinstance(terms, dict):
if not terms:
raise APIInputError("'terms' dict cannot be empty.")
for tk, td in terms.items():
if isinstance(td, list):
for yterm in td:
if isinstance(yterm, basestring):
opts = copy.deepcopy(options)
opts['_x_axis_term'] = tk
series_options_dict[yterm] = opts
elif isinstance(yterm, dict):
opts = copy.deepcopy(options)
opts.update(yterm.values()[0])
opts['_x_axis_term'] = tk
series_options_dict[yterm.keys()[0]] = opts
else:
raise APIInputError("Expecting a basestring or "
"dict in place of: %s." %yterm)
else:
raise APIInputError("Expecting a list instead of: %s"
%td)
else:
raise APIInputError("Expecting a dict in place of: %s."
%terms)
return series_options_dict
def clean_cso(series_options, ds):
"""Clean the Chart series_options input from the user.
"""
if isinstance(series_options, dict):
for sok, sod in series_options.items():
if sok not in ds.series.keys():
raise APIInputError("%s is not one of the keys of the "
"datasource series. Allowed values "
"are: %s"
%(sok, ', '.join(ds.series.keys())))
if not isinstance(sod, dict):
raise APIInputError("%s is of type: %s. Expecting a dict."
%(sod, type(sod)))
try:
_x_axis_term = sod['_x_axis_term']
if _x_axis_term not in ds.series.keys():
raise APIInputError("%s is not one of the keys of the "
"datasource series. Allowed values "
"are: %s"
%(_x_axis_term,
', '.join(ds.series.keys())))
except KeyError:
raise APIInputError("Expecting a '_x_axis_term' for %s." %sod)
if ds.series[sok]['_data'] != ds.series[_x_axis_term]['_data']:
raise APIInputError("%s and %s do not belong to the same "
"table." %(sok, _x_axis_term))
sod['_data'] = ds.series[sok]['_data']
elif isinstance(series_options, list):
series_options = _convert_cso_to_dict(series_options)
clean_cso(series_options, ds)
else:
raise APIInputError("'series_options' must either be a dict or a "
"list. Got %s of type %s instead."
%(series_options, type(series_options)))
return series_options
def clean_sortf_mapf_mts(sortf_mapf_mts):
if sortf_mapf_mts is None:
sortf_mapf_mts = (None, None, False)
if isinstance(sortf_mapf_mts, tuple):
if len(sortf_mapf_mts) != 3:
raise APIInputError("%r must have exactly three elements."
%sortf_mapf_mts)
sortf, mapf, mts = sortf_mapf_mts
if not callable(sortf) and sortf is not None:
raise APIInputError("%r must be callable or None." %sortf)
if not callable(mapf) and mapf is not None:
raise APIInputError("%r must be callable or None." %mapf)
mts = bool(mts)
return (sortf, mapf, mts)
def clean_x_sortf_mapf_mts(x_sortf_mapf_mts):
cleaned_x_s_m_mts = []
if x_sortf_mapf_mts is None:
x_sortf_mapf_mts = [(None, None, False)]
if isinstance(x_sortf_mapf_mts, tuple):
x_sortf_mapf_mts = [x_sortf_mapf_mts]
for x_s_m_mts in x_sortf_mapf_mts:
if not isinstance(x_s_m_mts, tuple):
raise APIInputError("%r must be a tuple." %x_s_m_mts)
if len(x_s_m_mts) != 3:
raise APIInputError("%r must have exactly three elements."
%x_s_m_mts)
x_sortf = x_s_m_mts[0]
if not callable(x_sortf) and x_sortf is not None:
raise APIInputError("%r must be callable or None." %x_sortf)
x_mapf = x_s_m_mts[1]
if not callable(x_mapf) and x_mapf is not None:
raise APIInputError("%r must be callable or None." %x_mapf)
x_mts = bool(x_s_m_mts[2])
cleaned_x_s_m_mts.append((x_sortf, x_mapf, x_mts))
return cleaned_x_s_m_mts
|
|
# Copyright (c) 2017 Huawei, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils import uuidutils
from conveyor import context
from conveyor import exception
from conveyor.network import neutron
from conveyor.resource.driver import networks
from conveyor.resource.driver import secgroup
from conveyor.resource import resource
from conveyor.tests import test
from conveyor.tests.unit.resource import fake_object
fake_net_dict = fake_object.fake_net_dict
fake_subnet_dict = fake_object.fake_subnet_dict
fake_port_dict = fake_object.fake_port_dict
fake_secgroup_dict = fake_object.fake_secgroup_dict
fake_fip_dict = fake_object.fake_fip_dict
fake_route_dict = fake_object.fake_router_dict
# resource_id for test extract_network_resource
net_0_id = 'net-0'
sb_of_net_0_id = 'net-0-subnet-0'
other_net_id = 'net-1'
sb_of_other_net_id = 'net-1-subnet-1'
# fake external net and subnet id
ext_net_id = 'ext-net'
ext_subnet_id = 'ext-subnet'
def mock_new_net(cls, context, network_id, timeout=None, **_params):
fake_net = copy.deepcopy(fake_net_dict)
fake_net['id'] = network_id
if network_id == net_0_id:
fake_net['subnets'] = [sb_of_net_0_id]
elif network_id == other_net_id:
fake_net['subnets'] = [sb_of_other_net_id]
elif network_id == ext_net_id:
fake_net['subnets'] = [ext_subnet_id]
return fake_net
def mock_new_subnet(cls, context, subnet_id, **_params):
fake_subnet = copy.deepcopy(fake_subnet_dict)
fake_subnet['id'] = subnet_id
if subnet_id == sb_of_net_0_id:
fake_subnet['network_id'] = net_0_id
elif subnet_id == sb_of_other_net_id:
fake_subnet['network_id'] = other_net_id
elif subnet_id == ext_subnet_id:
fake_subnet['network_id'] = ext_net_id
return fake_subnet
def mock_extract_secgroups(cls, secgroups_ids):
secgroup_res = []
for secgroup_id in secgroups_ids:
fake_secgroup = copy.deepcopy(fake_object.fake_secgroup_dict)
fake_secgroup['id'] = secgroup_id
name_in_tmpl = uuidutils.generate_uuid()
sg_res = resource.Resource(name_in_tmpl,
'OS::Neutron::SecurityGroup',
secgroup_id)
sg_dep = resource.ResourceDependency(secgroup_id,
fake_secgroup['name'],
name_in_tmpl,
'OS::Neutron::SecurityGroup')
cls._collected_resources[secgroup_id] = sg_res
cls._collected_dependencies[secgroup_id] = sg_dep
secgroup_res.append(sg_res)
return secgroup_res
class NetworkResourceTestCase(test.TestCase):
def setUp(self):
super(NetworkResourceTestCase, self).setUp()
self.context = context.RequestContext(
fake_object.fake_user_id,
fake_object.fake_project_id,
is_admin=False)
self.net_resource = networks.NetworkResource(self.context)
@mock.patch.object(neutron.API, 'network_list')
def test_extract_all_nets(self, mock_net_list):
fake_net = copy.deepcopy(fake_net_dict)
fake_net_id = fake_net['id']
mock_net_list.return_value = [fake_net]
result = self.net_resource.extract_nets([])
self.assertTrue(1 == len(result))
self.assertEqual(fake_net_id, result[0].id)
net_dep = self.net_resource.get_collected_dependencies()[fake_net_id]
self.assertFalse(net_dep.dependencies)
@mock.patch.object(neutron.API, 'get_network')
def test_extract_nets_with_ids(self, mock_net):
fake_net = copy.deepcopy(fake_net_dict)
mock_net.return_value = fake_net
result = self.net_resource.extract_nets([fake_net['id']])
self.assertTrue(1 == len(result))
self.assertEqual(fake_net['id'], result[0].id)
@mock.patch.object(neutron.API, 'get_network', side_effect=Exception)
def test_extract_nets_failed(self, mock_net):
self.assertRaises(exception.ResourceNotFound,
self.net_resource.extract_nets,
['net-id'])
@mock.patch.object(neutron.API, 'network_list')
def test_extract_nets_from_cache(self, mock_net_list):
fake_net = copy.deepcopy(fake_net_dict)
mock_net_list.return_value = [fake_net]
fake_net_id = fake_net['id']
fake_net_name = fake_net['name']
fake_net_res = resource.Resource('network_0',
'OS::Neutron::Net',
fake_net_id)
fake_net_dep = resource.ResourceDependency(fake_net_id,
fake_net_name,
'network_0',
'OS::Neutron::Net')
self.net_resource = networks.NetworkResource(
self.context,
collected_resources={fake_net_id: fake_net_res},
collected_dependencies={fake_net_id: fake_net_dep})
result = self.net_resource.extract_nets([])
self.assertTrue(1 == len(result))
self.assertEqual(fake_net_id, result[0].id)
@mock.patch.object(neutron.API, 'get_subnet')
@mock.patch.object(neutron.API, 'get_network')
def test_extract_nets_with_subnets(self, mock_net, mock_subnet):
# NOTE: invoke extract_nets with parameter with_subnets=True
fake_net = copy.deepcopy(fake_net_dict)
fake_net_id = fake_net['id']
mock_net.return_value = fake_net
fake_subnet = copy.deepcopy(fake_subnet_dict)
fake_subnet_id = fake_subnet['id']
mock_subnet.return_value = fake_subnet
result = self.net_resource.extract_nets([fake_net_id],
with_subnets=True)
self.assertTrue(1 == len(result))
self.assertTrue(2 == len(self.net_resource.get_collected_resources()))
net_res = self.net_resource.get_collected_resources()[fake_net_id]
net_dep = self.net_resource.get_collected_dependencies()[fake_net_id]
sn_dep = self.net_resource.get_collected_dependencies()[fake_subnet_id]
self.assertFalse(len(net_dep.dependencies))
self.assertEqual(1, len(sn_dep.dependencies))
self.assertIn(net_res.name, sn_dep.dependencies)
@mock.patch.object(neutron.API, 'get_network')
@mock.patch.object(neutron.API, 'subnet_list')
def test_extract_all_subnets(self, mock_subnet_list, mock_net):
        # NOTE: in the branch that retrieves the net for this subnet, an
        # exception is raised in extract_nets if the returned network_res is
        # None, so that case can never be reached from here.
fake_subnet = copy.deepcopy(fake_subnet_dict)
mock_subnet_list.return_value = [fake_subnet]
mock_net.return_value = copy.deepcopy(fake_net_dict)
result = self.net_resource.extract_subnets([])
self.assertTrue(1 == len(result))
self.assertEqual(fake_subnet['id'], result[0].id)
collected_res = self.net_resource.get_collected_resources()
collected_deps = self.net_resource.get_collected_dependencies()
self.assertTrue(2 == len(collected_res))
net_dep = collected_deps[fake_net_dict['id']]
subnet_dep = collected_deps[fake_subnet['id']]
self.assertFalse(len(net_dep.dependencies))
self.assertEqual(1, len(subnet_dep.dependencies))
self.assertIn(net_dep.name_in_template, subnet_dep.dependencies)
@mock.patch.object(neutron.API, 'get_network')
@mock.patch.object(neutron.API, 'get_subnet')
def test_extract_subnets_with_ids(self, mock_subnet, mock_net):
fake_subnet = copy.deepcopy(fake_subnet_dict)
mock_subnet.return_value = fake_subnet
mock_net.return_value = copy.deepcopy(fake_net_dict)
result = self.net_resource.extract_subnets([fake_subnet['id']])
self.assertTrue(1 == len(result))
self.assertEqual(fake_subnet['id'], result[0].id)
self.assertTrue(2 == len(self.net_resource.get_collected_resources()))
@mock.patch.object(neutron.API, 'get_subnet', side_effect=Exception)
def test_extract_subnets_failed(self, mock_subnet):
self.assertRaises(exception.ResourceNotFound,
self.net_resource.extract_subnets,
['subnet_123'])
@mock.patch.object(neutron.API, 'subnet_list')
def test_extract_subnets_from_cache(self, mock_subnet_list):
fake_subnet = copy.deepcopy(fake_subnet_dict)
fake_subnet_id = fake_subnet['id']
fake_subnet_name = fake_subnet['name']
mock_subnet_list.return_value = [fake_subnet]
fake_subnet_res = resource.Resource(fake_subnet_name,
'OS::Neutron::Subnet',
fake_subnet_id)
fake_subnet_dep = resource.ResourceDependency(fake_subnet_id,
fake_subnet_name,
'subnet_0',
'OS::Neutron::Subnet')
self.net_resource = networks.NetworkResource(
self.context,
collected_resources={fake_subnet_id: fake_subnet_res},
collected_dependencies={fake_subnet_id: fake_subnet_dep}
)
result = self.net_resource.extract_subnets([])
self.assertTrue(1 == len(result))
self.assertEqual(fake_subnet_id, result[0].id)
collected_res = self.net_resource.get_collected_resources()
collected_deps = self.net_resource.get_collected_dependencies()
self.assertTrue(1 == len(collected_res))
self.assertTrue(1 == len(collected_deps))
subnet_dep = collected_deps[fake_subnet_id]
self.assertFalse(len(subnet_dep.dependencies))
@mock.patch.object(neutron.API, 'get_subnet')
@mock.patch.object(neutron.API, 'port_list')
def test_extract_all_ports(self, mock_port_list, mock_subnet):
        # NOTE: by default in this case, the subnets will be extracted as well.
fake_port = copy.deepcopy(fake_port_dict)
mock_port_list.return_value = [fake_port]
fake_subnet = copy.deepcopy(fake_subnet_dict)
fake_subnet_id = fake_subnet['id']
fake_subnet_name = fake_subnet['name']
mock_subnet.return_value = fake_subnet
fake_subnet_res = resource.Resource(
fake_subnet_name,
'OS::Neutron::Subnet',
fake_subnet_id,
properties={
'network_id': {'get_resource': 'network_0'}
})
fake_subnet_dep = resource.ResourceDependency(fake_subnet_id,
fake_subnet_name,
'subnet_0',
'OS::Neutron::Subnet')
self.net_resource = networks.NetworkResource(
self.context,
collected_resources={fake_subnet_id: fake_subnet_res},
collected_dependencies={fake_subnet_id: fake_subnet_dep}
)
result = self.net_resource.extract_ports([])
self.assertTrue(1 == len(result))
self.assertTrue(2 == len(self.net_resource.get_collected_resources()))
@mock.patch.object(neutron.API, 'get_subnet')
@mock.patch.object(neutron.API, 'get_port')
def test_extract_ports_with_ids(self, mock_port, mock_subnet):
        # NOTE: by default in this case, the subnets will be extracted as well.
fake_port = copy.deepcopy(fake_port_dict)
mock_port.return_value = fake_port
fake_subnet = copy.deepcopy(fake_subnet_dict)
fake_subnet_id = fake_subnet['id']
fake_subnet_name = fake_subnet['name']
mock_subnet.return_value = fake_subnet
fake_subnet_res = resource.Resource(
fake_subnet_name,
'OS::Neutron::Subnet',
fake_subnet_id,
properties={
'network_id': {'get_resource': 'network_0'}
})
fake_subnet_dep = resource.ResourceDependency(fake_subnet_id,
fake_subnet_name,
'subnet_0',
'OS::Neutron::Subnet')
self.net_resource = networks.NetworkResource(
self.context,
collected_resources={fake_subnet_id: fake_subnet_res},
collected_dependencies={fake_subnet_id: fake_subnet_dep}
)
result = self.net_resource.extract_ports([fake_port['id']])
self.assertTrue(1 == len(result))
self.assertTrue(2 == len(self.net_resource.get_collected_resources()))
@mock.patch.object(neutron.API, 'get_port')
def test_extract_ports_failed(self, mock_port):
fake_port = copy.deepcopy(fake_port_dict)
mock_port.return_value = fake_port
self.assertRaises(exception.ResourceNotFound,
self.net_resource.extract_ports,
[fake_port['id']])
@mock.patch.object(neutron.API, 'port_list')
def test_extract_ports_from_cache(self, mock_port_list):
fake_port = copy.deepcopy(fake_port_dict)
mock_port_list.return_value = [fake_port]
fake_port_id = fake_port['id']
fake_port_name = fake_port['name']
fake_port_des = resource.Resource(fake_port_name, 'OS::Neutron::Port',
fake_port_id)
fake_port_dep = resource.ResourceDependency(fake_port_id,
fake_port_name,
'port_0',
'OS::Neutron::Port')
self.net_resource = networks.NetworkResource(
self.context,
collected_resources={fake_port_id: fake_port_des},
collected_dependencies={fake_port_id: fake_port_dep})
result = self.net_resource.extract_ports([])
self.assertTrue(1 == len(result))
self.assertTrue(1 == len(self.net_resource.get_collected_resources()))
@mock.patch.object(secgroup.SecGroup, 'extract_secgroups',
mock_extract_secgroups)
@mock.patch.object(neutron.API, 'get_port')
def test_extract_ports_with_secgroups(self, mock_port):
        # NOTE: this test exercises the branch
        # `if port.get('security_groups')`
fake_port = copy.deepcopy(fake_port_dict)
fake_port['security_groups'] = ['164c7126-ee4e-44e8-afb5-cc2f11225b30']
fake_port['fixed_ips'] = []
mock_port.return_value = fake_port
result = self.net_resource.extract_ports([fake_port['id']])
self.assertEqual(1, len(result))
self.assertEqual(fake_port['id'], result[0].id)
@mock.patch.object(neutron.API, 'get_port')
def test_extract_ports_with_invalid_ips(self, mock_port):
fake_port = copy.deepcopy(fake_port_dict)
fake_port['fixed_ips'] = [{}]
mock_port.return_value = fake_port
self.assertRaises(exception.ResourceAttributesException,
self.net_resource.extract_ports,
[fake_port['id']])
@mock.patch.object(secgroup.SecGroup, 'extract_secgroups',
mock_extract_secgroups)
def test_extract_secgroups(self):
fake_secgroup = copy.deepcopy(fake_secgroup_dict)
result = self.net_resource.extract_secgroups([fake_secgroup['id']])
self.assertEqual(1, len(result))
self.assertEqual(fake_secgroup['id'], result[0].id)
@mock.patch.object(neutron.API, 'get_subnet')
@mock.patch.object(neutron.API, 'get_network')
@mock.patch.object(neutron.API, 'floatingip_list')
def test_extract_all_fips(self, mock_fip_list, mock_net, mock_subnet):
        # NOTE: This test only extracts the net and subnet for the fip,
        # without a router or port.
fake_fip = copy.deepcopy(fake_fip_dict)
mock_fip_list.return_value = [fake_fip]
fake_net = copy.deepcopy(fake_net_dict)
mock_net.return_value = fake_net
fake_subnet = copy.deepcopy(fake_subnet_dict)
mock_subnet.return_value = fake_subnet
result = self.net_resource.extract_floatingips([])
self.assertTrue(1 == len(result))
self.assertTrue(3 == len(self.net_resource.get_collected_resources()))
@mock.patch.object(neutron.API, 'get_subnet')
@mock.patch.object(neutron.API, 'get_network')
@mock.patch.object(neutron.API, 'get_floatingip')
def test_extract_fips_with_ids(self, mock_fip, mock_net, mock_subnet):
fake_fip = copy.deepcopy(fake_fip_dict)
mock_fip.return_value = fake_fip
fake_net = copy.deepcopy(fake_net_dict)
mock_net.return_value = fake_net
fake_subnet = copy.deepcopy(fake_subnet_dict)
mock_subnet.return_value = fake_subnet
result = self.net_resource.extract_floatingips([fake_fip['id']])
self.assertTrue(1 == len(result))
self.assertTrue(3 == len(self.net_resource.get_collected_resources()))
@mock.patch.object(neutron.API, 'get_floatingip', side_effect=Exception)
def test_extract_fips_failed(self, mock_fip):
self.assertRaises(exception.ResourceNotFound,
self.net_resource.extract_floatingips,
[fake_fip_dict['id']])
@mock.patch.object(neutron.API, 'floatingip_list')
def test_extract_fips_from_cache(self, mock_fip_list):
fake_fip = copy.deepcopy(fake_fip_dict)
mock_fip_list.return_value = [fake_fip]
fake_fip_id = fake_fip['id']
fake_fip_name = ''
fake_fip_res = resource.Resource(fake_fip_name,
'OS::Neutron::FloatingIP',
fake_fip_id)
fake_fip_dep = resource.ResourceDependency(fake_fip_id,
fake_fip_name,
'floatingip_0',
'OS::Neutron::FloatingIP')
self.net_resource = networks.NetworkResource(
self.context,
collected_resources={fake_fip_id: fake_fip_res},
collected_dependencies={fake_fip_id: fake_fip_dep})
result = self.net_resource.extract_floatingips([])
self.assertTrue(1 == len(result))
self.assertTrue(1 == len(self.net_resource.get_collected_resources()))
@mock.patch.object(neutron.API, 'floatingip_list')
def test_extract_fips_with_invalid_ip(self, mock_fip_list):
fake_fip = copy.deepcopy(fake_fip_dict)
fake_fip['floating_ip_address'] = ''
mock_fip_list.return_value = [fake_fip]
self.assertRaises(exception.ResourceAttributesException,
self.net_resource.extract_floatingips,
[])
@mock.patch.object(neutron.API, 'floatingip_list')
def test_extract_fips_with_invalid_net(self, mock_fip_list):
fake_fip = copy.deepcopy(fake_fip_dict)
fake_fip['floating_network_id'] = ''
mock_fip_list.return_value = [fake_fip]
self.assertRaises(exception.ResourceAttributesException,
self.net_resource.extract_floatingips,
[])
@mock.patch.object(neutron.API, 'get_router')
@mock.patch.object(neutron.API, 'get_subnet', mock_new_subnet)
@mock.patch.object(neutron.API, 'get_network', mock_new_net)
@mock.patch.object(neutron.API, 'get_floatingip')
def test_extract_fips_with_router(self, mock_fip, mock_router):
# -------------------------------------------------------
# | subnet_0(ext-sb) subnet_1(pri-sb)|
# | | | |
# | | | |
# | net_0(ext-net)<---fip router--->net_1(pri-net) |
# -------------------------------------------------------
fake_fip = copy.deepcopy(fake_fip_dict)
fake_fip['floating_network_id'] = ext_net_id
fake_fip['router_id'] = fake_route_dict['id']
mock_fip.return_value = fake_fip
fake_router = copy.deepcopy(fake_route_dict)
mock_router.return_value = fake_router
result = self.net_resource.extract_floatingips([fake_fip['id']])
self.assertEqual(1, len(result))
self.assertEqual(fake_fip['id'], result[0].id)
self.assertEqual(6, len(self.net_resource.get_collected_resources()))
deps = self.net_resource.get_collected_dependencies()
self.assertEqual(6, len(deps))
fip_dep = deps.get(fake_fip['id'])
self.assertEqual(1, len(fip_dep.dependencies))
router_dep = deps.get(fake_fip['router_id'])
self.assertEqual(1, len(router_dep.dependencies))
@mock.patch.object(neutron.API, 'get_port')
@mock.patch.object(neutron.API, 'get_network')
@mock.patch.object(neutron.API, 'get_floatingip')
def test_extract_fips_with_port(self, mock_fip, mock_net, mock_port):
        # NOTE: without a router; with a secgroup and a router interface.
        # Here net_0 and port_0 are put in the cache without any
        # dependencies, so only net_0, fip, fipAss and port_0 end up being
        # extracted.
# ----------------------------------------------------
# | net_1 |
# | / | |
# | subnet_0(ext-sb) / subnet_1 |
# | | \ | |
# | | \ | |
# | net_0(ext-net)<---fip<----fipAss--->port_0 |
# ----------------------------------------------------
fake_fip = copy.deepcopy(fake_fip_dict)
fake_fip['floating_network_id'] = ext_net_id
fake_fip['port_id'] = fake_port_dict['id']
mock_fip.return_value = fake_fip
fake_port = copy.deepcopy(fake_port_dict)
mock_port.return_value = fake_port
fake_port_id = fake_port['id']
fake_port_name = fake_port['name']
fake_port_res = resource.Resource('port_0',
'OS::Neutron::Port',
fake_port_id)
fake_port_dep = resource.ResourceDependency(fake_port_id,
fake_port_name,
'port_0',
'OS::Neutron::Port')
fake_net = copy.deepcopy(fake_net_dict)
fake_net['id'] = fake_fip['floating_network_id']
fake_net_id = fake_net['id']
mock_net.return_value = fake_net
fake_net_res = resource.Resource('net_0',
'OS::Neutron::Net',
fake_net_id)
fake_net_dep = resource.ResourceDependency(fake_net_id,
fake_net_dict['name'],
'net_0',
'OS::Neutron::Net')
self.net_resource = networks.NetworkResource(
self.context,
collected_resources={
fake_port_id: fake_port_res,
fake_net_id: fake_net_res
},
collected_dependencies={
fake_port_id: fake_port_dep,
fake_net_id: fake_net_dep
}
)
result = self.net_resource.extract_floatingips([fake_fip['id']])
self.assertEqual(1, len(result))
self.assertEqual(4, len(self.net_resource.get_collected_resources()))
deps = self.net_resource.get_collected_dependencies()
net_dep = deps.pop(fake_net_id)
fip_dep = deps.pop(fake_fip['id'])
port_dep = deps.pop(fake_port_id)
fip_ass_dep = deps.values()[0]
self.assertIn(net_dep.name_in_template, fip_dep.dependencies)
self.assertIn(fip_dep.name_in_template, fip_ass_dep.dependencies)
self.assertIn(port_dep.name_in_template, fip_ass_dep.dependencies)
@mock.patch.object(neutron.API, 'get_router')
@mock.patch.object(neutron.API, 'get_port')
@mock.patch.object(neutron.API, 'get_subnet', mock_new_subnet)
@mock.patch.object(neutron.API, 'get_network', mock_new_net)
@mock.patch.object(neutron.API, 'get_floatingip')
def test_extract_fips_with_port_and_router(self, mock_fip, mock_port,
mock_router):
        # NOTE: with both a port and a router
# ------------------------------------------------------
# | router-->net_1 |
# | / | |
# | subnet_0(ext-sb) / subnet_1 |
# | | \ | |
# | | \ | |
# | net_0(ext-net)<---fip<----fipAss--->port_0 |
# ------------------------------------------------------
fake_fip = copy.deepcopy(fake_fip_dict)
fake_fip['floating_network_id'] = ext_net_id
fake_fip['port_id'] = fake_port_dict['id']
fake_fip['router_id'] = fake_route_dict['id']
mock_fip.return_value = fake_fip
fake_router = copy.deepcopy(fake_route_dict)
mock_router.return_value = fake_router
fake_port = copy.deepcopy(fake_port_dict)
mock_port.return_value = fake_port
result = self.net_resource.extract_floatingips([fake_fip['id']])
self.assertEqual(1, len(result))
self.assertEqual(8, len(self.net_resource.get_collected_resources()))
deps = self.net_resource.get_collected_dependencies()
fip_dep = deps.pop(fake_fip['id'])
net0_dep = deps.pop(fake_fip['floating_network_id'])
subnet0_dep = deps.pop(ext_subnet_id)
router_dep = deps.pop(fake_router['id'])
net1_dep = deps.pop(fake_router['external_gateway_info']['network_id'])
subnet1_dep = deps.pop(fake_net_dict['subnets'][0])
port0_dep = deps.pop(fake_port['id'])
fipass_dep = deps.values()[0]
self.assertEqual(0, len(net0_dep.dependencies))
self.assertEqual(0, len(net1_dep.dependencies))
self.assertIn(net0_dep.name_in_template, subnet0_dep.dependencies)
self.assertIn(net0_dep.name_in_template, fip_dep.dependencies)
self.assertEqual(2, len(fipass_dep.dependencies))
self.assertIn(fip_dep.name_in_template, fipass_dep.dependencies)
self.assertIn(port0_dep.name_in_template, fipass_dep.dependencies)
self.assertEqual(1, len(router_dep.dependencies))
self.assertIn(net1_dep.name_in_template, router_dep.dependencies)
self.assertIn(net1_dep.name_in_template, subnet1_dep.dependencies)
self.assertEqual(2, len(port0_dep.dependencies))
self.assertIn(net1_dep.name_in_template, port0_dep.dependencies)
self.assertIn(subnet1_dep.name_in_template, port0_dep.dependencies)
@mock.patch.object(neutron.API, 'get_subnet')
@mock.patch.object(neutron.API, 'get_network')
@mock.patch.object(neutron.API, 'router_list')
def test_extract_all_routers(self, mock_router_list,
mock_net, mock_subnet):
fake_router = copy.deepcopy(fake_route_dict)
mock_router_list.return_value = [fake_router]
fake_net = copy.deepcopy(fake_net_dict)
mock_net.return_value = fake_net
fake_subnet = copy.deepcopy(fake_subnet_dict)
mock_subnet.return_value = fake_subnet
result = self.net_resource.extract_routers([])
self.assertTrue(1 == len(result))
self.assertTrue(3 == len(self.net_resource.get_collected_resources()))
fake_router_id = fake_router['id']
deps = self.net_resource\
.get_collected_dependencies()[fake_router_id].dependencies
self.assertTrue(1 == len(deps))
self.assertEqual('network_0', deps[0])
@mock.patch.object(neutron.API, 'get_subnet')
@mock.patch.object(neutron.API, 'get_network')
@mock.patch.object(neutron.API, 'get_router')
def test_extract_routers_with_ids(self, mock_router,
mock_net, mock_subnet):
fake_router = copy.deepcopy(fake_route_dict)
mock_router.return_value = fake_router
fake_net = copy.deepcopy(fake_net_dict)
mock_net.return_value = fake_net
fake_subnet = copy.deepcopy(fake_subnet_dict)
mock_subnet.return_value = fake_subnet
result = self.net_resource.extract_routers([fake_router['id']])
self.assertTrue(1 == len(result))
self.assertTrue(3 == len(self.net_resource.get_collected_resources()))
@mock.patch.object(neutron.API, 'get_router', side_effect=Exception)
    def test_extract_routers_failed(self, mock_router):
self.assertRaises(exception.ResourceNotFound,
self.net_resource.extract_routers,
[fake_route_dict['id']])
@mock.patch.object(neutron.API, 'router_list')
def test_extract_routers_from_cache(self, mock_router_list):
fake_router = copy.deepcopy(fake_route_dict)
mock_router_list.return_value = [fake_router]
fake_router_id = fake_router['id']
fake_router_name = fake_router['name']
fake_router_res = resource.Resource(fake_router_name,
'OS::Neutron::Router',
fake_router_id)
fake_router_dep = resource.ResourceDependency(
fake_router_id, fake_router_name, 'router_0',
'OS::Neutron::Router')
self.net_resource = networks.NetworkResource(
self.context,
collected_resources={fake_router_id: fake_router_res},
collected_dependencies={fake_router_id: fake_router_dep})
result = self.net_resource.extract_routers([])
self.assertTrue(1 == len(result))
self.assertEqual(fake_router_id, result[0].id)
self.assertTrue(1 == len(self.net_resource.get_collected_resources()))
self.assertFalse(len(self.net_resource.get_collected_dependencies()
[fake_router_id].dependencies))
@mock.patch.object(neutron.API, 'get_router')
@mock.patch.object(neutron.API, 'port_list')
@mock.patch.object(neutron.API, 'get_subnet', mock_new_subnet)
@mock.patch.object(neutron.API, 'get_network', mock_new_net)
def test_extract_network_resource(self, mock_port_list, mock_router):
# structure chart
# -------------------------------------------------
# | net_0 subnet_1 |
# | | | |
# | | | |
# | subnet_0 <---router_if---->router--->net_1 |
# -------------------------------------------------
# 1. extract net: returned from mock_new_net
# fake_net = copy.deepcopy(fake_net_dict)
# mock_net.return_value = fake_net
# 2. extract subnet for net: returned from mock_new_subnet
# fake_subnet = copy.deepcopy(fake_subnet_dict)
# mock_subnet.return_value = fake_subnet
# 3. extract interface: 'network:router_interface'
fake_port = copy.deepcopy(fake_port_dict)
if_id = fake_port['id']
# device_id for connecting to router
fake_port['device_id'] = fake_route_dict['id']
fake_port['fixed_ips'][0]['subnet_id'] = sb_of_net_0_id
fake_port['network_id'] = net_0_id
mock_port_list.return_value = [fake_port]
# 3.1 extract router interface
fake_router = copy.deepcopy(fake_route_dict)
# network_id for associating with other net
fake_router['external_gateway_info']['network_id'] = other_net_id
mock_router.return_value = fake_router
        # 3.2 generate interface: no interaction with any other service is
        # needed; only the Res and ResDep are constructed on the conveyor
        # resource side.
other_net_ids = [other_net_id]
self.net_resource.extract_network_resource(net_0_id, other_net_ids)
res = self.net_resource.get_collected_resources()
deps = self.net_resource.get_collected_dependencies()
self.assertEqual(6, len(res))
self.assertEqual(6, len(deps))
# Check deps
net_0_dep = deps.get(net_0_id)
net_0_sb_dep = deps.get(sb_of_net_0_id)
other_net_dep = deps.get(other_net_id)
other_net_sb_dep = deps.get(sb_of_other_net_id)
if_dep = deps.get(if_id)
router_dep = deps.get(fake_router['id'])
self.assertEqual(0, len(net_0_dep.dependencies))
self.assertEqual(0, len(other_net_dep.dependencies))
self.assertIn(net_0_dep.name_in_template, net_0_sb_dep.dependencies)
self.assertIn(other_net_dep.name_in_template,
other_net_sb_dep.dependencies)
self.assertIn(other_net_dep.name_in_template,
router_dep.dependencies)
self.assertIn(router_dep.name_in_template, if_dep.dependencies)
self.assertIn(net_0_sb_dep.name_in_template, if_dep.dependencies)
self.assertEqual(2, len(if_dep.dependencies))
@mock.patch.object(neutron.API, 'get_router')
@mock.patch.object(neutron.API, 'port_list')
@mock.patch.object(neutron.API, 'get_subnet', mock_new_subnet)
@mock.patch.object(neutron.API, 'get_network', mock_new_net)
def test_extract_networks_resource(self, mock_port_list, mock_router):
fake_port = copy.deepcopy(fake_port_dict)
if_id = fake_port['id']
fake_port['device_id'] = fake_route_dict['id']
fake_port['fixed_ips'][0]['subnet_id'] = sb_of_net_0_id
fake_port['network_id'] = net_0_id
mock_port_list.return_value = [fake_port]
fake_router = copy.deepcopy(fake_route_dict)
fake_router['external_gateway_info']['network_id'] = other_net_id
mock_router.return_value = fake_router
self.net_resource.extract_networks_resource([net_0_id, other_net_id])
res = self.net_resource.get_collected_resources()
deps = self.net_resource.get_collected_dependencies()
self.assertEqual(6, len(res))
self.assertEqual(6, len(deps))
# Check deps
net_0_dep = deps.get(net_0_id)
net_0_sb_dep = deps.get(sb_of_net_0_id)
other_net_dep = deps.get(other_net_id)
other_net_sb_dep = deps.get(sb_of_other_net_id)
if_dep = deps.get(if_id)
router_dep = deps.get(fake_router['id'])
self.assertEqual(0, len(net_0_dep.dependencies))
self.assertEqual(0, len(other_net_dep.dependencies))
self.assertIn(net_0_dep.name_in_template, net_0_sb_dep.dependencies)
self.assertIn(other_net_dep.name_in_template,
other_net_sb_dep.dependencies)
self.assertIn(other_net_dep.name_in_template,
router_dep.dependencies)
self.assertIn(router_dep.name_in_template, if_dep.dependencies)
self.assertIn(net_0_sb_dep.name_in_template, if_dep.dependencies)
self.assertEqual(2, len(if_dep.dependencies))
|
|
# Fed
#
# A portal for federated access to AWS console and resources
#
#
# so many imports
import os
import base64
from io import BytesIO
from flask import (
Flask,
render_template,
redirect,
url_for,
flash,
session,
abort,
request,
send_from_directory,
)
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.contrib.fixers import ProxyFix
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, logout_user, current_user
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Length, EqualTo
import onetimepass
import pyqrcode
import boto3
from botocore.exceptions import ClientError
import json
import urllib.parse
import requests
import safe
from datetime import datetime
import pprint
import logging
import arrow
from apscheduler.schedulers.background import BackgroundScheduler
import time
import re
# from logging.handlers import RotatingFileHandler
# import yaml
# create application instance
app = Flask(__name__, static_url_path="/static")
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object("config")
if app.config["ENV"] == "dev":
debug = True
else:
debug = False
# initialize extensions
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
lm = LoginManager(app)
awsconfig = {"users": {}, "roles": {}}
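# NOTE: a minimal sketch of the configuration shape this app expects, inferred
# from how awsconfig is read below (get_groups/get_accounts, authorize_ssh,
# getsessioncreds, /configure). All values are placeholders, not real data:
#
#   awsconfig = {
#       "secret": "<shared secret checked by /configure>",
#       "users": {"alice": ["developers", "fedadmin"]},
#       "roles": {"developers": ["ssh-dev", "prod-readonly"]},
#       "accounts": {
#           "ssh-dev": {
#               "id": "<AWS access key id>",
#               "secret": "<AWS secret access key>",
#               "description": "Dev account (SSH managed)",
#               "security_groups": [{"sgid": "sg-0123456789abcdef0",
#                                    "region_name": "us-east-1"}],
#           },
#       },
#   }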
# from https://blog.hartwork.org/posts/flask-behind-a-reverse-proxy-actual-client-ips/
def fix_werkzeug_logging():
from werkzeug.serving import WSGIRequestHandler
def address_string(self):
forwarded_for = self.headers.get(
'X-Forwarded-For', '').split(',')
if forwarded_for and forwarded_for[0]:
return forwarded_for[0]
else:
return self.client_address[0]
WSGIRequestHandler.address_string = address_string
# Print timestamped line to log
def plog(text):
print(f"{ arrow.now().isoformat() } { text }")
# Modify the description field of an AWS Security Group Rule targeted by CIDR
def modify_description(account, cidr, description):
accountinfo = awsconfig["accounts"][account]
session = boto3.Session(accountinfo["id"], accountinfo["secret"])
security_groups = accountinfo["security_groups"]
for sg in security_groups:
sgid = sg["sgid"]
region = sg["region_name"]
ec2 = session.client("ec2", region_name=region)
try:
ec2.update_security_group_rule_descriptions_ingress(
GroupId=sgid,
IpPermissions=[
{
"IpProtocol": "tcp",
"FromPort": 22,
"ToPort": 22,
"IpRanges": [{"CidrIp": cidr, "Description": description}],
}
],
)
except ClientError as e:
plog(f"exception updating description {description} in {sgid} : {e.response['Error']['Code']}")
# Add user and their CIDR to the security group
def authorize_ssh(account, cidr, username):
if "ssh-" in account:
plog(f"Adding {username} {cidr} to {account}")
description = (
"fed-" + username + ":" + arrow.utcnow().shift(hours=20).isoformat()
)
accountinfo = awsconfig["accounts"].get(account)
if accountinfo is None:
plog(f'account {account} not found, skipping')
return
session = boto3.Session(accountinfo["id"], accountinfo["secret"])
security_groups = accountinfo["security_groups"]
for sg in security_groups:
sgid = sg["sgid"]
region = sg["region_name"]
ec2 = session.client("ec2", region_name=region)
try:
ec2.authorize_security_group_ingress(
GroupId=sgid,
IpPermissions=[
{
"IpProtocol": "tcp",
"FromPort": 22,
"ToPort": 22,
"IpRanges": [{"CidrIp": cidr, "Description": description}],
}
],
)
except Exception:
modify_description(account, cidr, description)
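# NOTE: a short illustration of the rule-description convention used above
# (the values are assumed for the example). authorize_ssh tags each ingress
# rule as "fed-<username>:<ISO-8601 expiry>" roughly 20 hours out, e.g.
#   "fed-alice:2024-01-02T03:04:05.678901+00:00"
# expire_sg() below splits that description on the first ":" and revokes the
# rule once the expiry timestamp is in the past.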
# Search the security groups, removing any rule that allows a specific CIDR
def revoke_ssh(account, cidr):
if "ssh-" in account:
accountinfo = awsconfig["accounts"][account]
session = boto3.Session(accountinfo["id"], accountinfo["secret"])
security_groups = accountinfo["security_groups"]
for sg in security_groups:
try:
sgid = sg["sgid"]
region = sg["region_name"]
ec2 = session.client("ec2", region_name=region)
ec2.revoke_security_group_ingress(
GroupId=sgid,
IpPermissions=[
{
"IpProtocol": "tcp",
"FromPort": 22,
"ToPort": 22,
"IpRanges": [{"CidrIp": cidr}],
}
],
)
except ClientError as e:
plog(f"Failed to remove {cidr} from {sgid} in {account} {region} : {e.response['Error']['Code']}")
# Scan a security group, looking at timestamps on rules, and expire old rules
def expire_sg(account):
if "ssh-" in account:
accountinfo = awsconfig["accounts"][account]
session = boto3.Session(accountinfo["id"], accountinfo["secret"])
security_groups = accountinfo["security_groups"]
for sg in security_groups:
sgid = sg["sgid"]
region = sg["region_name"]
# plog(f"Trying to expire {sgid} {region}")
ec2 = session.client("ec2", region_name=region)
sginfo = ec2.describe_security_groups(GroupIds=[sgid])["SecurityGroups"][0]
for ipp in sginfo["IpPermissions"]:
for rule in ipp["IpRanges"]:
cidr = rule["CidrIp"]
                    description = rule.get("Description", "")  # guard against rules without a description
r = description.split(":", 1)
if len(r) > 1:
user, expires = r
if "fed-" in user:
e = arrow.get(expires)
now = arrow.utcnow()
# plog(f'{user} expires {e.humanize()}')
if e < now:
plog(f"expiring {user}")
revoke_ssh(account, cidr)
# else:
# plog(f'{r[0]} is ignored')
# add user to SG for every "ssh-" account
def update_security_groups(cidr, user):
accounts = get_accounts(user)
for account in accounts:
if "ssh-" in account:
authorize_ssh(account, cidr, user)
# Call expire_sg for all relevant accounts, with a delay so we don't kill the API
def expire_all_sgs():
accounts = awsconfig.get("accounts")
if accounts:
# plog("Expiring Security Groups")
for account in accounts:
if "ssh-" in account:
time.sleep(5)
expire_sg(account)
# Get all of the groups for a user
def get_groups(user):
return awsconfig["users"].get(user, [])
# Return a list of accounts available to a given user
def get_accounts(user):
out = []
grouplist = awsconfig["users"].get(user, [])
for group in grouplist:
roles = awsconfig["roles"].get(group, [])
for role in roles:
out.append(role)
return list(sorted(set(out)))
# Return the description field for a given account
def get_account_description(name):
accountinfo = awsconfig["accounts"].get(name)
if accountinfo is None:
return f'account_{name}_not_found'
return accountinfo.get("description", name)
# Export for use in templates
app.jinja_env.globals.update(get_accounts=get_accounts)
app.jinja_env.globals.update(get_account_description=get_account_description)
# return session creds for a given account, user, and optional policy
def getsessioncreds(account, user, policy):
if not policy:
policy = '{"Statement":[{"Resource":"*","Action":"*","Effect":"Allow"}],"Version":"2012-10-17"}'
accountinfo = awsconfig["accounts"][account]
session = boto3.Session(accountinfo["id"], accountinfo["secret"])
sts = session.client("sts")
fedname = "fed-" + user
fedname = re.sub(r"[^\w+=,.@-]", '', fedname)
usersession = sts.get_federation_token(Name=fedname, Policy=policy)
creds = usersession.get("Credentials")
return json.dumps(
{
"sessionId": creds["AccessKeyId"],
"sessionKey": creds["SecretAccessKey"],
"sessionToken": creds["SessionToken"],
}
)
# return session creds environment return for scripts
def getsessioncredsenv(account, user, policy):
if not policy:
policy = '{"Statement":[{"Resource":"*","Action":"*","Effect":"Allow"}],"Version":"2012-10-17"}'
accountinfo = awsconfig["accounts"][account]
session = boto3.Session(accountinfo["id"], accountinfo["secret"])
sts = session.client("sts")
usersession = sts.get_federation_token(Name="fsc-" + user, Policy=policy)
creds = usersession.get("Credentials")
return "\n".join(
[
"export AWS_ACCESS_KEY_ID=" + creds["AccessKeyId"],
"export AWS_SECRET_ACCESS_KEY=" + creds["SecretAccessKey"],
"export AWS_SESSION_TOKEN=" + creds["SessionToken"],
]
)
# Get a URL for signin for a given account/user/policy
def getfedlink(account, user, policy):
session_json = getsessioncreds(account, user, policy)
issuer_url = app.config["SCHEME"] + "://" + app.config["FQDN"]
console_url = "https://console.aws.amazon.com/ec2"
signin_url = "https://signin.aws.amazon.com/federation"
get_signin_token_url = (
signin_url
+ "?Action=getSigninToken&SessionType=json&Session="
+ urllib.parse.quote_plus(session_json)
)
returned_content = requests.get(get_signin_token_url)
signin_token = returned_content.json().get("SigninToken")
signin_token_param = "&SigninToken=" + urllib.parse.quote_plus(signin_token)
issuer_param = "&Issuer=" + urllib.parse.quote_plus(issuer_url)
destination_param = "&Destination=" + urllib.parse.quote_plus(console_url)
login_url = (
signin_url
+ "?Action=login"
+ signin_token_param
+ issuer_param
+ destination_param
)
# @c.out("status" => "303", "Connection" => "close", "Content-Length" => 1, "Location" => login_url) {' '}
return login_url
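# NOTE: a sketch of the federation flow implemented by getfedlink(), with
# placeholder values. Given temporary credentials from sts.get_federation_token,
# the Session JSON is exchanged for a SigninToken and turned into a console URL:
#
#   https://signin.aws.amazon.com/federation?Action=getSigninToken
#       &SessionType=json&Session=<urlencoded sessionId/sessionKey/sessionToken>
#   -> {"SigninToken": "..."}
#   https://signin.aws.amazon.com/federation?Action=login
#       &SigninToken=<token>&Issuer=<this portal>&Destination=<console URL>
#
# The /aws/<accountname> route below redirects the browser to that login URL.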
class User(UserMixin, db.Model):
"""User model."""
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True)
fullname = db.Column(db.String(64))
email = db.Column(db.String(64))
password_hash = db.Column(db.String(128))
otp_secret = db.Column(db.String(16))
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.otp_secret is None:
# generate a random secret
self.otp_secret = base64.b32encode(os.urandom(10)).decode("utf-8")
@property
def password(self):
raise AttributeError("password is not a readable attribute")
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def get_totp_uri(self):
return "otpauth://totp/awsfed:{0}?secret={1}&issuer=AWSFed".format(
self.username, self.otp_secret
)
def verify_totp(self, token):
return onetimepass.valid_totp(token, self.otp_secret, window=1)
# @property <-- This doesn't work. I don't know why. Is a puzzlement.
def is_admin(self):
return "fedadmin" in get_groups(self.username)
is_admin = property(is_admin)
@lm.user_loader
def load_user(user_id):
"""User loader callback for Flask-Login."""
return User.query.get(int(user_id))
class RegisterForm(FlaskForm):
"""Registration form."""
username = StringField("Username", validators=[DataRequired(), Length(1, 24)])
fullname = StringField("Full Name", validators=[DataRequired(), Length(1, 64)])
email = StringField("Email Address", validators=[DataRequired(), Length(1, 64)])
password = PasswordField("Password", validators=[DataRequired()])
password_again = PasswordField(
"Password again", validators=[DataRequired(), EqualTo("password")]
)
token = PasswordField(
"Registration secret from the wiki (if you don't have wiki access, talk to someone!)", validators=[DataRequired()]
)
submit = SubmitField("Register")
def validate_password(self, field):
c = safe.check(field.data)
if bool(c):
return True
self.password.errors.append(str(c))
return False
class LoginForm(FlaskForm):
"""Login form."""
username = StringField("Username", validators=[DataRequired(), Length(1, 64)])
password = PasswordField("Password", validators=[DataRequired()])
token = StringField("Token", validators=[DataRequired(), Length(6, 6)])
submit = SubmitField("Login")
# inject debug flag into all templates
@app.context_processor
def inject_debug():
return dict(debug=app.debug)
@app.route("/")
def index():
return render_template("index.html", request=request, url=app.config["URL"])
@app.route('/robots.txt')
def robots():
return send_from_directory(os.path.join(app.root_path, 'static'),
'robots.txt', mimetype='text/plain')
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/apple-touch-icon.png')
def appletouchicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'apple-touch-icon-180x180.png', mimetype='image/png')
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/time")
def get_time():
"""Get current server time - used for troubleshooting, MFA can be picky"""
return str(datetime.now().timestamp())
# new user registration
@app.route("/register", methods=["GET", "POST"])
def register():
"""User registration route."""
if current_user.is_authenticated:
# if user is logged in we get out of here
return redirect(url_for("index", _external=True, _scheme=app.config["SCHEME"]))
form = RegisterForm()
if form.validate_on_submit():
token = form.token.data
if token != "elderberries":
plog(f"Bad registration secret input {token}")
flash("Unauthorized Registration Denied. Go read the wiki to get the right secret.")
return redirect(
url_for("register", _external=True, _scheme=app.config["SCHEME"])
)
user = User.query.filter_by(username=form.username.data).first()
if user is not None:
plog(f'Username already exists: { form.username.data }')
flash("Username already exists.")
return redirect(
url_for("register", _external=True, _scheme=app.config["SCHEME"])
)
# add new user to the database
user = User(
username=form.username.data,
password=form.password.data,
fullname=form.fullname.data,
email=form.email.data,
)
db.session.add(user)
db.session.commit()
# redirect to the two-factor auth page, passing username in session
session["username"] = user.username
plog(url_for("two_factor_setup", _external=True, _scheme=app.config["SCHEME"]))
return redirect(
url_for("two_factor_setup", _external=True, _scheme=app.config["SCHEME"])
)
return render_template("register.html", form=form)
# Display page with the QR code as part of registration
@app.route("/twofactor")
def two_factor_setup():
if "username" not in session:
return redirect(url_for("index", _external=True, _scheme=app.config["SCHEME"]))
user = User.query.filter_by(username=session["username"]).first()
if user is None:
return redirect(url_for("index", _external=True, _scheme=app.config["SCHEME"]))
# since this page contains the sensitive qrcode, make sure the browser
# does not cache it
return (
render_template("two-factor-setup.html"),
200,
{
"Cache-Control": "no-cache, no-store, must-revalidate",
"Pragma": "no-cache",
"Expires": "0",
},
)
# Display a QR Code for the User's MFA
@app.route("/qrcode")
def qrcode():
if "username" not in session:
abort(404)
user = User.query.filter_by(username=session["username"]).first()
if user is None:
abort(404)
# for added security, remove username from session
del session["username"]
# render qrcode for FreeTOTP
url = pyqrcode.create(user.get_totp_uri())
stream = BytesIO()
url.svg(stream, scale=3)
return (
stream.getvalue(),
200,
{
"Content-Type": "image/svg+xml",
"Cache-Control": "no-cache, no-store, must-revalidate",
"Pragma": "no-cache",
"Expires": "0",
},
)
@app.route("/login", methods=["GET", "POST"])
def login():
"""User login route."""
remote_ip = request.environ.get("HTTP_X_FORWARDED_FOR", request.environ.get("REMOTE_ADDR"))
if current_user.is_authenticated:
# if user is logged in we get out of here
return redirect(url_for("index", _external=True, _scheme=app.config["SCHEME"]))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is not None:
plog(
"LOGIN "
+ " ip:"
+ str(remote_ip)
+ " user:"
+ str(user.username)
+ " pass:"
+ str(user.verify_password(form.password.data))
+ " mfa:"
+ str(user.verify_totp(form.token.data))
)
if (
user is None
or not user.verify_password(form.password.data)
or not user.verify_totp(form.token.data)
):
flash("Invalid username, password or token.")
return redirect(
url_for("login", _external=True, _scheme=app.config["SCHEME"])
)
# log user in
login_user(user)
# Update Security Groups
update_security_groups(remote_ip + "/32", user.username)
flash("You are now logged in!")
return redirect(url_for("index", _external=True, _scheme=app.config["SCHEME"]))
return render_template("login.html", form=form)
@app.route("/logout")
def logout():
"""User logout route."""
try:
if current_user.is_authenticated:
user = current_user.username
remote_ip = request.environ.get(
"HTTP_X_FORWARDED_FOR", request.environ.get("REMOTE_ADDR")
)
plog(f"LOGOUT {user} {remote_ip}")
accounts = get_accounts(user)
for account in accounts:
revoke_ssh(account, remote_ip + "/32")
logout_user()
return redirect(url_for("index", _external=True, _scheme=app.config["SCHEME"]))
except Exception as e:
        plog(f'logout issue for {user} {remote_ip} : {e}')
logout_user()
return redirect(url_for("index", _external=True, _scheme=app.config["SCHEME"]))
# This handles the actual Console URIs the user clicks on
@app.route("/aws/<accountname>")
def aws(accountname):
if current_user.is_authenticated:
user = current_user.username
if accountname in get_accounts(user):
url = getfedlink(accountname, user, None)
return redirect(url, code=303)
else:
abort(404)
else:
flash("session expired. Please log in again.")
return redirect(url_for("login", _external=True, _scheme=app.config["SCHEME"]))
# This handles the credential-export URIs the user clicks on
@app.route("/awscreds/<accountname>")
def awscreds(accountname):
if current_user.is_authenticated:
user = current_user.username
if accountname in get_accounts(user):
creds = getsessioncredsenv(accountname, user, None)
return render_template("awscreds.html", accountname=accountname, creds=creds, awsconfig=awsconfig)
else:
abort(404)
else:
flash("session expired. Please log in again.")
return redirect(url_for("login", _external=True, _scheme=app.config["SCHEME"]))
# Configuration upload.
# If the config is blank, accept the first config uploaded.
# Thereafter, accept uploads if the secret matches.
@app.route("/configure", methods=["POST"])
def configure():
global awsconfig
secret1 = awsconfig.get("secret")
newconfig = request.get_json()
if secret1 is None:
awsconfig = newconfig
plog("New configuration loaded")
return "OK"
secret1 = awsconfig.get("secret")
secret2 = newconfig.get("secret")
if secret2 is None:
return "NO secret is not present"
if secret1 == secret2:
plog("Updated configuration loaded.")
awsconfig = newconfig
return "OK"
return "NO"
@app.route("/admin")
def admin():
if current_user.is_authenticated:
if "master" in get_groups(current_user.username):
users = User.query.all()
return render_template("admin.html", users=users, awsconfig=awsconfig)
abort(403)
@app.route("/delete_user", methods=["POST"])
def delete_user():
if current_user.is_authenticated:
if "master" in get_groups(current_user.username):
user_to_delete = request.form["userid"]
flash(f'Deleted user "{user_to_delete}""')
User.query.filter(User.username == user_to_delete).delete()
db.session.commit()
return redirect(
url_for("admin", _external=True, _scheme=app.config["SCHEME"])
)
abort(403)
# Drop a copy of the request to logging
@app.route("/log")
def log_callback():
"""Dump the request object to stdout for debugging"""
plog(pprint.pformat(request.__dict__, depth=5))
return "OK"
# test the key for a given account to confirm functionality
# this is called by the key rotation external script
@app.route("/testkey/<keyname>")
def testkey(keyname):
try:
accountinfo = awsconfig["accounts"][keyname]
session = boto3.Session(accountinfo["id"], accountinfo["secret"])
sts = session.client("sts")
usersession = sts.get_caller_identity()
return usersession['Arn']
except Exception as e:
abort(404)
# test all keys
# Should probably hardcode this to specific IPs
@app.route("/testaccesskeys")
def testkeys():
success = 0
fail = 0
bad = []
for keyname in awsconfig["accounts"]:
try:
accountinfo = awsconfig["accounts"][keyname]
session = boto3.Session(accountinfo["id"], accountinfo["secret"])
sts = session.client("sts")
sts.get_caller_identity()
success = success + 1
except Exception as e:
fail = fail + 1
bad.append(keyname)
return json.dumps(
{
"success": success,
"fail": fail,
"bad": bad
}
)
# Custom 404 handler because we have needs.
@app.errorhandler(404)
def page_not_found(e):
# note that we set the 404 status explicitly
# remote_ip = request.environ.get("HTTP_X_FORWARDED_FOR", request.environ.get("REMOTE_ADDR"))
# plog(f'404 from {remote_ip}') # log the actual source IP because logging gets the ALB internal IP.
time.sleep(5) # tarpit so scanners have a bad day
return render_template('404.html'), 404
# Add headers to all outgoing responses to deal with common security concerns
@app.after_request
def apply_caching(response):
response.headers['server'] = "Zanzibar" # Let's not tell people what we run
response.headers["X-Frame-Options"] = "deny"
response.headers["X-Content-Type-Options"] = "nosniff"
response.headers["X-XSS-Protection"] = "1; mode=block"
response.headers["Referrer-Policy"] = "no-referrer"
response.headers["Strict-Transport-Security"] = "max-age=31536000 ; includeSubDomains ; preload"
response.headers["Content-Security-Policy"] = ("default-src 'self' "
"'sha256-JMZOU8BcaItzGyYxtaMNNNUOnQq9LQbKEaXfw/WUkfo=' "
"'sha256-RQr56zHCuub99h6rKodb5eik75gEYMw/OD6cYTtCpOM=' "
"cdnjs.cloudflare.com "
"; object-src " + app.config["URL"]
# " ; script-src 'strict-dynamic' "
)
return response
if __name__ == "__main__":
# create database tables if they don't exist yet
db.create_all()
# app.logger.disabled = True
# log = logging.getLogger("werkzeug")
# log.disabled = True
class Stoplogs(logging.Filter):
"""Stop logging messages from health checks"""
def __init__(self, name=None):
pass
def filter(self, rec):
# Stop logging of ALBs doing health checks
logblacklist = ["10.30.253.123", "10.30.253.29", "10.30.254.70",
"10.30.253.121", "10.30.253.23", "10.30.254.130"]
if '"GET / HTTP/1.1" 200 -' in rec.msg:
for ip in logblacklist:
if ip in rec.msg:
return False
return True
log = logging.getLogger("werkzeug")
stoplogs = Stoplogs()
log.addFilter(stoplogs)
scheduler = BackgroundScheduler(timezone="UTC")
scheduler.add_job(func=expire_all_sgs, trigger="interval", seconds=300)
scheduler.start()
if debug:
app.run(host="0.0.0.0", debug=True, threaded=True)
else:
app.run(host="0.0.0.0", debug=False, use_evalex=False, threaded=True)
|
|
# -*- coding: utf-8 -*-
'''
Support for OSQuery - https://osquery.io.
.. versionadded:: 2015.8.0
'''
from __future__ import absolute_import
# Import python libs
import json
# Import Salt libs
import salt.utils
import logging
log = logging.getLogger(__name__)
__func_alias__ = {
'file_': 'file',
'hash_': 'hash',
'time_': 'time',
}
def __virtual__():
if salt.utils.which('osqueryi'):
return 'osquery'
return False
def _table_attrs(table):
'''
Helper function to find valid table attributes
'''
cmd = 'osqueryi --json "pragma table_info({0})"'.format(table)
res = __salt__['cmd.run_all'](cmd)
if res['retcode'] == 0:
attrs = []
text = json.loads(res['stdout'])
for item in text:
attrs.append(item['name'])
return attrs
return False
def _osquery(sql, format='json'):
'''
Helper function to run raw osquery queries
'''
ret = {
'result': True,
}
cmd = 'osqueryi --json "{0}"'.format(sql)
res = __salt__['cmd.run_all'](cmd)
if res['retcode'] == 0:
ret['data'] = json.loads(res['stdout'])
else:
ret['result'] = False
ret['error'] = res['stderr']
return ret
def _osquery_cmd(table, attrs=None, where=None, format='json'):
'''
Helper function to run osquery queries
'''
ret = {
'result': True,
}
if attrs:
if isinstance(attrs, list):
valid_attrs = _table_attrs(table)
if valid_attrs:
for a in attrs:
if a not in valid_attrs:
ret['result'] = False
ret['comment'] = '{0} is not a valid attribute for table {1}'.format(a, table)
return ret
_attrs = ','.join(attrs)
else:
ret['result'] = False
ret['comment'] = 'Invalid table {0}.'.format(table)
return ret
else:
ret['comment'] = 'attrs must be specified as a list.'
ret['result'] = False
return ret
else:
_attrs = '*'
sql = 'select {0} from {1}'.format(_attrs, table)
if where:
sql = '{0} where {1}'.format(sql, where)
sql = '{0};'.format(sql)
res = _osquery(sql)
if res['result']:
ret['data'] = res['data']
else:
ret['comment'] = res['error']
return ret
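# NOTE: a minimal usage sketch for the helper above (values are illustrative):
#
#   _osquery_cmd(table='users', attrs=['username', 'uid'], where='uid >= 1000')
#   # -> {'result': True, 'data': [{'username': '...', 'uid': '...'}, ...]}
#   # -> {'result': False, 'comment': '...'} when the table or attrs are
#   #    invalid, or osqueryi itself returns an error.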
def version():
'''
Return version of osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.version
'''
res = _osquery_cmd(table='osquery_info', attrs=['version'])
    if res.get('result') and isinstance(res.get('data'), list) and res['data']:
        return res['data'][0].get('version', '') or False
    return False
def rpm_packages(attrs=None, where=None):
'''
    Return rpm_packages information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.rpm_packages
'''
if __grains__['os_family'] == 'RedHat':
return _osquery_cmd(table='rpm_packages', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Red Hat based systems.'}
def kernel_integrity(attrs=None, where=None):
'''
Return kernel_integrity information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.kernel_integrity
'''
if __grains__['os_family'] in ['RedHat', 'Debian']:
return _osquery_cmd(table='kernel_integrity', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}
def kernel_modules(attrs=None, where=None):
'''
Return kernel_modules information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.kernel_modules
'''
if __grains__['os_family'] in ['RedHat', 'Debian']:
return _osquery_cmd(table='kernel_modules', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}
def memory_map(attrs=None, where=None):
'''
Return memory_map information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.memory_map
'''
if __grains__['os_family'] in ['RedHat', 'Debian']:
return _osquery_cmd(table='memory_map', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}
def process_memory_map(attrs=None, where=None):
'''
Return process_memory_map information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.process_memory_map
'''
if __grains__['os_family'] in ['RedHat', 'Debian']:
return _osquery_cmd(table='process_memory_map', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}
def shared_memory(attrs=None, where=None):
'''
Return shared_memory information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.shared_memory
'''
if __grains__['os_family'] in ['RedHat', 'Debian']:
return _osquery_cmd(table='shared_memory', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}
def apt_sources(attrs=None, where=None):
'''
Return apt_sources information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.apt_sources
'''
if __grains__['os_family'] == 'Debian':
return _osquery_cmd(table='apt_sources', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Debian based systems.'}
def deb_packages(attrs=None, where=None):
'''
Return deb_packages information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.deb_packages
'''
if __grains__['os_family'] == 'Debian':
return _osquery_cmd(table='deb_packages', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Debian based systems.'}
def acpi_tables(attrs=None, where=None):
'''
Return acpi_tables information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.acpi_tables
'''
return _osquery_cmd(table='acpi_tables', attrs=attrs, where=where)
def arp_cache(attrs=None, where=None):
'''
Return arp_cache information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.arp_cache
'''
return _osquery_cmd(table='arp_cache', attrs=attrs, where=where)
def block_devices(attrs=None, where=None):
'''
Return block_devices information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.block_devices
'''
return _osquery_cmd(table='block_devices', attrs=attrs, where=where)
def cpuid(attrs=None, where=None):
'''
Return cpuid information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.cpuid
'''
return _osquery_cmd(table='cpuid', attrs=attrs, where=where)
def crontab(attrs=None, where=None):
'''
Return crontab information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.crontab
'''
return _osquery_cmd(table='crontab', attrs=attrs, where=where)
def etc_hosts(attrs=None, where=None):
'''
Return etc_hosts information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.etc_hosts
'''
return _osquery_cmd(table='etc_hosts', attrs=attrs, where=where)
def etc_services(attrs=None, where=None):
'''
Return etc_services information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.etc_services
'''
return _osquery_cmd(table='etc_services', attrs=attrs, where=where)
def file_changes(attrs=None, where=None):
'''
Return file_changes information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.file_changes
'''
return _osquery_cmd(table='file_changes', attrs=attrs, where=where)
def groups(attrs=None, where=None):
'''
Return groups information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.groups
'''
return _osquery_cmd(table='groups', attrs=attrs, where=where)
def hardware_events(attrs=None, where=None):
'''
Return hardware_events information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.hardware_events
'''
return _osquery_cmd(table='hardware_events', attrs=attrs, where=where)
def interface_addresses(attrs=None, where=None):
'''
Return interface_addresses information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.interface_addresses
'''
return _osquery_cmd(table='interface_addresses', attrs=attrs, where=where)
def interface_details(attrs=None, where=None):
'''
Return interface_details information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.interface_details
'''
return _osquery_cmd(table='interface_details', attrs=attrs, where=where)
def kernel_info(attrs=None, where=None):
'''
Return kernel_info information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.kernel_info
'''
return _osquery_cmd(table='kernel_info', attrs=attrs, where=where)
def last(attrs=None, where=None):
'''
Return last information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.last
'''
return _osquery_cmd(table='last', attrs=attrs, where=where)
def listening_ports(attrs=None, where=None):
r'''
Return listening_ports information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.listening_ports
'''
return _osquery_cmd(table='listening_ports', attrs=attrs, where=where)
def logged_in_users(attrs=None, where=None):
r'''
Return logged_in_users information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.logged_in_users
'''
return _osquery_cmd(table='logged_in_users', attrs=attrs, where=where)
def mounts(attrs=None, where=None):
r'''
Return mounts information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.mounts
'''
return _osquery_cmd(table='mounts', attrs=attrs, where=where)
def os_version(attrs=None, where=None):
'''
Return os_version information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.os_version
'''
return _osquery_cmd(table='os_version', attrs=attrs, where=where)
def passwd_changes(attrs=None, where=None):
'''
Return passwd_changes information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.passwd_changes
'''
return _osquery_cmd(table='passwd_changes', attrs=attrs, where=where)
def pci_devices(attrs=None, where=None):
'''
Return pci_devices information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.pci_devices
'''
return _osquery_cmd(table='pci_devices', attrs=attrs, where=where)
def process_envs(attrs=None, where=None):
'''
Return process_envs information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.process_envs
'''
return _osquery_cmd(table='process_envs', attrs=attrs, where=where)
def process_open_files(attrs=None, where=None):
'''
Return process_open_files information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.process_open_files
'''
return _osquery_cmd(table='process_open_files', attrs=attrs, where=where)
def process_open_sockets(attrs=None, where=None):
'''
Return process_open_sockets information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.process_open_sockets
'''
return _osquery_cmd(table='process_open_sockets', attrs=attrs, where=where)
def processes(attrs=None, where=None):
'''
Return processes information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.processes
'''
return _osquery_cmd(table='processes', attrs=attrs, where=where)
def routes(attrs=None, where=None):
'''
Return routes information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.routes
'''
return _osquery_cmd(table='routes', attrs=attrs, where=where)
def shell_history(attrs=None, where=None):
'''
Return shell_history information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.shell_history
'''
return _osquery_cmd(table='shell_history', attrs=attrs, where=where)
def smbios_tables(attrs=None, where=None):
'''
Return smbios_tables information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.smbios_tables
'''
return _osquery_cmd(table='smbios_tables', attrs=attrs, where=where)
def suid_bin(attrs=None, where=None):
'''
Return suid_bin information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.suid_bin
'''
return _osquery_cmd(table='suid_bin', attrs=attrs, where=where)
def system_controls(attrs=None, where=None):
'''
Return system_controls information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.system_controls
'''
return _osquery_cmd(table='system_controls', attrs=attrs, where=where)
def usb_devices(attrs=None, where=None):
'''
Return usb_devices information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.usb_devices
'''
return _osquery_cmd(table='usb_devices', attrs=attrs, where=where)
def users(attrs=None, where=None):
'''
Return users information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.users
'''
return _osquery_cmd(table='users', attrs=attrs, where=where)
def alf(attrs=None, where=None):
'''
Return alf information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.alf
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='alf', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def alf_exceptions(attrs=None, where=None):
'''
Return alf_exceptions information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.alf_exceptions
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='alf_exceptions', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def alf_explicit_auths(attrs=None, where=None):
'''
Return alf_explicit_auths information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.alf_explicit_auths
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='alf_explicit_auths', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def alf_services(attrs=None, where=None):
'''
Return alf_services information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.alf_services
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='alf_services', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def apps(attrs=None, where=None):
'''
Return apps information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.apps
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='apps', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def certificates(attrs=None, where=None):
'''
Return certificates information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.certificates
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='certificates', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def chrome_extensions(attrs=None, where=None):
'''
Return chrome_extensions information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.chrome_extensions
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='chrome_extensions', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def firefox_addons(attrs=None, where=None):
'''
Return firefox_addons information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.firefox_addons
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='firefox_addons', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def homebrew_packages(attrs=None, where=None):
'''
Return homebrew_packages information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.homebrew_packages
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='homebrew_packages', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def iokit_devicetree(attrs=None, where=None):
'''
Return iokit_devicetree information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.iokit_devicetree
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='iokit_devicetree', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def iokit_registry(attrs=None, where=None):
'''
Return iokit_registry information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.iokit_registry
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='iokit_registry', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def kernel_extensions(attrs=None, where=None):
'''
Return kernel_extensions information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.kernel_extensions
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='kernel_extensions', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def keychain_items(attrs=None, where=None):
'''
Return keychain_items information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.keychain_items
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='keychain_items', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def launchd(attrs=None, where=None):
'''
Return launchd information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.launchd
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='launchd', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def nfs_shares(attrs=None, where=None):
'''
Return nfs_shares information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.nfs_shares
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='nfs_shares', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def nvram(attrs=None, where=None):
'''
Return nvram information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.nvram
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='nvram', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def preferences(attrs=None, where=None):
'''
Return preferences information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.preferences
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='preferences', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def quarantine(attrs=None, where=None):
'''
Return quarantine information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.quarantine
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='quarantine', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def safari_extensions(attrs=None, where=None):
'''
Return safari_extensions information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.safari_extensions
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='safari_extensions', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def startup_items(attrs=None, where=None):
'''
Return startup_items information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.startup_items
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='startup_items', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def xattr_where_from(attrs=None, where=None):
'''
Return xattr_where_from information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.xattr_where_from
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='xattr_where_from', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def xprotect_entries(attrs=None, where=None):
'''
Return xprotect_entries information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.xprotect_entries
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='xprotect_entries', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def xprotect_reports(attrs=None, where=None):
'''
Return xprotect_reports information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.xprotect_reports
'''
if salt.utils.is_darwin():
return _osquery_cmd(table='xprotect_reports', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on OS X systems.'}
def file_(attrs=None, where=None):
'''
Return file information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.file
'''
return _osquery_cmd(table='file', attrs=attrs, where=where)
def hash_(attrs=None, where=None):
'''
Return hash information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.hash
'''
return _osquery_cmd(table='hash', attrs=attrs, where=where)
def osquery_extensions(attrs=None, where=None):
'''
Return osquery_extensions information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.osquery_extensions
'''
return _osquery_cmd(table='osquery_extensions', attrs=attrs, where=where)
def osquery_flags(attrs=None, where=None):
'''
Return osquery_flags information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.osquery_flags
'''
return _osquery_cmd(table='osquery_flags', attrs=attrs, where=where)
def osquery_info(attrs=None, where=None):
'''
Return osquery_info information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.osquery_info
'''
return _osquery_cmd(table='osquery_info', attrs=attrs, where=where)
def osquery_registry(attrs=None, where=None):
'''
Return osquery_registry information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.osquery_registry
'''
return _osquery_cmd(table='osquery_registry', attrs=attrs, where=where)
def time_(attrs=None):
'''
Return time information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.time
'''
return _osquery_cmd(table='time', attrs=attrs)
def query(sql=None):
'''
Return the results of a raw SQL query run through osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.query "select * from users;"
'''
return _osquery(sql)
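# Hedged usage note (not part of the module above): every table helper is a
# thin wrapper around _osquery_cmd, so attrs/where can normally be passed from
# the CLI much like a raw query would be written; exact quoting varies by shell.
#
#   salt '*' osquery.users attrs="['username', 'uid']" where="uid > 1000"
#   salt '*' osquery.query "select username, uid from users where uid > 1000;"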
|
|
import json
import traceback
from typing import Any, Dict, cast
import ansible_runner
import demistomock as demisto # noqa: F401
import ssh_agent_setup
from CommonServerPython import * # noqa: F401
# Dict to Markdown Converter adapted from https://github.com/PolBaladas/torsimany/
def dict2md(json_block, depth=0):
markdown = ""
if isinstance(json_block, dict):
markdown = parseDict(json_block, depth)
if isinstance(json_block, list):
markdown = parseList(json_block, depth)
return markdown
def parseDict(d, depth):
markdown = ""
for k in d:
if isinstance(d[k], (dict, list)):
markdown += addHeader(k, depth)
markdown += dict2md(d[k], depth + 1)
else:
markdown += buildValueChain(k, d[k], depth)
return markdown
def parseList(rawlist, depth):
markdown = ""
for value in rawlist:
if not isinstance(value, (dict, list)):
index = rawlist.index(value)
markdown += buildValueChain(index, value, depth)
else:
markdown += parseDict(value, depth)
return markdown
def buildHeaderChain(depth):
list_tag = '* '
htag = '#'
chain = list_tag * (bool(depth)) + htag * (depth + 1) + \
' value ' + (htag * (depth + 1) + '\n')
return chain
def buildValueChain(key, value, depth):
tab = " "
list_tag = '* '
chain = tab * (bool(depth - 1)) + list_tag + \
str(key) + ": " + str(value) + "\n"
return chain
def addHeader(value, depth):
chain = buildHeaderChain(depth)
chain = chain.replace('value', value.title())
return chain
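# Illustrative sketch (not from the original converter): nested dicts become
# headers and scalar values become list items, so with Python 3.7+ dict
# ordering the helpers above produce markdown roughly like:
#
#   dict2md({'name': 'web01', 'facts': {'os': 'ESXi'}})
#   # ->  "  * name: web01\n# Facts #\n* os: ESXi\n"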
# Remove ansible branding from results
def rec_ansible_key_strip(obj):
if isinstance(obj, dict):
return {key.replace('ansible_', ''): rec_ansible_key_strip(val) for key, val in obj.items()}
return obj
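# Example (illustrative, not from the original source): only dict keys are
# rewritten; recursion stops at non-dict values and lists are left untouched.
#
#   rec_ansible_key_strip({'ansible_hostname': 'esx1', 'uptime': 42})
#   # -> {'hostname': 'esx1', 'uptime': 42}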
# COMMAND FUNCTIONS
def generic_ansible(integration_name, command, args: Dict[str, Any]) -> CommandResults:
readable_output = ""
sshkey = ""
fork_count = 1 # default to executing against 1 host at a time
if args.get('concurrency'):
fork_count = int(args.get('concurrency'))  # int() actually converts; typing.cast is a no-op
inventory: Dict[str, dict] = {}
inventory['all'] = {}
inventory['all']['hosts'] = {}
inventory['all']['hosts']['localhost'] = {}
inventory['all']['hosts']['localhost']['ansible_connection'] = 'local'
module_args = ""
# build module args list
for arg_key, arg_value in args.items():
# skip the hardcoded host arg, as it is not related to the module
if arg_key == 'host':
continue
module_args += "%s=\"%s\" " % (arg_key, arg_value)
# If this isn't host based, then all the integration params will be used as command args
for arg_key, arg_value in demisto.params().items():
module_args += "%s=\"%s\" " % (arg_key, arg_value)
r = ansible_runner.run(inventory=inventory, host_pattern='all', module=command, quiet=True,
omit_event_data=True, ssh_key=sshkey, module_args=module_args, forks=fork_count)
results = []
for each_host_event in r.events:
# Troubleshooting
# demisto.log("%s: %s\n" % (each_host_event['event'], each_host_event))
if each_host_event['event'] in ["runner_on_ok", "runner_on_unreachable", "runner_on_failed"]:
# parse results
result = json.loads('{' + each_host_event['stdout'].split('{', 1)[1])
host = each_host_event['stdout'].split('|', 1)[0].strip()
status = each_host_event['stdout'].replace('=>', '|').split('|', 3)[1]
# if successful build outputs
if each_host_event['event'] == "runner_on_ok":
if 'fact' in command:
result = result['ansible_facts']
else:
if result.get(command) is not None:
result = result[command]
else:
result.pop("ansible_facts", None)
result = rec_ansible_key_strip(result)
if host != "localhost":
readable_output += "# %s - %s\n" % (host, status)
else:
# This integration is not host based
readable_output += "# %s\n" % status
readable_output += dict2md(result)
# add host and status to result
result['host'] = host
result['status'] = status
results.append(result)
if each_host_event['event'] == "runner_on_unreachable":
msg = "Host %s unreachable\nError Details: %s" % (host, result)
return_error(msg)
if each_host_event['event'] == "runner_on_failed":
msg = "Host %s failed running command\nError Details: %s" % (host, result)
return_error(msg)
# This integration is not host based and always runs against localhost
results = results[0]
return CommandResults(
readable_output=readable_output,
outputs_prefix=integration_name + '.' + command,
outputs_key_field='',
outputs=results
)
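# Hedged note on the argument handling above (the values shown are made up):
# every command argument and every integration parameter is flattened into one
# key="value" string for ansible_runner, e.g. args of
# {'hostname': 'vcenter01', 'validate_certs': 'False'} contribute
# 'hostname="vcenter01" validate_certs="False" ' to module_args.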
# MAIN FUNCTION
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
# SSH Key integration requires ssh_agent to be running in the background
ssh_agent_setup.setup()
try:
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results('ok')
elif demisto.command() == 'vmware-about-info':
return_results(generic_ansible('vmwarev2', 'vmware_about_info', demisto.args()))
elif demisto.command() == 'vmware-category':
return_results(generic_ansible('vmwarev2', 'vmware_category', demisto.args()))
elif demisto.command() == 'vmware-category-info':
return_results(generic_ansible('vmwarev2', 'vmware_category_info', demisto.args()))
elif demisto.command() == 'vmware-cfg-backup':
return_results(generic_ansible('vmwarev2', 'vmware_cfg_backup', demisto.args()))
elif demisto.command() == 'vmware-cluster':
return_results(generic_ansible('vmwarev2', 'vmware_cluster', demisto.args()))
elif demisto.command() == 'vmware-cluster-drs':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_drs', demisto.args()))
elif demisto.command() == 'vmware-cluster-ha':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_ha', demisto.args()))
elif demisto.command() == 'vmware-cluster-info':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_info', demisto.args()))
elif demisto.command() == 'vmware-cluster-vsan':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_vsan', demisto.args()))
elif demisto.command() == 'vmware-content-deploy-template':
return_results(generic_ansible('vmwarev2', 'vmware_content_deploy_template', demisto.args()))
elif demisto.command() == 'vmware-content-library-info':
return_results(generic_ansible('vmwarev2', 'vmware_content_library_info', demisto.args()))
elif demisto.command() == 'vmware-content-library-manager':
return_results(generic_ansible('vmwarev2', 'vmware_content_library_manager', demisto.args()))
elif demisto.command() == 'vmware-datacenter':
return_results(generic_ansible('vmwarev2', 'vmware_datacenter', demisto.args()))
elif demisto.command() == 'vmware-datastore-cluster':
return_results(generic_ansible('vmwarev2', 'vmware_datastore_cluster', demisto.args()))
elif demisto.command() == 'vmware-datastore-info':
return_results(generic_ansible('vmwarev2', 'vmware_datastore_info', demisto.args()))
elif demisto.command() == 'vmware-datastore-maintenancemode':
return_results(generic_ansible('vmwarev2', 'vmware_datastore_maintenancemode', demisto.args()))
elif demisto.command() == 'vmware-dns-config':
return_results(generic_ansible('vmwarev2', 'vmware_dns_config', demisto.args()))
elif demisto.command() == 'vmware-drs-group':
return_results(generic_ansible('vmwarev2', 'vmware_drs_group', demisto.args()))
elif demisto.command() == 'vmware-drs-group-info':
return_results(generic_ansible('vmwarev2', 'vmware_drs_group_info', demisto.args()))
elif demisto.command() == 'vmware-drs-rule-info':
return_results(generic_ansible('vmwarev2', 'vmware_drs_rule_info', demisto.args()))
elif demisto.command() == 'vmware-dvs-host':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_host', demisto.args()))
elif demisto.command() == 'vmware-dvs-portgroup':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_portgroup', demisto.args()))
elif demisto.command() == 'vmware-dvs-portgroup-find':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_portgroup_find', demisto.args()))
elif demisto.command() == 'vmware-dvs-portgroup-info':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_portgroup_info', demisto.args()))
elif demisto.command() == 'vmware-dvswitch':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-lacp':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_lacp', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-nioc':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_nioc', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-pvlans':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_pvlans', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-uplink-pg':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_uplink_pg', demisto.args()))
elif demisto.command() == 'vmware-evc-mode':
return_results(generic_ansible('vmwarev2', 'vmware_evc_mode', demisto.args()))
elif demisto.command() == 'vmware-folder-info':
return_results(generic_ansible('vmwarev2', 'vmware_folder_info', demisto.args()))
elif demisto.command() == 'vmware-guest':
return_results(generic_ansible('vmwarev2', 'vmware_guest', demisto.args()))
elif demisto.command() == 'vmware-guest-boot-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_boot_info', demisto.args()))
elif demisto.command() == 'vmware-guest-boot-manager':
return_results(generic_ansible('vmwarev2', 'vmware_guest_boot_manager', demisto.args()))
elif demisto.command() == 'vmware-guest-custom-attribute-defs':
return_results(generic_ansible('vmwarev2', 'vmware_guest_custom_attribute_defs', demisto.args()))
elif demisto.command() == 'vmware-guest-custom-attributes':
return_results(generic_ansible('vmwarev2', 'vmware_guest_custom_attributes', demisto.args()))
elif demisto.command() == 'vmware-guest-customization-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_customization_info', demisto.args()))
elif demisto.command() == 'vmware-guest-disk':
return_results(generic_ansible('vmwarev2', 'vmware_guest_disk', demisto.args()))
elif demisto.command() == 'vmware-guest-disk-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_disk_info', demisto.args()))
elif demisto.command() == 'vmware-guest-find':
return_results(generic_ansible('vmwarev2', 'vmware_guest_find', demisto.args()))
elif demisto.command() == 'vmware-guest-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_info', demisto.args()))
elif demisto.command() == 'vmware-guest-move':
return_results(generic_ansible('vmwarev2', 'vmware_guest_move', demisto.args()))
elif demisto.command() == 'vmware-guest-network':
return_results(generic_ansible('vmwarev2', 'vmware_guest_network', demisto.args()))
elif demisto.command() == 'vmware-guest-powerstate':
return_results(generic_ansible('vmwarev2', 'vmware_guest_powerstate', demisto.args()))
elif demisto.command() == 'vmware-guest-screenshot':
return_results(generic_ansible('vmwarev2', 'vmware_guest_screenshot', demisto.args()))
elif demisto.command() == 'vmware-guest-sendkey':
return_results(generic_ansible('vmwarev2', 'vmware_guest_sendkey', demisto.args()))
elif demisto.command() == 'vmware-guest-snapshot':
return_results(generic_ansible('vmwarev2', 'vmware_guest_snapshot', demisto.args()))
elif demisto.command() == 'vmware-guest-snapshot-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_snapshot_info', demisto.args()))
elif demisto.command() == 'vmware-guest-tools-upgrade':
return_results(generic_ansible('vmwarev2', 'vmware_guest_tools_upgrade', demisto.args()))
elif demisto.command() == 'vmware-guest-tools-wait':
return_results(generic_ansible('vmwarev2', 'vmware_guest_tools_wait', demisto.args()))
elif demisto.command() == 'vmware-guest-video':
return_results(generic_ansible('vmwarev2', 'vmware_guest_video', demisto.args()))
elif demisto.command() == 'vmware-guest-vnc':
return_results(generic_ansible('vmwarev2', 'vmware_guest_vnc', demisto.args()))
elif demisto.command() == 'vmware-host':
return_results(generic_ansible('vmwarev2', 'vmware_host', demisto.args()))
elif demisto.command() == 'vmware-host-acceptance':
return_results(generic_ansible('vmwarev2', 'vmware_host_acceptance', demisto.args()))
elif demisto.command() == 'vmware-host-active-directory':
return_results(generic_ansible('vmwarev2', 'vmware_host_active_directory', demisto.args()))
elif demisto.command() == 'vmware-host-capability-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_capability_info', demisto.args()))
elif demisto.command() == 'vmware-host-config-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_config_info', demisto.args()))
elif demisto.command() == 'vmware-host-config-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_config_manager', demisto.args()))
elif demisto.command() == 'vmware-host-datastore':
return_results(generic_ansible('vmwarev2', 'vmware_host_datastore', demisto.args()))
elif demisto.command() == 'vmware-host-dns-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_dns_info', demisto.args()))
elif demisto.command() == 'vmware-host-facts':
return_results(generic_ansible('vmwarev2', 'vmware_host_facts', demisto.args()))
elif demisto.command() == 'vmware-host-feature-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_feature_info', demisto.args()))
elif demisto.command() == 'vmware-host-firewall-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_firewall_info', demisto.args()))
elif demisto.command() == 'vmware-host-firewall-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_firewall_manager', demisto.args()))
elif demisto.command() == 'vmware-host-hyperthreading':
return_results(generic_ansible('vmwarev2', 'vmware_host_hyperthreading', demisto.args()))
elif demisto.command() == 'vmware-host-ipv6':
return_results(generic_ansible('vmwarev2', 'vmware_host_ipv6', demisto.args()))
elif demisto.command() == 'vmware-host-kernel-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_kernel_manager', demisto.args()))
elif demisto.command() == 'vmware-host-lockdown':
return_results(generic_ansible('vmwarev2', 'vmware_host_lockdown', demisto.args()))
elif demisto.command() == 'vmware-host-ntp':
return_results(generic_ansible('vmwarev2', 'vmware_host_ntp', demisto.args()))
elif demisto.command() == 'vmware-host-ntp-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_ntp_info', demisto.args()))
elif demisto.command() == 'vmware-host-package-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_package_info', demisto.args()))
elif demisto.command() == 'vmware-host-powermgmt-policy':
return_results(generic_ansible('vmwarev2', 'vmware_host_powermgmt_policy', demisto.args()))
elif demisto.command() == 'vmware-host-powerstate':
return_results(generic_ansible('vmwarev2', 'vmware_host_powerstate', demisto.args()))
elif demisto.command() == 'vmware-host-scanhba':
return_results(generic_ansible('vmwarev2', 'vmware_host_scanhba', demisto.args()))
elif demisto.command() == 'vmware-host-service-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_service_info', demisto.args()))
elif demisto.command() == 'vmware-host-service-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_service_manager', demisto.args()))
elif demisto.command() == 'vmware-host-snmp':
return_results(generic_ansible('vmwarev2', 'vmware_host_snmp', demisto.args()))
elif demisto.command() == 'vmware-host-ssl-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_ssl_info', demisto.args()))
elif demisto.command() == 'vmware-host-vmhba-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_vmhba_info', demisto.args()))
elif demisto.command() == 'vmware-host-vmnic-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_vmnic_info', demisto.args()))
elif demisto.command() == 'vmware-local-role-info':
return_results(generic_ansible('vmwarev2', 'vmware_local_role_info', demisto.args()))
elif demisto.command() == 'vmware-local-role-manager':
return_results(generic_ansible('vmwarev2', 'vmware_local_role_manager', demisto.args()))
elif demisto.command() == 'vmware-local-user-info':
return_results(generic_ansible('vmwarev2', 'vmware_local_user_info', demisto.args()))
elif demisto.command() == 'vmware-local-user-manager':
return_results(generic_ansible('vmwarev2', 'vmware_local_user_manager', demisto.args()))
elif demisto.command() == 'vmware-maintenancemode':
return_results(generic_ansible('vmwarev2', 'vmware_maintenancemode', demisto.args()))
elif demisto.command() == 'vmware-migrate-vmk':
return_results(generic_ansible('vmwarev2', 'vmware_migrate_vmk', demisto.args()))
elif demisto.command() == 'vmware-object-role-permission':
return_results(generic_ansible('vmwarev2', 'vmware_object_role_permission', demisto.args()))
elif demisto.command() == 'vmware-portgroup':
return_results(generic_ansible('vmwarev2', 'vmware_portgroup', demisto.args()))
elif demisto.command() == 'vmware-portgroup-info':
return_results(generic_ansible('vmwarev2', 'vmware_portgroup_info', demisto.args()))
elif demisto.command() == 'vmware-resource-pool':
return_results(generic_ansible('vmwarev2', 'vmware_resource_pool', demisto.args()))
elif demisto.command() == 'vmware-resource-pool-info':
return_results(generic_ansible('vmwarev2', 'vmware_resource_pool_info', demisto.args()))
elif demisto.command() == 'vmware-tag':
return_results(generic_ansible('vmwarev2', 'vmware_tag', demisto.args()))
elif demisto.command() == 'vmware-tag-info':
return_results(generic_ansible('vmwarev2', 'vmware_tag_info', demisto.args()))
elif demisto.command() == 'vmware-tag-manager':
return_results(generic_ansible('vmwarev2', 'vmware_tag_manager', demisto.args()))
elif demisto.command() == 'vmware-target-canonical-info':
return_results(generic_ansible('vmwarev2', 'vmware_target_canonical_info', demisto.args()))
elif demisto.command() == 'vmware-vcenter-settings':
return_results(generic_ansible('vmwarev2', 'vmware_vcenter_settings', demisto.args()))
elif demisto.command() == 'vmware-vcenter-statistics':
return_results(generic_ansible('vmwarev2', 'vmware_vcenter_statistics', demisto.args()))
elif demisto.command() == 'vmware-vm-host-drs-rule':
return_results(generic_ansible('vmwarev2', 'vmware_vm_host_drs_rule', demisto.args()))
elif demisto.command() == 'vmware-vm-info':
return_results(generic_ansible('vmwarev2', 'vmware_vm_info', demisto.args()))
elif demisto.command() == 'vmware-vm-shell':
return_results(generic_ansible('vmwarev2', 'vmware_vm_shell', demisto.args()))
elif demisto.command() == 'vmware-vm-storage-policy-info':
return_results(generic_ansible('vmwarev2', 'vmware_vm_storage_policy_info', demisto.args()))
elif demisto.command() == 'vmware-vm-vm-drs-rule':
return_results(generic_ansible('vmwarev2', 'vmware_vm_vm_drs_rule', demisto.args()))
elif demisto.command() == 'vmware-vm-vss-dvs-migrate':
return_results(generic_ansible('vmwarev2', 'vmware_vm_vss_dvs_migrate', demisto.args()))
elif demisto.command() == 'vmware-vmkernel':
return_results(generic_ansible('vmwarev2', 'vmware_vmkernel', demisto.args()))
elif demisto.command() == 'vmware-vmkernel-info':
return_results(generic_ansible('vmwarev2', 'vmware_vmkernel_info', demisto.args()))
elif demisto.command() == 'vmware-vmkernel-ip-config':
return_results(generic_ansible('vmwarev2', 'vmware_vmkernel_ip_config', demisto.args()))
elif demisto.command() == 'vmware-vmotion':
return_results(generic_ansible('vmwarev2', 'vmware_vmotion', demisto.args()))
elif demisto.command() == 'vmware-vsan-cluster':
return_results(generic_ansible('vmwarev2', 'vmware_vsan_cluster', demisto.args()))
elif demisto.command() == 'vmware-vspan-session':
return_results(generic_ansible('vmwarev2', 'vmware_vspan_session', demisto.args()))
elif demisto.command() == 'vmware-vswitch':
return_results(generic_ansible('vmwarev2', 'vmware_vswitch', demisto.args()))
elif demisto.command() == 'vmware-vswitch-info':
return_results(generic_ansible('vmwarev2', 'vmware_vswitch_info', demisto.args()))
elif demisto.command() == 'vmware-vsphere-file':
return_results(generic_ansible('vmwarev2', 'vsphere_file', demisto.args()))
elif demisto.command() == 'vmware-vcenter-extension':
return_results(generic_ansible('vmwarev2', 'vcenter_extension', demisto.args()))
elif demisto.command() == 'vmware-vcenter-extension-info':
return_results(generic_ansible('vmwarev2', 'vcenter_extension_info', demisto.args()))
elif demisto.command() == 'vmware-vcenter-folder':
return_results(generic_ansible('vmwarev2', 'vcenter_folder', demisto.args()))
elif demisto.command() == 'vmware-vcenter-license':
return_results(generic_ansible('vmwarev2', 'vcenter_license', demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
# ENTRY POINT
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
|
import copy
import datetime
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.query_utils import Q
from django.utils import six
from django.utils.functional import cached_property
class Combinable(object):
"""
Provides the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
POW = '^'
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = '%%'
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = '&'
BITOR = '|'
BITLEFTSHIFT = '<<'
BITRIGHTSHIFT = '>>'
def _combine(self, other, connector, reversed, node=None):
if not hasattr(other, 'resolve_expression'):
# everything must be resolvable to an expression
if isinstance(other, datetime.timedelta):
other = DurationValue(other, output_field=fields.DurationField())
else:
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __or__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rdiv__(self, other): # Python 2 compatibility
return type(self).__rtruediv__(self, other)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
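# Illustrative sketch (not part of this module): combining two Combinable
# objects with the operators above yields a CombinedExpression tree; the model
# name here is hypothetical.
#
#   from django.db.models import F
#   line_total = F('unit_price') * F('quantity')    # CombinedExpression(F, '*', F)
#   Order.objects.update(total=line_total)          # resolved against Order at query time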
class BaseExpression(object):
"""
Base class for all query expressions.
"""
# aggregate specific fields
is_summary = False
_output_field = None
def __init__(self, output_field=None):
if output_field is not None:
self._output_field = output_field
def get_db_converters(self, connection):
return [self.convert_value] + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert len(exprs) == 0
def _parse_expressions(self, *expressions):
return [
arg if hasattr(arg, 'resolve_expression') else (
F(arg) if isinstance(arg, six.string_types) else Value(arg)
) for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super(Expression, self).as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Returns: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
for expr in self.get_source_expressions():
if expr and expr.contains_aggregate:
return True
return False
@cached_property
def contains_column_references(self):
for expr in self.get_source_expressions():
if expr and expr.contains_column_references:
return True
return False
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
"""
Provides the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression is about to be used in a save or update
Returns: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions([
expr.resolve_expression(query, allow_joins, reuse, summarize)
for expr in c.get_source_expressions()
])
return c
def _prepare(self, field):
"""
Hook used by Lookup.get_prep_lookup() to do custom preparation.
"""
return self
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""
Returns the output type of this expression.
"""
if self._output_field_or_none is None:
raise FieldError("Cannot resolve expression type, unknown output_field")
return self._output_field_or_none
@cached_property
def _output_field_or_none(self):
"""
Returns the output field of this expression, or None if no output type
can be resolved. Note that the 'output_field' property will raise
FieldError if no type can be resolved, but this attribute allows for
None values.
"""
if self._output_field is None:
self._resolve_output_field()
return self._output_field
def _resolve_output_field(self):
"""
Attempts to infer the output type of the expression. If the output
fields of all source fields match then we can simply infer the same
type here. This isn't always correct, but it makes sense most of the
time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field with more complex computations.
If a source does not have an `_output_field` then we exclude it from
this check. If all sources are `None`, then an error will be thrown
higher up the stack in the `output_field` property.
"""
if self._output_field is None:
sources = self.get_source_fields()
num_sources = len(sources)
if num_sources == 0:
self._output_field = None
else:
for source in sources:
if self._output_field is None:
self._output_field = source
if source is not None and not isinstance(self._output_field, source.__class__):
raise FieldError(
"Expression contains mixed types. You must set output_field")
def convert_value(self, value, expression, connection, context):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if value is None:
return value
elif internal_type == 'FloatField':
return float(value)
elif internal_type.endswith('IntegerField'):
return int(value)
elif internal_type == 'DecimalField':
return backend_utils.typecast_decimal(value)
return value
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[e.relabeled_clone(change_map) for e in self.get_source_expressions()])
return clone
def copy(self):
c = copy.copy(self)
c.copied = True
return c
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""
Returns the underlying field types used by this
aggregate.
"""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
for inner_expr in expr.flatten():
yield inner_expr
class Expression(BaseExpression, Combinable):
"""
An expression that can be combined with other expressions.
"""
pass
class CombinedExpression(Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super(CombinedExpression, self).__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def as_sql(self, compiler, connection):
try:
lhs_output = self.lhs.output_field
except FieldError:
lhs_output = None
try:
rhs_output = self.rhs.output_field
except FieldError:
rhs_output = None
if (not connection.features.has_native_duration_field and
((lhs_output and lhs_output.get_internal_type() == 'DurationField') or
(rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
if (lhs_output and rhs_output and self.connector == self.SUB and
lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and
lhs_output.get_internal_type() == rhs_output.get_internal_type()):
return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection)
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
if not isinstance(side, DurationValue):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == 'DurationField':
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
class TemporalSubtraction(CombinedExpression):
def __init__(self, lhs, rhs):
super(TemporalSubtraction, self).__init__(lhs, self.SUB, rhs, output_field=fields.DurationField())
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs, connection)
rhs = compiler.compile(self.rhs, connection)
return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs)
class F(Combinable):
"""
An object capable of resolving references to existing query objects.
"""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
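# Illustrative usage of F (model and field names are hypothetical): the
# reference is resolved against the query's own columns, so comparisons and
# arithmetic happen in SQL rather than in Python.
#
#   Entry.objects.filter(n_comments__gt=F('n_pingbacks'))
#   Entry.objects.update(n_pingbacks=F('n_pingbacks') + 1)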
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
def as_sql(self, *args, **kwargs):
raise ValueError(
'This queryset contains a reference to an outer query and may '
'only be used in a subquery.'
)
def _prepare(self, output_field=None):
return self
class OuterRef(F):
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def _prepare(self, output_field=None):
return self
class Func(Expression):
"""
An SQL function call.
"""
function = None
template = '%(function)s(%(expressions)s)'
arg_joiner = ', '
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)" % (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
output_field = extra.pop('output_field', None)
super(Func, self).__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = ', '.join(str(key) + '=' + str(val) for key, val in self.extra.items())
if extra:
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
arg_sql, arg_params = compiler.compile(arg)
sql_parts.append(arg_sql)
params.extend(arg_params)
data = self.extra.copy()
data.update(**extra_context)
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data['function'] = function
else:
data.setdefault('function', self.function)
template = template or data.get('template', self.template)
arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner)
data['expressions'] = data['field'] = arg_joiner.join(sql_parts)
return template % data, params
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
try:
if self.output_field.get_internal_type() == 'DecimalField':
sql = 'CAST(%s AS NUMERIC)' % sql
except FieldError:
pass
return sql, params
def copy(self):
copy = super(Func, self).copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
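# Minimal sketch of a Func subclass (the class name is made up): setting
# ``function`` is enough for the default template
# '%(function)s(%(expressions)s)' to render e.g. UPPER("name").
class _ExampleUpper(Func):
    function = 'UPPER'
# Hypothetical usage: SomeModel.objects.annotate(name_upper=_ExampleUpper('name'))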
class Value(Expression):
"""
Represents a wrapped value as a node within an expression
"""
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super(Value, self).__init__(output_field=output_field)
self.value = value
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.value)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
# check _output_field to avoid triggering an exception
if self._output_field is not None:
if self.for_save:
val = self.output_field.get_db_prep_save(val, connection=connection)
else:
val = self.output_field.get_db_prep_value(val, connection=connection)
if hasattr(self._output_field, 'get_placeholder'):
return self._output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return 'NULL', []
return '%s', [val]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super(Value, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self):
return []
class DurationValue(Value):
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
if connection.features.has_native_duration_field:
return super(DurationValue, self).as_sql(compiler, connection)
return connection.ops.date_interval_sql(self.value)
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super(RawSQL, self).__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return '(%s)' % self.sql, self.params
def get_group_by_cols(self):
return [self]
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return '*', []
class Random(Expression):
def __init__(self):
super(Random, self).__init__(output_field=fields.FloatField())
def __repr__(self):
return "Random()"
def as_sql(self, compiler, connection):
return connection.ops.random_function_sql(), []
class Col(Expression):
contains_column_references = True
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super(Col, self).__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, self.alias, self.target)
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return "%s.%s" % (qn(self.alias), qn(self.target.column)), []
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return (self.output_field.get_db_converters(connection) +
self.target.get_db_converters(connection))
class Ref(Expression):
"""
Reference to a column alias of the query. For example, Ref('sum_cost') in
the qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super(Ref, self).__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
self.source, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return "%s" % connection.ops.quote_name(self.refs), []
def get_group_by_cols(self):
return [self]
class ExpressionWrapper(Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super(ExpressionWrapper, self).__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection):
return self.expression.as_sql(compiler, connection)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
class When(Expression):
template = 'WHEN %(condition)s THEN %(result)s'
def __init__(self, condition=None, then=None, **lookups):
if lookups and condition is None:
condition, lookups = Q(**lookups), None
if condition is None or not isinstance(condition, Q) or lookups:
raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
super(When, self).__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, 'resolve_expression'):
c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params['condition'] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params['result'] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
class Case(Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = 'CASE %(cases)s ELSE %(default)s END'
case_joiner = ' '
def __init__(self, *cases, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
default = extra.pop('default', None)
output_field = extra.pop('output_field', None)
super(Case, self).__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
self.cases = exprs[:-1]
self.default = exprs[-1]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def copy(self):
c = super(Case, self).copy()
c.cases = c.cases[:]
return c
def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = self.extra.copy()
template_params.update(extra_context)
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params['cases'] = case_joiner.join(case_parts)
template_params['default'] = default_sql
sql_params.extend(default_params)
template = template or template_params.get('template', self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
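# Illustrative ORM equivalent of the searched CASE in the docstring above
# (model and field names are hypothetical):
#
#   from django.db.models import CharField
#   Sample.objects.annotate(sign=Case(
#       When(n__gt=0, then=Value('positive')),
#       When(n__lt=0, then=Value('negative')),
#       default=Value('zero'),
#       output_field=CharField(),
#   ))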
class Subquery(Expression):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = '(%(subquery)s)'
def __init__(self, queryset, output_field=None, **extra):
self.queryset = queryset
self.extra = extra
if output_field is None and len(self.queryset.query.select) == 1:
output_field = self.queryset.query.select[0].field
super(Subquery, self).__init__(output_field)
def copy(self):
clone = super(Subquery, self).copy()
clone.queryset = clone.queryset.all()
return clone
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
clone = self.copy()
clone.is_summary = summarize
clone.queryset.query.bump_prefix(query)
# Need to recursively resolve these.
def resolve_all(child):
if hasattr(child, 'children'):
[resolve_all(_child) for _child in child.children]
if hasattr(child, 'rhs'):
child.rhs = resolve(child.rhs)
def resolve(child):
if hasattr(child, 'resolve_expression'):
resolved = child.resolve_expression(
query=query, allow_joins=allow_joins, reuse=reuse,
summarize=summarize, for_save=for_save,
)
# Add table alias to the parent query's aliases to prevent
# quoting.
if hasattr(resolved, 'alias') and resolved.alias != resolved.target.model._meta.db_table:
clone.queryset.query.external_aliases.add(resolved.alias)
return resolved
return child
resolve_all(clone.queryset.query.where)
for key, value in clone.queryset.query.annotations.items():
if isinstance(value, Subquery):
clone.queryset.query.annotations[key] = resolve(value)
return clone
def get_source_expressions(self):
return [
x for x in [
getattr(expr, 'lhs', None)
for expr in self.queryset.query.where.children
] if x
]
def relabeled_clone(self, change_map):
clone = self.copy()
clone.queryset.query = clone.queryset.query.relabeled_clone(change_map)
clone.queryset.query.external_aliases.update(
alias for alias in change_map.values()
if alias not in clone.queryset.query.tables
)
return clone
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = self.extra.copy()
template_params.update(extra_context)
template_params['subquery'], sql_params = self.queryset.query.get_compiler(connection=connection).as_sql()
template = template or template_params.get('template', self.template)
sql = template % template_params
return sql, sql_params
def _prepare(self, output_field):
# This method will only be called if this instance is the "rhs" in an
# expression: the wrapping () must be removed (as the expression that
# contains this will provide them). SQLite evaluates ((subquery))
# differently than the other databases.
if self.template == '(%(subquery)s)':
clone = self.copy()
clone.template = '%(subquery)s'
return clone
return self
class Exists(Subquery):
template = 'EXISTS(%(subquery)s)'
def __init__(self, *args, **kwargs):
self.negated = kwargs.pop('negated', False)
super(Exists, self).__init__(*args, **kwargs)
def __invert__(self):
return type(self)(self.queryset, self.output_field, negated=(not self.negated), **self.extra)
@property
def output_field(self):
return fields.BooleanField()
def resolve_expression(self, query=None, **kwargs):
# As a performance optimization, remove ordering since EXISTS doesn't
# care about it, just whether or not a row matches.
self.queryset = self.queryset.order_by()
return super(Exists, self).resolve_expression(query, **kwargs)
def as_sql(self, compiler, connection, template=None, **extra_context):
sql, params = super(Exists, self).as_sql(compiler, connection, template, **extra_context)
if self.negated:
sql = 'NOT {}'.format(sql)
return sql, params
def as_oracle(self, compiler, connection, template=None, **extra_context):
# Oracle doesn't allow EXISTS() in the SELECT list, so wrap it with a
# CASE WHEN expression. Change the template since the When expression
# requires a left hand side (column) to compare against.
sql, params = self.as_sql(compiler, connection, template, **extra_context)
sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql)
return sql, params
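# A hedged usage sketch of Exists (the `Comment`/`Post` models are illustrative,
# not defined in this module):
#   comments = Comment.objects.filter(post=OuterRef('pk'))
#   Post.objects.annotate(has_comments=Exists(comments)).filter(has_comments=True)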
class OrderBy(BaseExpression):
template = '%(expression)s %(ordering)s'
def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False):
if nulls_first and nulls_last:
raise ValueError('nulls_first and nulls_last are mutually exclusive')
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, 'resolve_expression'):
raise ValueError('expression must be an expression type')
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
if not template:
if self.nulls_last:
template = '%s NULLS LAST' % self.template
elif self.nulls_first:
template = '%s NULLS FIRST' % self.template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
'expression': expression_sql,
'ordering': 'DESC' if self.descending else 'ASC',
}
placeholders.update(extra_context)
template = template or self.template
return (template % placeholders).rstrip(), params
def as_sqlite(self, compiler, connection):
template = None
if self.nulls_last:
template = '%(expression)s IS NULL, %(expression)s %(ordering)s'
elif self.nulls_first:
template = '%(expression)s IS NOT NULL, %(expression)s %(ordering)s'
return self.as_sql(compiler, connection, template=template)
def as_mysql(self, compiler, connection):
template = None
if self.nulls_last:
template = 'IF(ISNULL(%(expression)s),1,0), %(expression)s %(ordering)s '
elif self.nulls_first:
template = 'IF(ISNULL(%(expression)s),0,1), %(expression)s %(ordering)s '
return self.as_sql(compiler, connection, template=template)
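# Both overrides above emulate NULLS FIRST/LAST on back ends without native
# support by prepending a boolean "is the expression null" sort key ahead of
# the expression itself.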
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
if self.nulls_first or self.nulls_last:
self.nulls_first = not self.nulls_first
self.nulls_last = not self.nulls_last
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
|
|
from optparse import OptionParser
import os
import sys
import gensim
import random
from collections import OrderedDict
import numpy as np
import theano
from theano import tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from ..util import file_handling as fh
from ..dataset_scripts.sst.read_data import load_ptb_bitrees
# This is the same as constituency_tree_lstm, but with only one mask for L/R
# Otherwise the deepcopy fails
sys.setrecursionlimit(5000)
# Note: assigning THEANO_FLAGS here has no effect; Theano only reads it from
# the environment before it is imported (e.g. export THEANO_FLAGS='floatX=float32').
THEANO_FLAGS = 'floatX=float32'
#THEANO_FLAGS='floatX=float32, optimizer=None'
#DYLD_FALLBACK_LIBRARY_PATH='/Applications/anaconda/lib/'
class DyTreeLSTM(object):
def __init__(self, dv, dh, dx, de, ne, nc, alpha=1.0, init_scale=0.2, initial_embeddings=None, edge_embeddings=None,
params_init=None,
update='adagrad', seed=None, drop_p=0.5, momentum=0.9, train_embeddings=False):
self.dv = dv # vocabulary size
self.dh = dh # hidden vector size
self.dx = dx # word embedding size
self.de = de # edge embedding size
dxe = dx + de
self.dxe = dxe # dimension of word embeddings concatenated with edge embeddings
self.ne = ne # number of types of edges (edge vocab size)
self.nc = nc # number of classes
self.alpha = alpha # regularization strength
self.drop_p = drop_p # probability of dropping an input with dropout
# adagrad parameters
self.epsilon = 0.00001
if initial_embeddings is None:
self.emb = theano.shared(name='embeddings',
value=init_scale * np.random.uniform(-1.0, 1.0,
(dv, dx)).astype(theano.config.floatX))
else:
self.emb = theano.shared(name='embeddings', value=initial_embeddings.astype(theano.config.floatX))
if edge_embeddings is None:
self.edge_emb = theano.shared(name='edge_embeddings',
value=init_scale * np.random.uniform(-1.0, 1.0,
(ne, de)).astype(theano.config.floatX))
else:
self.edge_emb = theano.shared(name='edge_embeddings', value=edge_embeddings.astype(theano.config.floatX))
self.W_x_i = theano.shared(name='W_x_i', value=init_scale * np.random.uniform(-1.0, 1.0, (dxe, dh))
.astype(theano.config.floatX))
self.W_h_i = theano.shared(name='W_h_i', value=init_scale * np.random.uniform(-1.0, 1.0, (dh, dh))
.astype(theano.config.floatX))
self.b_h_i = theano.shared(name='b_h_i', value=np.array(np.zeros(dh),
dtype=theano.config.floatX))
self.W_x_f = theano.shared(name='W_x_f', value=init_scale * np.random.uniform(-1.0, 1.0, (dxe, dh))
.astype(theano.config.floatX))
self.W_h_f = theano.shared(name='W_h_f', value=init_scale * np.random.uniform(-1.0, 1.0, (dh, dh))
.astype(theano.config.floatX))
self.b_h_f = theano.shared(name='b_h_f', value=np.array(np.random.uniform(0.0, 1.0, dh),
dtype=theano.config.floatX))
self.W_x_o = theano.shared(name='W_x_o', value=init_scale * np.random.uniform(-1.0, 1.0, (dxe, dh))
.astype(theano.config.floatX))
self.W_h_o = theano.shared(name='W_h_o', value=init_scale * np.random.uniform(-1.0, 1.0, (dh, dh))
.astype(theano.config.floatX))
self.b_h_o = theano.shared(name='b_h_o', value=np.array(np.zeros(dh),
dtype=theano.config.floatX))
self.W_x_u = theano.shared(name='W_x_u', value=init_scale * np.random.uniform(-1.0, 1.0, (dxe, dh))
.astype(theano.config.floatX))
self.W_h_u = theano.shared(name='W_h_u', value=init_scale * np.random.uniform(-1.0, 1.0, (dh, dh))
.astype(theano.config.floatX))
self.b_h_u = theano.shared(name='b_h_u', value=np.array(np.zeros(dh),
dtype=theano.config.floatX))
self.W_x_z = theano.shared(name='W_x_z', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nc))
.astype(theano.config.floatX))
self.W_h_z = theano.shared(name='W_h_z', value=init_scale * np.random.uniform(-1.0, 1.0, (dh, nc))
.astype(theano.config.floatX))
self.b_z = theano.shared(name='b_z', value=np.array(np.zeros(nc),
dtype=theano.config.floatX))
self.W_x_a = theano.shared(name='W_x_a', value=init_scale * np.random.uniform(-1.0, 1.0, (dxe, dh))
.astype(theano.config.floatX))
self.W_h_a = theano.shared(name='W_h_a', value=init_scale * np.random.uniform(-1.0, 1.0, (dh, dh))
.astype(theano.config.floatX))
self.v_a = theano.shared(name='v_a', value=init_scale * np.random.uniform(-1.0, 1.0, (dh,))
.astype(theano.config.floatX))
self.params = [self.W_x_i, self.W_h_i, self.b_h_i]
self.params += [self.W_x_f, self.W_h_f, self.b_h_f]
self.params += [self.W_x_o, self.W_h_o, self.b_h_o]
self.params += [self.W_x_u, self.W_h_u, self.b_h_u]
self.params += [self.W_x_z, self.W_h_z, self.b_z]
self.params += [self.W_h_a, self.v_a]
self.param_shapes = [(dxe, dh), (dh, dh), (dh,),
(dxe, dh), (dh, dh), (dh,),
(dxe, dh), (dh, dh), (dh,),
(dxe, dh), (dh, dh), (dh,),
(dx, nc), (dh, nc), (nc,),
(dh, dh), (dh,),
]
if update == 'adagrad':
self.grad_histories = [
theano.shared(
value=np.zeros(param_shape, dtype=theano.config.floatX),
borrow=True,
name="grad_hist:" + param.name
)
for param_shape, param in zip(self.param_shapes, self.params)
]
elif update == 'sgdm':
self.velocity = [
theano.shared(
value=np.zeros(param_shape, dtype=theano.config.floatX),
borrow=True,
name="momentum:" + param.name
)
for param_shape, param in zip(self.param_shapes, self.params)
]
self.momentum = momentum
self.theano_rng = RandomStreams(seed)
input_idx = T.ivector('input_idx')
edge_mask = T.imatrix('edge_mask')
output_mask = T.imatrix('output_mask')
y = T.ivector('y')
n_nodes, n_edges = T.shape(output_mask)
idxs = T.ivector('idxs')
temp = self.emb[idxs]
x = temp.reshape([n_nodes, dx])
edges_idx = T.ivector('edges_idx')
temp = self.edge_emb[edges_idx]
edges = temp.reshape([n_edges, de])
lr = T.scalar('lr', dtype=theano.config.floatX)
emb_lr = T.scalar('emb_lr', dtype=theano.config.floatX)
is_train = T.iscalar('is_train')
drop_x = T.iscalar('drop_x')
# This is a bit annoying; the 0th dimension of x needs to be sequence, so we can iterate over it
# but the 0th dimension of the hidden nodes needs to be hidden-node dimension, so that we can broadcast
# the mask out to it
def pass_edges(input_idx_t, edge_t, edge_mask_t, counter_t, h_tm1, c_tm1, x):
h_t = h_tm1
c_t = c_tm1
# select the input vector to use for this edge (source)
x_t_i = x[input_idx_t, :]
# zero out the input unless this is a leaf node
x_t_0 = T.switch(T.eq(T.sum(edge_mask_t), 0), x_t_i, x_t_i*0)
# concatenate with the input edge vector
x_t_edge = T.concatenate([x_t_0, edge_t])
# compute attention weights, using a manual softmax
attention_scores = T.dot(self.v_a, T.tanh(T.dot(self.W_h_a, h_tm1))) # (1, n_edges)
# find the max of the unmasked values
max_score = T.max(attention_scores + edge_mask_t * 10000.0) - 10000.0
# exponentiate the differences, masking first to avoid inf, and then to keep only relevant scores
exp_scores = T.exp((attention_scores - max_score) * edge_mask_t) * edge_mask_t
# take the sum, and add one if the mask is all zeros to avoid an inf
exp_scores_sum = T.sum(exp_scores) + T.switch(T.eq(T.sum(edge_mask_t), 0), 1.0, 0.0)
# normalize to compute the weights
weighted_mask = exp_scores / exp_scores_sum
i_t = T.nnet.sigmoid(T.dot(x_t_edge, self.W_x_i) + T.sum(T.dot(self.W_h_i.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_i)
f_t = T.nnet.sigmoid(T.dot(x_t_edge, self.W_x_f) + T.sum(T.dot(self.W_h_f.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_f)
o_t = T.nnet.sigmoid(T.dot(x_t_edge, self.W_x_o) + T.sum(T.dot(self.W_h_o.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_o)
u_t = T.tanh(T.dot(x_t_edge, self.W_x_u) + T.sum(T.dot(self.W_h_u.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_u)
c_temp = i_t * u_t + f_t * T.sum((weighted_mask * c_tm1).T, axis=0)
h_temp = o_t * T.tanh(c_temp)
h_t = T.set_subtensor(h_t[:, counter_t], h_temp)
c_t = T.set_subtensor(c_t[:, counter_t], c_temp)
return h_t, c_t
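# Worked example of the masked softmax above (values are illustrative):
#   scores = [2.0, 1.0, 3.0], edge_mask_t = [1, 1, 0]
#   max_score  = max(scores + mask*10000) - 10000 = 2.0   (masked-out 3.0 ignored)
#   exp_scores = exp((scores - 2.0) * mask) * mask = [1.0, 0.368, 0.0]
#   weights    = exp_scores / sum(exp_scores)      = [0.731, 0.269, 0.0]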
def drop(drop_input, drop_p, is_train):
mask = self.theano_rng.binomial(p=1.0-drop_p, size=drop_input.shape, dtype=theano.config.floatX)
return T.cast(T.switch(T.neq(is_train, 0), drop_input * mask, drop_input * (1.0-self.drop_p)), dtype=theano.config.floatX)
# do dropout on x, if specified
x = T.switch(T.neq(drop_x, 0), drop(x, self.drop_p, is_train), x)
output, _ = theano.scan(
fn=pass_edges, sequences=[input_idx, edges, edge_mask, T.arange(0, n_edges, dtype='int32')],
outputs_info=[T.zeros((dh, n_edges), dtype=theano.config.floatX), T.zeros((dh, n_edges), dtype=theano.config.floatX)],
non_sequences=[x])
full_h, full_c = output
edge_vectors_h = full_h[-1, :, :]
#edge_vectors_c = full_c[-1, :, :]
#eval_node = T.iscalar('eval_node')
# try just computing a single output node from the edge vectors
h_drop = drop(edge_vectors_h, self.drop_p, is_train)
"""
# (W_h_z.T \cdot (output_mask \cdot h_drop.T).T).T
temp = T.dot(x, self.W_x_z) + T.dot(self.W_h_z.T, T.dot(output_mask, h_drop.T).T).T + self.b_z
p_y_given_x = T.cast(T.nnet.softmax(temp), dtype=theano.config.floatX)
pred_y = T.argmax(p_y_given_x, axis=1)
log_loss = T.sum(-T.log(p_y_given_x[T.arange(0, n_nodes), y])) / T.cast(n_nodes, dtype=theano.config.floatX)
"""
# compute attention weights, using a manual softmax
attention_scores = T.dot(self.v_a, T.tanh(T.dot(self.W_h_a, h_drop))) # (1, n_edges)
# find the max of the relevant scores for each node
max_scores = T.max(attention_scores + output_mask * 10000.0, axis=1, keepdims=True) - 10000.0
# exponentiate the differences, masking first to avoid inf, and then to keep only relevant scores
exp_scores = T.exp((attention_scores - max_scores) * output_mask) * output_mask
# normalize to compute the weights
weighted_mask = exp_scores / exp_scores.sum(axis=1, keepdims=True)
temp = T.dot(x, self.W_x_z) + T.dot(self.W_h_z.T, T.dot(weighted_mask, h_drop.T).T).T + self.b_z
p_y_given_x = T.cast(T.nnet.softmax(temp), dtype=theano.config.floatX)
pred_y = T.argmax(p_y_given_x, axis=1)
log_loss = T.sum(-T.log(p_y_given_x[T.arange(0, n_nodes), y])) / T.cast(n_nodes, dtype=theano.config.floatX)
penalty = T.sum([T.sum(p ** 2) for p in self.params])
cost = log_loss + alpha * penalty
gradients = [T.grad(cost, param) for param in self.params]
if update == 'adagrad':
new_grad_histories = [
T.cast(g_hist + g ** 2, dtype=theano.config.floatX)
for g_hist, g in zip(self.grad_histories, gradients)
]
grad_hist_update = zip(self.grad_histories, new_grad_histories)
# note: the embedding matrix is not included in self.params, so it is not updated by these rules
param_updates = [(param, T.cast(param - lr / (T.sqrt(g_hist) + self.epsilon) * param_grad, dtype=theano.config.floatX))
for param, param_grad, g_hist in zip(self.params, gradients, new_grad_histories)]
updates = grad_hist_update + param_updates
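# The AdaGrad rule implemented above: theta <- theta - lr * g / (sqrt(sum of past g^2) + epsilon)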
# sgd with momentum
elif update == 'sgdm':
velocity_t = [momentum * v + lr * g for v, g in zip(self.velocity, gradients)]
velocity_updates = [(v, T.cast(v_t, theano.config.floatX)) for v, v_t in zip(self.velocity, velocity_t)]
param_updates = [(param, T.cast(param - v_t, theano.config.floatX)) for param, v_t in zip(self.params, velocity_t)]
updates = velocity_updates + param_updates
# else, sgd
else:
updates = OrderedDict((p, T.cast(p - lr * g, dtype=theano.config.floatX)) for p, g in zip(self.params, gradients))
self.train = theano.function(inputs=[idxs, input_idx, edges_idx, edge_mask, output_mask, y, lr, is_train, drop_x],
outputs=[pred_y, p_y_given_x, log_loss, cost], updates=updates, on_unused_input='ignore')
self.predict = theano.function(inputs=[idxs, input_idx, edges_idx, edge_mask, output_mask, is_train, drop_x],
outputs=[pred_y, p_y_given_x], on_unused_input='ignore')
# good example of how to see a value in a tensor; way easier than theano.printing.Print()
idx = T.iscalar('idx')
emb = self.emb[idx]
self.get_embedding = theano.function(inputs=[idx], outputs=emb)
def print_params(self):
for param in self.params:
print param.name, param.get_value()
def main():
usage = "%prog input_dir"
parser = OptionParser(usage=usage)
parser.add_option('-a', dest='alpha', default=0.000002,
help='Regularization strength: default=%default')
parser.add_option('-d', dest='hidden_dim', default=150,
help='Hidden node dimension: default=%default')
parser.add_option('-e', dest='epochs', default=20,
help='Number of epochs: default=%default')
parser.add_option('-i', dest='iter_display', default=5000,
help='Number of iterations between output: default=%default')
parser.add_option('-o', dest='optimization', default='adagrad',
help='Optimization method [sgd|sgdm|adagrad]: default=%default')
parser.add_option('-l', dest='learning_rate', default=0.002,
help='Initial learning rate: default=%default')
parser.add_option('--emb_lr', dest='emb_lr', default=0.004,
help='Learning rate for embeddings (not for sgd): default=%default')
parser.add_option('--decay', dest='decay', default=0.95,
help='Learning rate decay (sgd|sgdm only): default=%default')
parser.add_option('--momentum', dest='momentum', default=0.5,
help='Momentum parameter (sgdm only): default=%default')
parser.add_option('-s', dest='seed', default=42,
help='Random seed: default=%default')
parser.add_option('--word2vec_file', dest='word2vec_file', default='',
help='Location of word2vec file: default=do not load')
parser.add_option('--save_vectors', action="store_true", dest="save_vectors", default=False,
help='Save relevant word vectors (for faster loading next time)')
parser.add_option('--root_only', action="store_true", dest="root_only", default=False,
help='Train only on the full sentences (not subtrees): default=%default')
parser.add_option('--no_eval', action="store_true", dest="no_eval", default=False,
help='Skip the evaluation between epochs: default=%default')
parser.add_option('--drop_x', action="store_true", dest="drop_x", default=False,
help='Add dropout to the input layer: default=%default')
(options, args) = parser.parse_args()
input_dir = args[0]
seed = int(options.seed)
n_epochs = int(options.epochs)
alpha = float(options.alpha)
lr = float(options.learning_rate)
emb_lr = float(options.emb_lr)
iter_display = int(options.iter_display)
opti_method = options.optimization
lr_decay = float(options.decay)
momentum = float(options.momentum)
word2vec_file = options.word2vec_file
save_vectors = options.save_vectors
root_only = options.root_only
no_eval = options.no_eval
drop_x = int(options.drop_x)
nc = 5
if seed > 0:
np.random.seed(seed)
random.seed(seed)
dh = int(options.hidden_dim)
dx = 300
de = 2
np.__config__.show()
# load sentiment trees
print "Loading trees"
trees, train_vocab, edge_vocab = load_ptb_bitrees(input_dir, "train", root_only=root_only)
print len(trees), "train trees loaded"
ne = len(edge_vocab) # number of types of edges
train_root_trees, _, _ = load_ptb_bitrees(input_dir, "train", root_only=True)
print len(train_root_trees), "train root trees loaded"
dev_root_trees, dev_vocab, dev_edge_vocab = load_ptb_bitrees(input_dir, "dev", root_only=True)
print len(dev_root_trees), "dev root trees loaded"
test_root_trees, test_vocab, test_edge_vocab = load_ptb_bitrees(input_dir, "test", root_only=True)
print len(test_root_trees), "test root trees loaded"
if word2vec_file != '':
# load pre-trained word vectors
print "Loading pre-trained word vectors"
vectors = gensim.models.Word2Vec.load_word2vec_format(word2vec_file, binary=True)
pruned_vocab = set()
for v in list(train_vocab):
if v in vectors:
pruned_vocab.add(v)
pruned_vocab.add('_')
pruned_vocab.add('_OOV_')
vocab = list(pruned_vocab)
vocab.sort()
vocab_size = len(vocab)
vocab_index = dict(zip(vocab, range(vocab_size)))
print "Preparing word vectors"
missing_count = 0
total_count = 0
initial_embeddings = np.zeros([vocab_size, dx], dtype=np.float32)
for v, i in vocab_index.items():
total_count += 1
if v == '_':
initial_embeddings[i, :] = np.zeros(dx)
elif v == '_OOV_':
initial_embeddings[i, :] = 0.05 * np.random.uniform(-1.0, 1.0, (1, dx))
elif v in vectors:
initial_embeddings[i, :] = vectors[v]
else:
sys.exit('word not in vocab')
if save_vectors:
print "Saving word vectors"
fh.pickle_data(initial_embeddings, os.path.join(input_dir, 'initial_embeddings.pkl'))
fh.write_to_json(vocab, os.path.join(input_dir, 'vocab.json'))
else:
print "Loading relevant word vectors"
initial_embeddings = fh.unpickle_data(os.path.join(input_dir, 'initial_embeddings.pkl'))
vocab = fh.read_json(os.path.join(input_dir, 'vocab.json'))
vocab.sort()
vocab_size = len(vocab)
vocab_index = dict(zip(vocab, range(vocab_size)))
# create edge vectors
#edge_embeddings = np.zeros([ne, de], dtype=float)
edge_embeddings = np.eye(2, dtype=np.float32)
print len(vocab), "words in pruned vocab"
print len(list(train_vocab - set(vocab))), "words missing from training vocabulary"
print len(list(dev_vocab - set(vocab))), "words missing from dev vocabulary"
print len(list(test_vocab - set(vocab))), "words missing from test vocabulary"
print "Indexing words"
for k, t in trees.items():
t['idxs'] = [vocab_index[w] if w in vocab_index else vocab_index['_OOV_'] for w in t['words']]
for k, t in train_root_trees.items():
t['idxs'] = [vocab_index[w] if w in vocab_index else vocab_index['_OOV_'] for w in t['words']]
if not no_eval:
for k, t in dev_root_trees.items():
t['idxs'] = [vocab_index[w] if w in vocab_index else vocab_index['_OOV_'] for w in t['words']]
for k, t in test_root_trees.items():
t['idxs'] = [vocab_index[w] if w in vocab_index else vocab_index['_OOV_'] for w in t['words']]
# create the LSTM
theano_seed = np.random.randint(2 ** 30)
ctreeLSTM = DyTreeLSTM(vocab_size, dh, dx, de, ne, nc, initial_embeddings=initial_embeddings, alpha=alpha,
update=opti_method, seed=theano_seed, momentum=momentum, edge_embeddings=edge_embeddings)
tree_lengths = [(len(tree['words']), key) for key, tree in trees.items()]
tree_lengths.sort()
#keys = trees.keys()
#random.shuffle(keys)
#print "Null vector:"
#print vocab_index['_']
#print np.array(ctreeLSTM.get_embedding(vocab_index['_']))
if not no_eval:
print "Pre-training evaluation"
train_z_o_loss, train_log_loss = evaluate(train_root_trees, ctreeLSTM, vocab_index, drop_x)
valid_z_o_loss, valid_log_loss = evaluate(dev_root_trees, ctreeLSTM, vocab_index, drop_x)
test_z_o_loss, test_log_loss = evaluate(test_root_trees, ctreeLSTM, vocab_index, drop_x)
print ('epoch=%d\ttrain_0/1=%.3f\ttrain_log=%.3f\tdev_0/1=%.3f\tdev_log=%.3f\ttest_0/1=%.3f\ttest_log=%.3f') % \
(-1, train_z_o_loss, train_log_loss, valid_z_o_loss, valid_log_loss, test_z_o_loss, test_log_loss)
print "Training"
for epoch in range(n_epochs):
sum_log_loss = 0
sum_loss = 0
mistakes = 0
n_trees = 0
# sort by keys on the first pass, then shuffle
if epoch == 0:
keys = [key for length, key in tree_lengths]
else:
trees = train_root_trees
keys = trees.keys()
random.shuffle(keys)
print "epoch\titems\tloss\tl+reg\terrs"
for k_i, t_i in enumerate(keys):
t = trees[t_i]
idxs = t['idxs']
input_idx = t['input_idx']
edges_idx = t['edges']
edge_mask = t['edge_mask']
output_mask = t['output_mask']
true_values = t['values']
n_nodes, n_edges = output_mask.shape
n_trees += n_nodes
pred_y, p_y_given_x, log_loss, loss = ctreeLSTM.train(idxs, input_idx, edges_idx, edge_mask, output_mask, true_values, lr, 1, drop_x)
sum_log_loss += log_loss
sum_loss += loss
#if pred_y != true_values[node_to_eval]:
# mistakes += 1
for i in range(n_nodes):
if pred_y[i] != true_values[i]:
mistakes += 1
if k_i % iter_display == 0:
d = float(n_trees)
#print t['words']
#print list(true_values)
#print list(pred_y)
print '%d\t%d\t%.4f\t%.4f\t%.4f' % \
(epoch, k_i, sum_log_loss/d, sum_loss/d, mistakes/d)
if not no_eval:
train_z_o_loss, train_log_loss = evaluate(train_root_trees, ctreeLSTM, vocab_index, drop_x)
valid_z_o_loss, valid_log_loss = evaluate(dev_root_trees, ctreeLSTM, vocab_index, drop_x)
test_z_o_loss, test_log_loss = evaluate(test_root_trees, ctreeLSTM, vocab_index, drop_x)
print ('epoch=%d\ttrain_0/1=%.3f\ttrain_log=%.3f\tdev_0/1=%.3f\tdev_log=%.3f\ttest_0/1=%.3f\ttest_log=%.3f') % \
(epoch, train_z_o_loss, train_log_loss, valid_z_o_loss, valid_log_loss, test_z_o_loss, test_log_loss)
lr *= lr_decay
def evaluate(trees, rnn, vocab_index, drop_x):
zero_one_loss = 0
log_loss = 0
n_trees = len(trees)
for k_i, key in enumerate(trees.keys()):
t = trees[key]
idxs = t['idxs']
input_idx = t['input_idx']
edges_idx = t['edges']
edge_mask = t['edge_mask']
output_mask = t['output_mask']
true_values = t['values']
n_nodes, n_edges = output_mask.shape
"""
print t['words']
print "input_idx", input_idx.shape
print "edge_mask", edge_mask.shape
print "output_mask", output_mask.shape
"""
pred_y, p_y_given_x = rnn.predict(idxs, input_idx, edges_idx, edge_mask, output_mask, 0, drop_x)
"""
t['tree'].print_tree()
print t['tree'].root.get_children_in_sequence()
print t['tree'].root.get_edges_in_sequence()
print t['words']
print t['output_mask']
print p_y_given_x
print pred_y
print pred_y.shape
sys.exit()
"""
# only evaluate the root node (index = 0)
log_loss += -np.log(p_y_given_x[0, true_values[0]])
if pred_y[0] != true_values[0]:
zero_one_loss += 1
if k_i % 1000 == 0 and k_i > 0:
print t['words']
print "true:", list(true_values)
print "pred:", list(pred_y)
print "root probabilities:", p_y_given_x[0, :]
print k_i, log_loss, zero_one_loss
return zero_one_loss/float(n_trees), log_loss/float(n_trees)
if __name__ == '__main__':
main()
|
|
# Copyright 2015 CloudByte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
from six.moves import http_client
from six.moves import urllib
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.cloudbyte import options
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
class CloudByteISCSIDriver(san.SanISCSIDriver):
"""CloudByte ISCSI Driver.
Version history:
1.0.0 - Initial driver
1.1.0 - Add chap support and minor bug fixes
"""
VERSION = '1.1.0'
volume_stats = {}
def __init__(self, *args, **kwargs):
super(CloudByteISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(
options.cloudbyte_add_qosgroup_opts)
self.configuration.append_config_values(
options.cloudbyte_create_volume_opts)
self.configuration.append_config_values(
options.cloudbyte_connection_opts)
self.cb_use_chap = self.configuration.use_chap_auth
self.get_volume_stats()
def _get_url(self, cmd, params, apikey):
"""Will prepare URL that connects to CloudByte."""
if params is None:
params = {}
params['command'] = cmd
params['response'] = 'json'
sanitized_params = {}
for key in params:
value = params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
url = ('/client/api?%s' % sanitized_params)
LOG.debug("CloudByte URL to be executed: [%s].", url)
# Add the apikey
api = {}
api['apiKey'] = apikey
url = url + '&' + urllib.parse.urlencode(api)
return url
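# Illustrative result (command, ids and key are made up; parameter order
# depends on urlencode/dict iteration):
#   /client/api?command=listTsm&response=json&accountid=42&apiKey=EXAMPLEKEY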
def _extract_http_error(self, error_data):
# Extract the error message from error_data
error_msg = ""
# error_data is a single key value dict
for key, value in error_data.items():
error_msg = value.get('errortext')
return error_msg
def _execute_and_get_response_details(self, host, url):
"""Will prepare response after executing an http request."""
res_details = {}
try:
# Prepare the connection
connection = http_client.HTTPSConnection(host)
# Make the connection
connection.request('GET', url)
# Extract the response as the connection was successful
response = connection.getresponse()
# Read the response
data = response.read()
# Transform the json string into a py object
data = json.loads(data)
# Extract http error msg if any
error_details = None
if response.status != 200:
error_details = self._extract_http_error(data)
# Prepare the return object
res_details['data'] = data
res_details['error'] = error_details
res_details['http_status'] = response.status
finally:
connection.close()
LOG.debug("CloudByte connection was closed successfully.")
return res_details
def _api_request_for_cloudbyte(self, cmd, params, version=None):
"""Make http calls to CloudByte."""
LOG.debug("Executing CloudByte API for command [%s].", cmd)
if version is None:
version = CloudByteISCSIDriver.VERSION
# Below is retrieved from /etc/cinder/cinder.conf
apikey = self.configuration.cb_apikey
if apikey is None:
msg = (_("API key is missing for CloudByte driver."))
raise exception.VolumeBackendAPIException(data=msg)
host = self.configuration.san_ip
# Construct the CloudByte URL with query params
url = self._get_url(cmd, params, apikey)
data = {}
error_details = None
http_status = None
try:
# Execute CloudByte API & frame the response
res_obj = self._execute_and_get_response_details(host, url)
data = res_obj['data']
error_details = res_obj['error']
http_status = res_obj['http_status']
except http_client.HTTPException as ex:
msg = (_("Error executing CloudByte API [%(cmd)s], "
"Error: %(err)s.") %
{'cmd': cmd, 'err': ex})
raise exception.VolumeBackendAPIException(data=msg)
# Check if it was an error response from CloudByte
if http_status != 200:
msg = (_("Failed to execute CloudByte API [%(cmd)s]."
" Http status: %(status)s,"
" Error: %(error)s.") %
{'cmd': cmd, 'status': http_status,
'error': error_details})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("CloudByte API executed successfully for command [%s]."),
cmd)
return data
def _request_tsm_details(self, account_id):
params = {"accountid": account_id}
# List all CloudByte tsm
data = self._api_request_for_cloudbyte("listTsm", params)
return data
def _override_params(self, default_dict, filtered_user_dict):
"""Override the default config values with user provided values."""
if filtered_user_dict is None:
# Nothing to override
return default_dict
for key, value in default_dict.items():
# Fill the user dict with default options based on condition
if filtered_user_dict.get(key) is None and value is not None:
filtered_user_dict[key] = value
return filtered_user_dict
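# Sketch of the merge semantics above (keys/values are illustrative):
#   _override_params({'iops': '100', 'latency': '15'}, {'iops': '300'})
#   -> {'iops': '300', 'latency': '15'}  # user value wins, missing key filled in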
def _add_qos_group_request(self, volume, tsmid, volume_name):
# Get qos related params from configuration
params = self.configuration.cb_add_qosgroup
if params is None:
params = {}
params['name'] = "QoS_" + volume_name
params['tsmid'] = tsmid
data = self._api_request_for_cloudbyte("addQosGroup", params)
return data
def _create_volume_request(self, volume, datasetid, qosgroupid,
tsmid, volume_name):
size = volume.get('size')
quotasize = six.text_type(size) + "G"
# Prepare the user input params
params = {
"datasetid": datasetid,
"name": volume_name,
"qosgroupid": qosgroupid,
"tsmid": tsmid,
"quotasize": quotasize
}
# Get the additional params from configuration
params = self._override_params(self.configuration.cb_create_volume,
params)
data = self._api_request_for_cloudbyte("createVolume", params)
return data
def _queryAsyncJobResult_request(self, jobid):
async_cmd = "queryAsyncJobResult"
params = {
"jobId": jobid,
}
data = self._api_request_for_cloudbyte(async_cmd, params)
return data
def _get_tsm_details(self, data, tsm_name, account_name):
# Filter required tsm's details
tsms = data['listTsmResponse'].get('listTsm')
if tsms is None:
msg = (_("TSM [%(tsm)s] was not found in CloudByte storage "
"for account [%(account)s].") %
{'tsm': tsm_name, 'account': account_name})
raise exception.VolumeBackendAPIException(data=msg)
tsmdetails = {}
for tsm in tsms:
if tsm['name'] == tsm_name:
tsmdetails['datasetid'] = tsm['datasetid']
tsmdetails['tsmid'] = tsm['id']
break
return tsmdetails
def _wait_for_volume_creation(self, volume_response, cb_volume_name):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('createvolumeresponse')
if vol_res is None:
msg = _("Null response received while creating volume [%s] "
"at CloudByte storage.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Jobid not found in CloudByte's "
"create volume [%s] response.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
def _retry_check_for_volume_creation():
"""Called at an interval till the volume is created."""
retries = kwargs['retries']
max_retries = kwargs['max_retries']
jobid = kwargs['jobid']
cb_vol = kwargs['cb_vol']
# Query the CloudByte storage with this jobid
volume_response = self._queryAsyncJobResult_request(jobid)
result_res = None
if volume_response is not None:
result_res = volume_response.get('queryasyncjobresultresponse')
if volume_response is None or result_res is None:
msg = _(
"Null response received while querying "
"for create volume job [%s] "
"at CloudByte storage.") % jobid
raise exception.VolumeBackendAPIException(data=msg)
status = result_res.get('jobstatus')
if status == 1:
LOG.info(_LI("Volume [%s] created successfully in "
"CloudByte storage."), cb_vol)
raise loopingcall.LoopingCallDone()
elif retries == max_retries:
# All attempts exhausted
LOG.error(_LE("Error in creating volume [%(vol)s] in "
"CloudByte storage. "
"Exhausted all [%(max)s] attempts."),
{'vol': cb_vol, 'max': retries})
raise loopingcall.LoopingCallDone(retvalue=False)
else:
retries += 1
kwargs['retries'] = retries
LOG.debug("Wait for volume [%(vol)s] creation, "
"retry [%(retry)s] of [%(max)s].",
{'vol': cb_vol,
'retry': retries,
'max': max_retries})
retry_interval = (
self.configuration.cb_confirm_volume_create_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_create_retries)
kwargs = {'retries': 0,
'max_retries': max_retries,
'jobid': jobid,
'cb_vol': cb_volume_name}
timer = loopingcall.FixedIntervalLoopingCall(
_retry_check_for_volume_creation)
timer.start(interval=retry_interval).wait()
def _get_volume_id_from_response(self, cb_volumes, volume_name):
"""Search the volume in CloudByte storage."""
vol_res = cb_volumes.get('listFilesystemResponse')
if vol_res is None:
msg = _("Null response received from CloudByte's "
"list filesystem.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = vol_res.get('filesystem')
if volumes is None:
msg = _('No volumes found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['name'] == volume_name:
volume_id = vol['id']
break
if volume_id is None:
msg = _("Volume [%s] not found in CloudByte "
"storage.") % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return volume_id
def _get_qosgroupid_id_from_response(self, cb_volumes, volume_id):
volumes = cb_volumes['listFilesystemResponse']['filesystem']
qosgroup_id = None
for vol in volumes:
if vol['id'] == volume_id:
qosgroup_id = vol['groupid']
break
return qosgroup_id
def _build_provider_details_from_volume(self, volume, chap):
model_update = {}
model_update['provider_location'] = (
'%s %s %s' % (volume['ipaddress'] + ':3260', volume['iqnname'], 0)
)
# Default to no authentication; overridden below when CHAP info is available
model_update['provider_auth'] = None
if chap:
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
model_update['provider_id'] = volume['id']
LOG.debug("CloudByte volume iqn: [%(iqn)s] provider id: [%(proid)s].",
{'iqn': volume['iqnname'], 'proid': volume['id']})
return model_update
def _build_provider_details_from_response(self,
cb_volumes,
volume_name,
chap):
"""Get provider information."""
model_update = {}
volumes = cb_volumes['listFilesystemResponse']['filesystem']
for vol in volumes:
if vol['name'] == volume_name:
model_update = self._build_provider_details_from_volume(vol,
chap)
break
return model_update
def _get_initiator_group_id_from_response(self, data):
"""Find iSCSI initiator group id."""
ig_list_res = data.get('listInitiatorsResponse')
if ig_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi initiators.")
raise exception.VolumeBackendAPIException(data=msg)
ig_list = ig_list_res.get('initiator')
if ig_list is None:
msg = _('No iscsi initiators were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ig_id = None
for ig in ig_list:
if ig.get('initiatorgroup') == 'ALL':
ig_id = ig['id']
break
return ig_id
def _get_iscsi_service_id_from_response(self, volume_id, data):
iscsi_service_res = data.get('listVolumeiSCSIServiceResponse')
if iscsi_service_res is None:
msg = _("Null response received from CloudByte's "
"list volume iscsi service.")
raise exception.VolumeBackendAPIException(data=msg)
iscsi_service_list = iscsi_service_res.get('iSCSIService')
if iscsi_service_list is None:
msg = _('No iscsi services found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
iscsi_id = None
for iscsi_service in iscsi_service_list:
if iscsi_service['volume_id'] == volume_id:
iscsi_id = iscsi_service['id']
break
if iscsi_id is None:
msg = _("No iscsi service found for CloudByte "
"volume [%s].") % volume_id
raise exception.VolumeBackendAPIException(data=msg)
else:
return iscsi_id
def _request_update_iscsi_service(self, iscsi_id, ig_id, ag_id):
params = {
"id": iscsi_id,
"igid": ig_id
}
if ag_id:
params['authgroupid'] = ag_id
params['authmethod'] = "CHAP"
self._api_request_for_cloudbyte(
'updateVolumeiSCSIService', params)
def _get_cb_snapshot_path(self, snapshot_name, volume_id):
"""Find CloudByte snapshot path."""
params = {"id": volume_id}
# List all snapshot from CloudByte
cb_snapshots_list = self._api_request_for_cloudbyte(
'listStorageSnapshots', params)
# Filter required snapshot from list
cb_snap_res = cb_snapshots_list.get('listDatasetSnapshotsResponse')
cb_snapshot = {}
if cb_snap_res is not None:
cb_snapshot = cb_snap_res.get('snapshot')
path = None
# Filter snapshot path
for snap in cb_snapshot:
if snap['name'] == snapshot_name:
path = snap['path']
break
return path
def _get_account_id_from_name(self, account_name):
params = {}
data = self._api_request_for_cloudbyte("listAccount", params)
accounts = data["listAccountResponse"]["account"]
account_id = None
for account in accounts:
if account.get("name") == account_name:
account_id = account.get("id")
break
if account_id is None:
msg = _("Failed to get CloudByte account details "
"for account [%s].") % account_name
raise exception.VolumeBackendAPIException(data=msg)
return account_id
def _search_volume_id(self, cb_volumes, cb_volume_id):
"""Search the volume in CloudByte."""
volumes_res = cb_volumes.get('listFilesystemResponse')
if volumes_res is None:
msg = _("No response was received from CloudByte's "
"list filesystem api call.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = volumes_res.get('filesystem')
if volumes is None:
msg = _("No volume was found at CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['id'] == cb_volume_id:
volume_id = vol['id']
break
return volume_id
def _get_storage_info(self, tsmname):
"""Get CloudByte TSM that is associated with OpenStack backend."""
# List all TSMs from CloudByte storage
tsm_list = self._api_request_for_cloudbyte('listTsm', params={})
tsm_details_res = tsm_list.get('listTsmResponse')
if tsm_details_res is None:
msg = _("No response was received from CloudByte storage "
"list tsm API call.")
raise exception.VolumeBackendAPIException(data=msg)
tsm_details = tsm_details_res.get('listTsm')
data = {}
flag = 0
# Filter required TSM and get storage info
for tsms in tsm_details:
if tsms['name'] == tsmname:
flag = 1
data['total_capacity_gb'] = (
float(tsms['numericquota']) / units.Ki)
data['free_capacity_gb'] = (
float(tsms['availablequota']) / units.Ki)
break
# TSM not found in CloudByte storage
if flag == 0:
LOG.error(_LE("TSM [%s] not found in CloudByte storage."), tsmname)
data['total_capacity_gb'] = 0.0
data['free_capacity_gb'] = 0.0
return data
def _get_auth_group_id_from_response(self, data):
"""Find iSCSI auth group id."""
chap_group = self.configuration.cb_auth_group
ag_list_res = data.get('listiSCSIAuthGroupResponse')
if ag_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi auth groups.")
raise exception.VolumeBackendAPIException(data=msg)
ag_list = ag_list_res.get('authgroup')
if ag_list is None:
msg = _('No iscsi auth groups were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ag_id = None
for ag in ag_list:
if ag.get('name') == chap_group:
ag_id = ag['id']
break
else:
msg = _("Auth group [%s] details not found in "
"CloudByte storage.") % chap_group
raise exception.VolumeBackendAPIException(data=msg)
return ag_id
def _get_auth_group_info(self, account_id, ag_id):
"""Fetch the auth group details."""
params = {"accountid": account_id, "authgroupid": ag_id}
auth_users = self._api_request_for_cloudbyte(
'listiSCSIAuthUser', params)
auth_user_details_res = auth_users.get('listiSCSIAuthUsersResponse')
if auth_user_details_res is None:
msg = _("No response was received from CloudByte storage "
"list iSCSI auth user API call.")
raise exception.VolumeBackendAPIException(data=msg)
auth_user_details = auth_user_details_res.get('authuser')
if auth_user_details is None:
msg = _("Auth user details not found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
chapuser = auth_user_details[0].get('chapusername')
chappassword = auth_user_details[0].get('chappassword')
if chapuser is None or chappassword is None:
msg = _("Invalid chap user details found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
data = {'username': chapuser, 'password': chappassword, 'ag_id': ag_id}
return data
def _get_chap_info(self, account_id):
"""Fetch the chap details."""
params = {"accountid": account_id}
iscsi_auth_data = self._api_request_for_cloudbyte(
'listiSCSIAuthGroup', params)
ag_id = self._get_auth_group_id_from_response(
iscsi_auth_data)
return self._get_auth_group_info(account_id, ag_id)
def _export(self):
model_update = {'provider_auth': None}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
account_id = self._get_account_id_from_name(account_name)
chap = self._get_chap_info(account_id)
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
return model_update
def create_volume(self, volume):
tsm_name = self.configuration.cb_tsm_name
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
# Set backend storage volume name using OpenStack volume id
cb_volume_name = volume['id'].replace("-", "")
LOG.debug("Will create a volume [%(cb_vol)s] in TSM [%(tsm)s] "
"at CloudByte storage w.r.t "
"OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_name,
'stack_vol': volume.get('id'),
'tsm': tsm_name})
tsm_data = self._request_tsm_details(account_id)
tsm_details = self._get_tsm_details(tsm_data, tsm_name, account_name)
# Send request to create a qos group before creating a volume
LOG.debug("Creating qos group for CloudByte volume [%s].",
cb_volume_name)
qos_data = self._add_qos_group_request(
volume, tsm_details.get('tsmid'), cb_volume_name)
# Extract the qos group id from response
qosgroupid = qos_data['addqosgroupresponse']['qosgroup']['id']
LOG.debug("Successfully created qos group for CloudByte volume [%s].",
cb_volume_name)
# Send a create volume request to CloudByte API
vol_data = self._create_volume_request(
volume, tsm_details.get('datasetid'), qosgroupid,
tsm_details.get('tsmid'), cb_volume_name)
# Since create volume is an async call;
# need to confirm the creation before proceeding further
self._wait_for_volume_creation(vol_data, cb_volume_name)
# Fetch iscsi id
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params={})
volume_id = self._get_volume_id_from_response(cb_volumes,
cb_volume_name)
params = {"storageid": volume_id}
iscsi_service_data = self._api_request_for_cloudbyte(
'listVolumeiSCSIService', params)
iscsi_id = self._get_iscsi_service_id_from_response(
volume_id, iscsi_service_data)
# Fetch the initiator group ID
params = {"accountid": account_id}
iscsi_initiator_data = self._api_request_for_cloudbyte(
'listiSCSIInitiator', params)
ig_id = self._get_initiator_group_id_from_response(
iscsi_initiator_data)
LOG.debug("Updating iscsi service for CloudByte volume [%s].",
cb_volume_name)
ag_id = None
chap_info = {}
if self.cb_use_chap is True:
chap_info = self._get_chap_info(account_id)
ag_id = chap_info['ag_id']
# Update the iscsi service with above fetched iscsi_id & ig_id
self._request_update_iscsi_service(iscsi_id, ig_id, ag_id)
LOG.debug("CloudByte volume [%(vol)s] updated with "
"iscsi id [%(iscsi)s] and initiator group [%(ig)s] and "
"authentication group [%(ag)s].",
{'vol': cb_volume_name, 'iscsi': iscsi_id,
'ig': ig_id, 'ag': ag_id})
# Provide the model after successful completion of above steps
provider = self._build_provider_details_from_response(
cb_volumes, cb_volume_name, chap_info)
LOG.info(_LI("Successfully created a CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_name, 'stack_vol': volume.get('id')})
return provider
def delete_volume(self, volume):
params = {}
# OpenStack source volume id
source_volume_id = volume['id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
LOG.debug("Will delete CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_id, 'stack_vol': source_volume_id})
# Delete volume at CloudByte
if cb_volume_id is not None:
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params)
# Search cb_volume_id in CloudByte volumes
# in case it has already been deleted from CloudByte
cb_volume_id = self._search_volume_id(cb_volumes, cb_volume_id)
# Delete volume at CloudByte
if cb_volume_id is not None:
params = {"id": cb_volume_id}
self._api_request_for_cloudbyte('deleteFileSystem', params)
LOG.info(
_LI("Successfully deleted volume [%(cb_vol)s] "
"at CloudByte corresponding to "
"OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte does not have a volume corresponding "
"to OpenStack volume [%s]."), source_volume_id)
else:
LOG.error(_LE("CloudByte volume information not available for"
" OpenStack volume [%s]."), source_volume_id)
def create_snapshot(self, snapshot):
"""Creates a snapshot at CloudByte."""
# OpenStack volume
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
if cb_volume_id is not None:
# Set backend storage snapshot name using OpenStack snapshot id
snapshot_name = "snap_" + snapshot['id'].replace("-", "")
params = {
"name": snapshot_name,
"id": cb_volume_id
}
LOG.debug(
"Will create CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s].",
{'cb_snap': snapshot_name,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
self._api_request_for_cloudbyte('createStorageSnapshot', params)
# Get the snapshot path from CloudByte
path = self._get_cb_snapshot_path(snapshot_name, cb_volume_id)
LOG.info(
_LI("Created CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s]."),
{'cb_snap': path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
model_update = {}
# Store snapshot path as snapshot provider_id
model_update['provider_id'] = path
else:
msg = _("Failed to create snapshot. CloudByte volume information "
"not found for OpenStack volume [%s].") % source_volume_id
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def create_cloned_volume(self, cloned_volume, src_volume):
"""Create a clone of an existing volume.
First it creates a snapshot of the source/parent volume,
then it creates a clone of that newly created snapshot.
"""
# Extract necessary information from input params
parent_volume_id = cloned_volume.get('source_volid')
# Generating id for snapshot
# as this is not user entered in this particular usecase
snapshot_id = six.text_type(uuid.uuid1())
# Prepare the params for create_snapshot
# as well as create_volume_from_snapshot method
snapshot_params = {
'id': snapshot_id,
'volume_id': parent_volume_id,
'volume': src_volume,
}
# Create a snapshot
snapshot = self.create_snapshot(snapshot_params)
snapshot_params['provider_id'] = snapshot.get('provider_id')
# Create a clone of above snapshot
return self.create_volume_from_snapshot(cloned_volume, snapshot_params)
def create_volume_from_snapshot(self, cloned_volume, snapshot):
"""Create a clone from an existing snapshot."""
# Getting necessary data from input params
parent_volume_id = snapshot['volume_id']
cloned_volume_name = cloned_volume['id'].replace("-", "")
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
params = {
"id": cb_volume_id,
"clonename": cloned_volume_name,
"path": cb_snapshot_path
}
LOG.debug(
"Will create CloudByte clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s].",
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
# Create clone of the snapshot
clone_dataset_snapshot_res = (
self._api_request_for_cloudbyte('cloneDatasetSnapshot', params))
cb_snap = clone_dataset_snapshot_res.get('cloneDatasetSnapshot')
cb_vol = {}
if cb_snap is not None:
cb_vol = cb_snap.get('filesystem')
else:
msg = ("Error: Clone creation failed for "
"OpenStack volume [%(vol)s] with CloudByte "
"snapshot path [%(path)s]" %
{'vol': parent_volume_id, 'path': cb_snapshot_path})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(
_LI("Created a clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s]."),
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
chap_info = {}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
chap_info = self._get_chap_info(account_id)
model_update = self._build_provider_details_from_volume(cb_vol,
chap_info)
return model_update
def delete_snapshot(self, snapshot):
"""Delete a snapshot at CloudByte."""
# Find volume id
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
# If cb_snapshot_path is 'None'
# then no need to execute CloudByte API
if cb_snapshot_path is not None:
params = {
"id": cb_volume_id,
"path": cb_snapshot_path
}
LOG.debug("Will delete CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s].",
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
# Execute CloudByte API
self._api_request_for_cloudbyte('deleteSnapshot', params)
LOG.info(
_LI("Deleted CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s]."),
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte snapshot information is not available"
" for OpenStack volume [%s]."), source_volume_id)
def extend_volume(self, volume, new_size):
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
params = {
"id": cb_volume_id,
"quotasize": six.text_type(new_size) + 'G'
}
# Request the CloudByte api to update the volume
self._api_request_for_cloudbyte('updateFileSystem', params)
def create_export(self, context, volume):
"""Setup the iscsi export info."""
return self._export()
def ensure_export(self, context, volume):
"""Verify the iscsi export info."""
return self._export()
def get_volume_stats(self, refresh=False):
"""Get volume statistics.
If 'refresh' is True, update/refresh the statistics first.
"""
if refresh:
# Get the TSM name from configuration
tsm_name = self.configuration.cb_tsm_name
# Get the storage details of this TSM
data = self._get_storage_info(tsm_name)
data["volume_backend_name"] = (
self.configuration.safe_get('volume_backend_name') or
'CloudByte')
data["vendor_name"] = 'CloudByte'
data['reserved_percentage'] = 0
data["driver_version"] = CloudByteISCSIDriver.VERSION
data["storage_protocol"] = 'iSCSI'
LOG.debug("CloudByte driver stats: [%s].", data)
# Set this to the instance variable
self.volume_stats = data
return self.volume_stats
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""XMPP API.
This module allows AppEngine apps to interact with a bot representing that app
on the Google Talk network.
Functions defined in this module:
get_presence: Gets the presence for a JID.
send_message: Sends a chat message to any number of JIDs.
send_invite: Sends an invitation to chat to a JID.
send_presence: Sends a presence to a JID.
Classes defined in this module:
Message: A class to encapsulate received messages.
"""
from google.appengine.api import apiproxy_stub_map
from google.appengine.api.xmpp import xmpp_service_pb
from google.appengine.runtime import apiproxy_errors
NO_ERROR = xmpp_service_pb.XmppMessageResponse.NO_ERROR
INVALID_JID = xmpp_service_pb.XmppMessageResponse.INVALID_JID
OTHER_ERROR = xmpp_service_pb.XmppMessageResponse.OTHER_ERROR
MESSAGE_TYPE_NONE = ""
MESSAGE_TYPE_CHAT = "chat"
MESSAGE_TYPE_ERROR = "error"
MESSAGE_TYPE_GROUPCHAT = "groupchat"
MESSAGE_TYPE_HEADLINE = "headline"
MESSAGE_TYPE_NORMAL = "normal"
_VALID_MESSAGE_TYPES = frozenset([MESSAGE_TYPE_NONE, MESSAGE_TYPE_CHAT,
MESSAGE_TYPE_ERROR, MESSAGE_TYPE_GROUPCHAT,
MESSAGE_TYPE_HEADLINE, MESSAGE_TYPE_NORMAL])
PRESENCE_TYPE_AVAILABLE = ""
PRESENCE_TYPE_UNAVAILABLE = "unavailable"
PRESENCE_TYPE_PROBE = "probe"
_VALID_PRESENCE_TYPES = frozenset([PRESENCE_TYPE_AVAILABLE,
PRESENCE_TYPE_UNAVAILABLE,
PRESENCE_TYPE_PROBE])
PRESENCE_SHOW_NONE = ""
PRESENCE_SHOW_AWAY = "away"
PRESENCE_SHOW_CHAT = "chat"
PRESENCE_SHOW_DND = "dnd"
PRESENCE_SHOW_XA = "xa"
_VALID_PRESENCE_SHOWS = frozenset([PRESENCE_SHOW_NONE, PRESENCE_SHOW_AWAY,
PRESENCE_SHOW_CHAT, PRESENCE_SHOW_DND,
PRESENCE_SHOW_XA])
MAX_STATUS_MESSAGE_SIZE = 1024
class Error(Exception):
"""Base error class for this module."""
class InvalidJidError(Error):
"""Error that indicates a request for an invalid JID."""
class InvalidTypeError(Error):
"""Error that indicates a request has an invalid type."""
class InvalidXmlError(Error):
"""Error that indicates a send message request has invalid XML."""
class NoBodyError(Error):
"""Error that indicates a send message request has no body."""
class InvalidMessageError(Error):
"""Error that indicates a received message was invalid or incomplete."""
class InvalidShowError(Error):
"""Error that indicates a send presence request has an invalid show."""
class InvalidStatusError(Error):
"""Error that indicates a send presence request has an invalid status."""
def get_presence(jid, from_jid=None):
"""Gets the presence for a JID.
Args:
jid: The JID of the contact whose presence is requested.
from_jid: The optional custom JID to use for sending. Currently, the default
is <appid>@appspot.com. This is supported as a value. Custom JIDs can be
of the form <anything>@<appid>.appspotchat.com.
Returns:
bool, Whether the user is online.
Raises:
InvalidJidError if any of the JIDs passed are invalid.
Error if an unspecified error happens processing the request.
"""
if not jid:
raise InvalidJidError()
request = xmpp_service_pb.PresenceRequest()
response = xmpp_service_pb.PresenceResponse()
request.set_jid(_to_str(jid))
if from_jid:
request.set_from_jid(_to_str(from_jid))
try:
apiproxy_stub_map.MakeSyncCall("xmpp",
"GetPresence",
request,
response)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
xmpp_service_pb.XmppServiceError.INVALID_JID):
raise InvalidJidError()
else:
raise Error()
return bool(response.is_available())
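# A minimal usage sketch of get_presence, using a hypothetical JID. The helper
# below is defined but never called, so importing this module stays
# side-effect free; in a real handler the result would drive app logic.
def _example_check_presence():
  if get_presence('someone@example.com'):
    return 'online'
  return 'offline'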
def send_invite(jid, from_jid=None):
"""Sends an invitation to chat to a JID.
Args:
jid: The JID of the contact to invite.
from_jid: The optional custom JID to use for sending. Currently, the default
is <appid>@appspot.com. This is supported as a value. Custom JIDs can be
of the form <anything>@<appid>.appspotchat.com.
Raises:
InvalidJidError if the JID passed is invalid.
Error if an unspecified error happens processing the request.
"""
if not jid:
raise InvalidJidError()
request = xmpp_service_pb.XmppInviteRequest()
response = xmpp_service_pb.XmppInviteResponse()
request.set_jid(_to_str(jid))
if from_jid:
request.set_from_jid(_to_str(from_jid))
try:
apiproxy_stub_map.MakeSyncCall("xmpp",
"SendInvite",
request,
response)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
xmpp_service_pb.XmppServiceError.INVALID_JID):
raise InvalidJidError()
else:
raise Error()
return
def send_message(jids, body, from_jid=None, message_type=MESSAGE_TYPE_CHAT,
raw_xml=False):
"""Sends a chat message to a list of JIDs.
Args:
jids: A list of JIDs to send the message to, or a single JID to send the
message to.
from_jid: The optional custom JID to use for sending. Currently, the default
is <appid>@appspot.com. This is supported as a value. Custom JIDs can be
of the form <anything>@<appid>.appspotchat.com.
body: The body of the message.
message_type: Optional type of the message. Should be one of the types
specified in RFC 3921, section 2.1.1. An empty string will result in a
message stanza without a type attribute. For convenience, all of the
valid types are in the MESSAGE_TYPE_* constants in this file. The
default is MESSAGE_TYPE_CHAT. Anything else will throw an exception.
raw_xml: Optionally specifies that the body should be interpreted as XML. If
this is false, the contents of the body will be escaped and placed inside
of a body element inside of the message. If this is true, the contents
will be made children of the message.
Returns:
list, A list of statuses, one for each JID, corresponding to the result of
sending the message to that JID. Or, if a single JID was passed in,
returns the status directly.
Raises:
InvalidJidError if there is no valid JID in the list.
InvalidTypeError if the type argument is invalid.
InvalidXmlError if the body is malformed XML and raw_xml is True.
NoBodyError if there is no body.
Error if another error occurs processing the request.
"""
request = xmpp_service_pb.XmppMessageRequest()
response = xmpp_service_pb.XmppMessageResponse()
if not body:
raise NoBodyError()
if not jids:
raise InvalidJidError()
if not message_type in _VALID_MESSAGE_TYPES:
raise InvalidTypeError()
single_jid = False
if isinstance(jids, basestring):
single_jid = True
jids = [jids]
for jid in jids:
if not jid:
raise InvalidJidError()
request.add_jid(_to_str(jid))
request.set_body(_to_str(body))
request.set_type(_to_str(message_type))
request.set_raw_xml(raw_xml)
if from_jid:
request.set_from_jid(_to_str(from_jid))
try:
apiproxy_stub_map.MakeSyncCall("xmpp",
"SendMessage",
request,
response)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
xmpp_service_pb.XmppServiceError.INVALID_JID):
raise InvalidJidError()
elif (e.application_error ==
xmpp_service_pb.XmppServiceError.INVALID_TYPE):
raise InvalidTypeError()
elif (e.application_error ==
xmpp_service_pb.XmppServiceError.INVALID_XML):
raise InvalidXmlError()
elif (e.application_error ==
xmpp_service_pb.XmppServiceError.NO_BODY):
raise NoBodyError()
raise Error()
if single_jid:
return response.status_list()[0]
return response.status_list()
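# A hedged sketch of send_message with hypothetical JIDs: a single JID returns
# one status directly, while a list of JIDs returns one status per recipient.
def _example_send_messages():
  single_status = send_message('someone@example.com', 'Hello!')
  statuses = send_message(['a@example.com', 'b@example.com'], 'Hello, all!')
  failed = [s for s in statuses if s != NO_ERROR]
  return single_status == NO_ERROR and not failed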
def send_presence(jid, status=None, from_jid=None,
presence_type=PRESENCE_TYPE_AVAILABLE,
presence_show=PRESENCE_SHOW_NONE):
"""Sends a presence to a given JID.
Args:
jid: A JID to send the presence to.
status: The optional status message. Size is limited to 1KB.
from_jid: The optional custom JID to use for sending. Currently, the default
is <appid>@appspot.com. This is supported as a value. Custom JIDs can be
of the form <anything>@<appid>.appspotchat.com.
presence_type: Optional type of the presence. This accepts a subset of the
types specified in RFC 3921, section 2.2.1. An empty string will result
in a presence stanza without a type attribute. For convenience, all of the
valid types are in the PRESENCE_TYPE_* constants in this file. The default
is PRESENCE_TYPE_AVAILABLE. Anything else will throw an exception.
presence_show: Optional show value for the presence. Should be one of the
values specified in RFC 3921, section 2.2.2.1. An empty string will result
in a presence stanza without a show element. For convenience, all of the
valid types are in the PRESENCE_SHOW_* constants in this file. The
default is PRESENCE_SHOW_NONE. Anything else will throw an exception.
Raises:
InvalidJidError if there is no valid JID in the list.
InvalidTypeError if the type argument is invalid.
InvalidShowError if the show argument is invalid.
InvalidStatusError if the status argument is too large.
Error if another error occurs processing the request.
"""
request = xmpp_service_pb.XmppSendPresenceRequest()
response = xmpp_service_pb.XmppSendPresenceResponse()
if not jid:
raise InvalidJidError()
if presence_type and not _to_str(presence_type) in _VALID_PRESENCE_TYPES:
raise InvalidTypeError()
if presence_show and not _to_str(presence_show) in _VALID_PRESENCE_SHOWS:
raise InvalidShowError()
if status and len(status) > MAX_STATUS_MESSAGE_SIZE:
raise InvalidStatusError()
request.set_jid(_to_str(jid))
if status:
request.set_status(_to_str(status))
if presence_type:
request.set_type(_to_str(presence_type))
if presence_show:
request.set_show(_to_str(presence_show))
if from_jid:
request.set_from_jid(_to_str(from_jid))
try:
apiproxy_stub_map.MakeSyncCall("xmpp",
"SendPresence",
request,
response)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
xmpp_service_pb.XmppServiceError.INVALID_JID):
raise InvalidJidError()
elif (e.application_error ==
xmpp_service_pb.XmppServiceError.INVALID_TYPE):
raise InvalidTypeError()
elif (e.application_error ==
xmpp_service_pb.XmppServiceError.INVALID_SHOW):
raise InvalidShowError()
raise Error()
return
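# A small, hypothetical example of send_presence: advertise a "do not disturb"
# presence with a short status message (status text is limited to 1KB).
def _example_announce_busy():
  send_presence('someone@example.com', status='Deploying a new version',
                presence_show=PRESENCE_SHOW_DND)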
class Message(object):
"""Encapsulates an XMPP message received by the application."""
def __init__(self, vars):
"""Constructs a new XMPP Message from an HTTP request.
Args:
vars: A dict-like object to extract message arguments from.
"""
try:
self.__sender = vars["from"]
self.__to = vars["to"]
self.__body = vars["body"]
except KeyError, e:
raise InvalidMessageError(e[0])
self.__command = None
self.__arg = None
@property
def sender(self):
return self.__sender
@property
def to(self):
return self.__to
@property
def body(self):
return self.__body
def __parse_command(self):
if self.__arg != None:
return
body = self.__body
if body.startswith('\\'):
body = '/' + body[1:]
self.__arg = ''
if body.startswith('/'):
parts = body.split(' ', 1)
self.__command = parts[0][1:]
if len(parts) > 1:
self.__arg = parts[1].strip()
else:
self.__arg = self.__body.strip()
@property
def command(self):
self.__parse_command()
return self.__command
@property
def arg(self):
self.__parse_command()
return self.__arg
def reply(self, body, message_type=MESSAGE_TYPE_CHAT, raw_xml=False,
send_message=send_message):
"""Convenience function to reply to a message.
Args:
body: str: The body of the message
message_type, raw_xml: As per send_message.
send_message: Used for testing.
Returns:
A status code as per send_message.
Raises:
See send_message.
"""
return send_message([self.sender], body, from_jid=self.to,
message_type=message_type, raw_xml=raw_xml)
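# A short illustration of Message command parsing, built from a hypothetical
# HTTP POST payload: a body starting with '/' (or '\') is treated as a
# command followed by an argument.
def _example_parse_incoming_message():
  msg = Message({'from': 'user@example.com',
                 'to': 'myapp@appspot.com',
                 'body': '/weather 94043'})
  return msg.command, msg.arg  # ('weather', '94043')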
def _to_str(value):
"""Helper function to make sure unicode values converted to utf-8
Args:
value: str or unicode to convert to utf-8.
Returns:
UTF-8 encoded str of value, otherwise value unchanged.
"""
if isinstance(value, unicode):
return value.encode('utf-8')
return value
|
|
import fnmatch
import glob
import os
import re
import tempfile
from datetime import datetime
from gppylib import gplog
from gppylib.commands.base import WorkerPool, Command, REMOTE
from gppylib.commands.unix import Scp
from gppylib.db import dbconn
from gppylib.db.dbconn import execSQL
from gppylib.gparray import GpArray
from gppylib.mainUtils import gp
from gppylib import pgconf
from optparse import Values
from pygresql import pg
from gppylib.operations.utils import DEFAULT_NUM_WORKERS
import gzip
logger = gplog.get_default_logger()
class Context(Values, object):
filename_dict = {
"ao": ("dump", "_ao_state_file"), "cdatabase": ("cdatabase_1_1", ""), "co": ("dump", "_co_state_file"), "dirty_table": ("dump", "_dirty_list"),
"dump": ("dump_%d_%d", ""), "files": ("dump", "_regular_files"), "filter": ("dump", "_filter"), "global": ("global_1_1", ""),
"increments": ("dump", "_increments"), "last_operation": ("dump", "_last_operation"), "master_config": ("master_config_files", ".tar"),
"metadata": ("dump_1_1", ""), "partition_list": ("dump", "_table_list"), "pipes": ("dump", "_pipes"), "plan": ("restore", "_plan"),
"postdata": ("dump_1_1", "_post_data"), "report": ("dump", ".rpt"), "schema": ("dump", "_schema"), "segment_config": ("segment_config_files_%d_%d", ".tar"),
"stats": ("statistics_1_1", ""), "status": ("dump_status_%d_%d", ""),
}
defaults = {
"backup_dir": None, "batch_default": 64, "change_schema": None, "cleanup_date": None, "cleanup_total": None, "clear_catalog_dumps": False,
"clear_dumps": False, "clear_dumps_only": False, "compress": True, "db_host_path": None, "ddboost": False, "ddboost_backupdir": None, "ddboost_config_remove": False,
"ddboost_hosts": None, "ddboost_ping": True, "ddboost_remote": False, "ddboost_show_config": False, "ddboost_storage_unit": None, "ddboost_user": None,
"ddboost_verify": False, "drop_db": False, "dump_config": False, "dump_databases": [], "dump_dir": "db_dumps", "dump_global": False, "dump_prefix": "",
"dump_schema": "", "dump_stats": False, "encoding": None, "exclude_dump_schema": "", "exclude_dump_tables": "", "exclude_dump_tables_file": "",
"exclude_schema_file": "", "free_space_percent": None, "history": False, "include_dump_tables": "", "include_dump_tables_file": "",
"include_schema_file": "", "incremental": False, "list_filter_tables": False, "local_dump_prefix": None, "masterDataDirectory": None,
"master_port": 0, "max_streams": None, "netbackup_block_size": None, "netbackup_keyword": None, "netbackup_policy": None, "netbackup_schedule": None,
"netbackup_service_host": None, "metadata_only": False, "no_analyze": False, "no_ao_stats": False, "no_plan": False, "no_validate_table_name": False,
"output_options": [], "post_script": "", "redirected_restore_db": None, "report_dir": "", "report_status_dir": "", "restore_db": None,
"restore_global": False, "restore_schemas": None, "restore_stats": None, "restore_tables": [], "timestamp": None, "timestamp_key": None,
"full_dump_timestamp": None,
}
def __init__(self, values=None):
if values:
self.defaults.update(values.__dict__) # Ensure that context has default values for all unset variables
super(self.__class__, self).__init__(vars(Values(self.defaults)))
if self.masterDataDirectory:
self.master_datadir = self.masterDataDirectory
else:
self.master_datadir = gp.get_masterdatadir()
self.master_port = self.get_master_port()
if self.local_dump_prefix:
self.dump_prefix = self.local_dump_prefix + "_"
else:
self.dump_prefix = ""
if not self.include_dump_tables: self.include_dump_tables = []
if not self.exclude_dump_tables: self.exclude_dump_tables = []
if not self.output_options: self.output_options = []
if not self.dump_schema: self.dump_schema = []
if not self.exclude_dump_schema: self.exclude_dump_schema = []
if self.netbackup_keyword and (len(self.netbackup_keyword) > 100):
raise Exception('NetBackup Keyword provided has more than max limit (100) characters. Cannot proceed with backup.')
def get_master_port(self):
pgconf_dict = pgconf.readfile(self.master_datadir + "/postgresql.conf")
return pgconf_dict.int('port')
def generate_filename(self, filetype, dbid=1, timestamp=None, directory=None):
if timestamp is None:
timestamp = self.timestamp
if directory:
use_dir = directory
else:
use_dir = self.get_backup_dir(timestamp)
format_str = "%s/%sgp_%s_%s%s" % (use_dir, self.dump_prefix, "%s", timestamp, "%s")
filename = format_str % (self.filename_dict[filetype][0], self.filename_dict[filetype][1])
if "%d" in filename:
if dbid == 1:
filename = filename % (1, 1)
else:
filename = filename % (0, dbid)
if self.compress and filetype in ["metadata", "dump", "postdata"]:
filename += ".gz"
return filename
def generate_prefix(self, filetype, dbid=1, timestamp=None):
if timestamp is None:
timestamp = self.timestamp
format_str = "%sgp_%s_" % (self.dump_prefix, "%s")
filename = format_str % (self.filename_dict[filetype][0])
if "%d" in filename:
if dbid == 1:
filename = filename % (1, 1)
else:
filename = filename % (0, dbid)
return filename
def get_backup_dir(self, timestamp=None, directory=None):
if directory is not None:
use_dir = directory
elif self.backup_dir and not self.ddboost:
use_dir = self.backup_dir
elif self.master_datadir:
use_dir = self.master_datadir
else:
raise Exception("Cannot locate backup directory with existing parameters")
if timestamp:
use_timestamp = timestamp
else:
use_timestamp = self.timestamp
if not use_timestamp:
raise Exception("Cannot locate backup directory without timestamp")
if not validate_timestamp(use_timestamp):
raise Exception('Invalid timestamp: "%s"' % use_timestamp)
return "%s/%s/%s" % (use_dir, self.dump_dir, use_timestamp[0:8])
def get_backup_root(self):
if self.backup_dir and not self.ddboost:
return self.backup_dir
else:
return self.master_datadir
def get_gpd_path(self):
gpd_path = os.path.join(self.dump_dir, self.timestamp[0:8])
if self.backup_dir:
gpd_path = os.path.join(self.backup_dir, gpd_path)
return gpd_path
def get_date_dir(self):
return os.path.join(self.get_backup_root(), self.dump_dir, self.db_date_dir)
def backup_dir_is_writable(self):
if self.backup_dir and not self.report_status_dir:
try:
check_dir_writable(self.get_backup_dir())
except Exception as e:
logger.warning('Backup directory %s is not writable. Error %s' % (self.get_backup_dir(), str(e)))
logger.warning('Since --report-status-dir option is not specified, report and status file will be written in segment data directory.')
return False
return True
def generate_dump_timestamp(self):
if self.timestamp_key:
timestamp_key = self.timestamp_key
else:
timestamp_key = datetime.now().strftime("%Y%m%d%H%M%S")
if not validate_timestamp(timestamp_key):
raise Exception('Invalid timestamp key')
year = int(timestamp_key[:4])
month = int(timestamp_key[4:6])
day = int(timestamp_key[6:8])
hours = int(timestamp_key[8:10])
minutes = int(timestamp_key[10:12])
seconds = int(timestamp_key[12:14])
self.timestamp = timestamp_key
self.db_date_dir = "%4d%02d%02d" % (year, month, day)
self.timestamp_object = datetime(year, month, day, hours, minutes, seconds)
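# A hedged sketch of how Context drives backup file naming. Constructing a
# Context reads postgresql.conf under the master data directory, so this
# assumes a reachable Greenplum master; the helper is never called at import.
def _example_context_report_filename(options):
    context = Context(options)
    context.generate_dump_timestamp()
    # e.g. <master_datadir>/db_dumps/YYYYMMDD/<prefix>gp_dump_<timestamp>.rpt
    return context.generate_filename("report")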
def expand_partitions_and_populate_filter_file(dbname, partition_list, file_prefix):
expanded_partitions = expand_partition_tables(dbname, partition_list)
dump_partition_list = list(set(expanded_partitions + partition_list))
return create_temp_file_from_list(dump_partition_list, file_prefix)
def populate_filter_tables(table, rows, non_partition_tables, partition_leaves):
if not rows:
non_partition_tables.append(table)
else:
for (schema_name, partition_leaf_name) in rows:
partition_leaf = schema_name.strip() + '.' + partition_leaf_name.strip()
partition_leaves.append(partition_leaf)
return (non_partition_tables, partition_leaves)
def get_all_parent_tables(dbname):
SQL = "SELECT DISTINCT (schemaname || '.' || tablename) FROM pg_partitions"
data = []
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, SQL)
data = curs.fetchall()
return set([d[0] for d in data])
def list_to_quoted_string(filter_tables):
filter_string = "'" + "', '".join([pg.escape_string(t) for t in filter_tables]) + "'"
return filter_string
def convert_parents_to_leafs(dbname, parents):
partition_leaves_sql = """
SELECT x.partitionschemaname || '.' || x.partitiontablename
FROM (
SELECT distinct schemaname, tablename, partitionschemaname, partitiontablename, partitionlevel
FROM pg_partitions
WHERE schemaname || '.' || tablename in (%s)
) as X,
(SELECT schemaname, tablename maxtable, max(partitionlevel) maxlevel
FROM pg_partitions
group by (tablename, schemaname)
) as Y
WHERE x.schemaname = y.schemaname and x.tablename = Y.maxtable and x.partitionlevel = Y.maxlevel;
"""
if not parents:
return []
conn = dbconn.connect(dbconn.DbURL(dbname=dbname))
partition_sql = partition_leaves_sql % list_to_quoted_string(parents)
curs = dbconn.execSQL(conn, partition_sql)
rows = curs.fetchall()
return [r[0] for r in rows]
# Input: list of tables to be filtered
# Output: the same list, but with parent tables converted to their leaf partitions
def expand_partition_tables(dbname, filter_tables):
if not filter_tables or len(filter_tables) == 0:
return filter_tables
parent_tables = list()
non_parent_tables = list()
expanded_list = list()
all_parent_tables = get_all_parent_tables(dbname)
for table in filter_tables:
if table in all_parent_tables:
parent_tables.append(table)
else:
non_parent_tables.append(table)
expanded_list += non_parent_tables
local_batch_size = 1000
for (s, e) in get_batch_from_list(len(parent_tables), local_batch_size):
tmp = convert_parents_to_leafs(dbname, parent_tables[s:e])
expanded_list += tmp
return expanded_list
def get_batch_from_list(length, batch_size):
indices = []
for i in range(0, length, batch_size):
indices.append((i, i+batch_size))
return indices
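# A small worked example of get_batch_from_list: batching 10 items in groups
# of 4 yields the index windows (0, 4), (4, 8), (8, 12); Python slicing simply
# truncates the last window when the list runs out.
def _example_batching():
    tables = ['t%d' % i for i in range(10)]
    return [tables[s:e] for (s, e) in get_batch_from_list(len(tables), 4)]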
def create_temp_file_from_list(entries, prefix):
"""
When writing the entries into the temp file, don't do any stripping, as
there might be whitespace in schema and table names.
"""
if len(entries) == 0:
return None
fd = tempfile.NamedTemporaryFile(mode='w', prefix=prefix, delete=False)
for entry in entries:
fd.write(entry + '\n')
tmp_file_name = fd.name
fd.close()
return tmp_file_name
def create_temp_file_with_tables(table_list):
return create_temp_file_from_list(table_list, 'table_list_')
def create_temp_file_with_schemas(schema_list):
return create_temp_file_from_list(schema_list, 'schema_file_')
def validate_timestamp(timestamp):
if not timestamp:
return False
if len(timestamp) != 14:
return False
if timestamp.isdigit():
return True
else:
return False
def check_successful_dump(report_file_contents):
for line in report_file_contents:
if line.strip() == 'gp_dump utility finished successfully.':
return True
return False
# raise exception for bad data
def convert_report_filename_to_cdatabase_filename(context, report_file):
(dirname, fname) = os.path.split(report_file)
timestamp = fname[-18:-4]
ddboost_parent_dir = None
if context.ddboost:
ddboost_parent_dir = context.get_backup_dir(directory='')
return context.generate_filename("cdatabase", timestamp=timestamp, directory=ddboost_parent_dir)
def get_lines_from_dd_file(filename, ddboost_storage_unit):
cmdStr = 'gpddboost --readFile --from-file=%s' % filename
if ddboost_storage_unit:
cmdStr += ' --ddboost-storage-unit=%s' % ddboost_storage_unit
cmd = Command('DDBoost copy of master dump file', cmdStr)
cmd.run(validateAfter=True)
contents = cmd.get_results().stdout.splitlines()
return contents
def check_cdatabase_exists(context, report_file):
try:
filename = convert_report_filename_to_cdatabase_filename(context, report_file)
except Exception, err:
return False
if context.ddboost:
cdatabase_contents = get_lines_from_dd_file(filename, context.ddboost_storage_unit)
elif context.netbackup_service_host:
restore_file_with_nbu(context, path=filename)
cdatabase_contents = get_lines_from_file(filename)
else:
cdatabase_contents = get_lines_from_file(filename, context)
dbname = escapeDoubleQuoteInSQLString(context.dump_database, forceDoubleQuote=False)
for line in cdatabase_contents:
if 'CREATE DATABASE' in line:
dump_dbname = get_dbname_from_cdatabaseline(line)
if dump_dbname is None:
continue
else:
if dbname == checkAndRemoveEnclosingDoubleQuote(dump_dbname):
return True
return False
def get_dbname_from_cdatabaseline(line):
"""
Line format: CREATE DATABASE "DBNAME" WITH TEMPLATE = template0 ENCODING = 'UTF8' OWNER = gpadmin;
To get the dbname, take the substring between the end of the leading
"CREATE DATABASE" and the start of the " WITH TEMPLATE " keyword that is not
inside any double quotes. A double quote inside a name is escaped by an
extra double quote, so there is always exactly one " WITH TEMPLATE " outside
all quotes: the text before and after it each contain an even number of
double quotes.
Note: the OWNER name can also contain special characters and double quotes.
"""
cdatabase = "CREATE DATABASE "
try:
start = line.index(cdatabase)
except Exception as e:
logger.error('Failed to find substring %s in line %s, error: %s' % (cdatabase, line, str(e)))
return None
keyword = " WITH TEMPLATE = "
pos = get_nonquoted_keyword_index(line, keyword, '"', len(keyword))
if pos != -1:
dbname = line[start+len(cdatabase) : pos]
return dbname
return None
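# A worked example of the quote-aware parsing above on a hypothetical
# cdatabase line. The returned name keeps its enclosing (and internal escaped)
# double quotes; callers strip them with checkAndRemoveEnclosingDoubleQuote,
# defined later in this module.
def _example_parse_cdatabase_line():
    line = 'CREATE DATABASE "test""db" WITH TEMPLATE = template0 ENCODING = \'UTF8\' OWNER = gpadmin;'
    return get_dbname_from_cdatabaseline(line)  # returns '"test""db"'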
def get_nonquoted_keyword_index(line, keyword, quote, keyword_len):
# quote can be single quote or double quote
all_positions = get_all_occurrences(keyword, line)
if all_positions != None and len(all_positions) > 0:
for pos in all_positions:
pre_string = line[:pos]
post_string = line[pos + keyword_len:]
quotes_before = get_all_occurrences('%s' % quote, pre_string)
quotes_after = get_all_occurrences('%s' % quote, post_string)
num_quotes_before = 0 if (quotes_before is None or len(quotes_before) == 0) else len(quotes_before)
num_quotes_after = 0 if (quotes_after is None or len(quotes_after) == 0) else len(quotes_after)
if num_quotes_before % 2 == 0 and num_quotes_after % 2 == 0:
return pos
return -1
def get_all_occurrences(substr, line):
# substr is used for generating the pattern, escape those special chars in regexp
if substr is None or line is None or len(substr) > len(line):
return None
return [m.start() for m in re.finditer('(?=%s)' % substr, line)]
def get_type_ts_from_report_file(context, report_file, backup_type):
report_file_contents = get_lines_from_file(report_file)
if not check_successful_dump(report_file_contents):
return None
if not check_cdatabase_exists(context, report_file):
return None
if check_backup_type(report_file_contents, backup_type):
return get_timestamp_val(report_file_contents)
return None
def get_full_ts_from_report_file(context, report_file):
return get_type_ts_from_report_file(context, report_file, 'Full')
def get_incremental_ts_from_report_file(context, report_file):
return get_type_ts_from_report_file(context, report_file, 'Incremental')
def get_timestamp_val(report_file_contents):
for line in report_file_contents:
if line.startswith('Timestamp Key'):
timestamp = line.split(':')[-1].strip()
if not validate_timestamp(timestamp):
raise Exception('Invalid timestamp value found in report_file')
return timestamp
return None
def check_backup_type(report_file_contents, backup_type):
for line in report_file_contents:
if line.startswith('Backup Type'):
if line.split(':')[-1].strip() == backup_type:
return True
return False
def get_lines_from_zipped_file(fname):
"""
Don't strip whitespace here, as it may be part of a schema or table name.
"""
content = []
fd = gzip.open(fname, 'r')
try:
for line in fd:
content.append(line.strip('\n'))
except Exception as err:
raise Exception("Error reading from file %s: %s" % (fname, err))
finally:
fd.close()
return content
def get_lines_from_file(fname, context=None):
"""
Don't strip whitespace here, as it may be part of a schema or table name.
"""
content = []
if context and context.ddboost:
contents = get_lines_from_dd_file(fname, context.ddboost_storage_unit)
return contents
else:
with open(fname) as fd:
for line in fd:
content.append(line.strip('\n'))
return content
def write_lines_to_file(filename, lines):
"""
Don't strip whitespace from the lines, as it may be part of a schema or table name.
"""
with open(filename, 'w') as fp:
for line in lines:
fp.write("%s\n" % line.strip('\n'))
def verify_lines_in_file(fname, expected):
lines = get_lines_from_file(fname)
if lines != expected:
raise Exception("After writing file '%s' contents not as expected.\nLines read from file: %s\nLines expected from file: %s\n" % (fname, lines, expected))
def check_dir_writable(directory):
fp = None
try:
tmp_file = os.path.join(directory, 'tmp_file')
fp = open(tmp_file, 'w')
except IOError as e:
raise Exception('No write access permission on %s' % directory)
except Exception as e:
raise Exception(str(e))
finally:
if fp is not None:
fp.close()
if os.path.isfile(tmp_file):
os.remove(tmp_file)
def execute_sql(query, master_port, dbname):
dburl = dbconn.DbURL(port=master_port, dbname=dbname)
conn = dbconn.connect(dburl)
cursor = execSQL(conn, query)
return cursor.fetchall()
def generate_master_status_prefix(dump_prefix):
return '%sgp_dump_status_1_1_' % (dump_prefix)
def generate_seg_dbdump_prefix(dump_prefix):
return '%sgp_dump_0_' % (dump_prefix)
def generate_seg_status_prefix(dump_prefix):
return '%sgp_dump_status_0_' % (dump_prefix)
def get_dump_dirs(context):
use_dir = context.get_backup_root()
dump_path = os.path.join(use_dir, context.dump_dir)
if not os.path.isdir(dump_path):
return []
initial_list = os.listdir(dump_path)
initial_list = fnmatch.filter(initial_list, '[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]')
dirnames = []
for d in initial_list:
pth = os.path.join(dump_path, d)
if os.path.isdir(pth):
dirnames.append(pth)
if len(dirnames) == 0:
return []
dirnames = sorted(dirnames, key=lambda x: int(os.path.basename(x)), reverse=True)
return dirnames
def get_latest_report_timestamp(context):
dump_dirs = get_dump_dirs(context)
for d in dump_dirs:
latest = get_latest_report_in_dir(d, context.dump_prefix)
if latest:
return latest
return None
def get_latest_report_in_dir(report_dir, dump_prefix):
files = os.listdir(report_dir)
if len(files) == 0:
return None
dump_report_files = fnmatch.filter(files, '%sgp_dump_[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].rpt' % dump_prefix)
if len(dump_report_files) == 0:
return None
dump_report_files = sorted(dump_report_files, key=lambda x: int(x.split('_')[-1].split('.')[0]), reverse=True)
return dump_report_files[0][-18:-4]
def get_timestamp_from_increments_filename(filename, dump_prefix):
fname = os.path.basename(filename)
parts = fname.split('_')
# Expect 4 underscore-separated parts if there is no prefix, or more than 4 if there is a prefix
if not ((not dump_prefix and len(parts) == 4) or (dump_prefix and len(parts) > 4)):
raise Exception("Invalid increments file '%s' passed to get_timestamp_from_increments_filename" % filename)
return parts[-2].strip()
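# Example: with no dump prefix an increments filename splits into exactly four
# underscore-separated parts, and the timestamp is the second-to-last part.
def _example_increments_timestamp():
    fname = '/data/db_dumps/20160101/gp_dump_20160101093000_increments'
    return get_timestamp_from_increments_filename(fname, '')  # '20160101093000'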
def get_full_timestamp_for_incremental(context):
full_timestamp = None
if context.netbackup_service_host:
full_timestamp = get_full_timestamp_for_incremental_with_nbu(context)
else:
pattern = '%s/%s/[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]/%sgp_dump_[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]_increments' % \
(context.get_backup_root(), context.dump_dir, context.dump_prefix)
increments_files = glob.glob(pattern)
for increments_file in increments_files:
if os.path.exists(increments_file):
increment_ts = get_lines_from_file(increments_file)
else:
continue
if context.timestamp in increment_ts:
full_timestamp = get_timestamp_from_increments_filename(increments_file, context.dump_prefix)
break
if not full_timestamp:
raise Exception("Could not locate full backup associated with timestamp '%s'. "
"Either increments file or full backup is missing.\n"
% (context.timestamp))
return full_timestamp
# backup_dir will be either MDD or some other directory depending on call
def get_latest_full_dump_timestamp(context):
backup_dir = context.get_backup_root()
dump_dirs = get_dump_dirs(context)
for dump_dir in dump_dirs:
files = sorted(os.listdir(dump_dir))
if len(files) == 0:
logger.warn('Dump directory %s is empty' % dump_dir)
continue
dump_report_files = fnmatch.filter(files, '%sgp_dump_[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].rpt' % context.dump_prefix)
if len(dump_report_files) == 0:
logger.warn('No dump report files found in dump directory %s' % dump_dir)
continue
dump_report_files = sorted(dump_report_files, key=lambda x: int(x.split('_')[-1].split('.')[0]), reverse=True)
for dump_report_file in dump_report_files:
report_path = os.path.join(dump_dir, dump_report_file)
logger.debug('Checking for latest timestamp in report file %s' % report_path)
timestamp = get_full_ts_from_report_file(context, report_path)
logger.debug('Timestamp = %s' % timestamp)
if timestamp is not None:
return timestamp
raise Exception('No full backup found for incremental')
def get_all_segment_addresses(master_port):
gparray = GpArray.initFromCatalog(dbconn.DbURL(port=master_port), utility=True)
addresses = [seg.getSegmentAddress() for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
return list(set(addresses))
def scp_file_to_hosts(host_list, filename, batch_default):
pool = WorkerPool(numWorkers=min(len(host_list), batch_default))
for hname in host_list:
pool.addCommand(Scp('Copying table_filter_file to %s' % hname,
srcFile=filename,
dstFile=filename,
dstHost=hname))
pool.join()
pool.haltWork()
pool.check_results()
def run_pool_command(host_list, cmd_str, batch_default, check_results=True):
pool = WorkerPool(numWorkers=min(len(host_list), batch_default))
for host in host_list:
cmd = Command(host, cmd_str, ctxt=REMOTE, remoteHost=host)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
if check_results:
pool.check_results()
def check_funny_chars_in_names(names, is_full_qualified_name=True):
"""
'\n' inside a table name makes it hard to specify the object name on the shell command line;
this could be worked around by using a table file, but currently we read input line by line.
'!' inside a table name will mess up shell history expansion.
',' is used for separating tables in the plan file during incremental restore.
'.' is currently used for fully qualified table names in the format: schema.table
"""
if names and len(names) > 0:
for name in names:
if ('\t' in name or '\n' in name or '!' in name or ',' in name or
(is_full_qualified_name and name.count('.') > 1) or (not is_full_qualified_name and name.count('.') > 0)):
raise Exception('Name has an invalid character "\\t" "\\n" "!" "," ".": "%s"' % name)
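# A brief illustration of the validation above with hypothetical names:
# fully qualified names may contain exactly one dot, bare names none, and
# tabs, newlines, '!' and ',' are always rejected.
def _example_name_validation():
    check_funny_chars_in_names(['public.sales', 'public.orders'])
    check_funny_chars_in_names(['sales', 'orders'], is_full_qualified_name=False)
    # check_funny_chars_in_names(['bad,name']) would raise an Exception.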
def backup_file_with_ddboost(context, filetype=None, dbid=1, timestamp=None):
if filetype is None:
raise Exception("Cannot call backup_file_with_ddboost without a filetype argument")
if timestamp is None:
timestamp = context.timestamp
path = context.generate_filename(filetype, dbid=dbid, timestamp=timestamp)
copy_file_to_dd(context, path, timestamp)
def copy_file_to_dd(context, filename, timestamp=None):
if timestamp is None:
timestamp = context.timestamp
basefilename = os.path.basename(filename)
cmdStr = "gpddboost --copyToDDBoost --from-file=%s --to-file=%s/%s/%s" % (filename, context.dump_dir, context.timestamp[0:8], basefilename)
if context.ddboost_storage_unit:
cmdStr += " --ddboost-storage-unit=%s" % context.ddboost_storage_unit
cmd = Command('copy file %s to DD machine' % basefilename, cmdStr)
cmd.run(validateAfter=True)
# Form and run the command line to back up an individual file with NBU
def backup_file_with_nbu(context, filetype=None, path=None, dbid=1, hostname=None, timestamp=None):
if filetype and path:
raise Exception("Cannot supply both a file type and a file path to backup_file_with_nbu")
if filetype is None and path is None:
raise Exception("Cannot call backup_file_with_nbu with no type or path argument")
if timestamp is None:
timestamp = context.timestamp
if filetype:
path = context.generate_filename(filetype, dbid=dbid, timestamp=timestamp)
command_string = "cat %s | gp_bsa_dump_agent --netbackup-service-host %s --netbackup-policy %s --netbackup-schedule %s --netbackup-filename %s" % \
(path, context.netbackup_service_host, context.netbackup_policy, context.netbackup_schedule, path)
if context.netbackup_block_size is not None:
command_string += " --netbackup-block-size %s" % context.netbackup_block_size
if context.netbackup_keyword is not None:
command_string += " --netbackup-keyword %s" % context.netbackup_keyword
logger.debug("Command string inside backup_%s_file_with_nbu: %s\n", filetype, command_string)
if hostname is None:
Command("dumping metadata files from master", command_string).run(validateAfter=True)
else:
Command("dumping metadata files from segment", command_string, ctxt=REMOTE, remoteHost=hostname).run(validateAfter=True)
logger.debug("Command ran successfully\n")
# Form and run the command line to restore an individual file with NBU
def restore_file_with_nbu(context, filetype=None, path=None, dbid=1, hostname=None, timestamp=None):
if filetype and path:
raise Exception("Cannot supply both a file type and a file path to restore_file_with_nbu")
if filetype is None and path is None:
raise Exception("Cannot call restore_file_with_nbu with no type or path argument")
if timestamp is None:
timestamp = context.timestamp
if filetype:
path = context.generate_filename(filetype, dbid=dbid, timestamp=timestamp)
command_string = "gp_bsa_restore_agent --netbackup-service-host %s" % context.netbackup_service_host
if context.netbackup_block_size is not None:
command_string += " --netbackup-block-size %s" % context.netbackup_block_size
command_string += " --netbackup-filename %s > %s" % (path, path)
logger.debug("Command string inside restore_%s_file_with_nbu: %s\n", filetype, command_string)
if hostname is None:
Command("restoring metadata files to master", command_string).run(validateAfter=True)
else:
Command("restoring metadata files to segment", command_string, ctxt=REMOTE, remoteHost=hostname).run(validateAfter=True)
def check_file_dumped_with_nbu(context, filetype=None, path=None, dbid=1, hostname=None):
if filetype and path:
raise Exception("Cannot supply both a file type and a file path toeck_file_dumped_with_nbu")
if filetype is None and path is None:
raise Exception("Cannot call check_file_dumped_with_nbu with no type or path argument")
if filetype:
path = context.generate_filename(filetype, dbid=dbid)
command_string = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (context.netbackup_service_host, path)
logger.debug("Command string inside 'check_file_dumped_with_nbu': %s\n", command_string)
if hostname is None:
cmd = Command("Querying NetBackup server to check for dumped file", command_string)
else:
cmd = Command("Querying NetBackup server to check for dumped file", command_string, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() == path:
return True
else:
return False
def get_full_timestamp_for_incremental_with_nbu(context):
if context.dump_prefix:
get_inc_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*_increments" % (context.netbackup_service_host, context.dump_prefix)
else:
get_inc_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*_increments" % context.netbackup_service_host
cmd = Command("Query NetBackup server to get the list of increments files backed up", get_inc_files_cmd)
cmd.run(validateAfter=True)
files_list = cmd.get_results().stdout.strip().split('\n')
for line in files_list:
fname = line.strip()
restore_file_with_nbu(context, path=fname)
contents = get_lines_from_file(fname)
if context.timestamp in contents:
full_timestamp = get_timestamp_from_increments_filename(fname, context.dump_prefix)
return full_timestamp
return None
def get_latest_full_ts_with_nbu(context):
if context.dump_prefix:
get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*.rpt" % \
(context.netbackup_service_host, context.dump_prefix)
else:
get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*.rpt" % context.netbackup_service_host
cmd = Command("Query NetBackup server to get the list of report files backed up", get_rpt_files_cmd)
cmd.run(validateAfter=True)
files_list = cmd.get_results().stdout.strip().split('\n')
for line in files_list:
fname = line.strip()
if fname == '':
continue
if context.backup_dir is not None and context.backup_dir not in fname:
continue
if ("No object matched the specified predicate" in fname) or ("No objects of the format" in fname):
return None
restore_file_with_nbu(context, path=fname)
timestamp = get_full_ts_from_report_file(context, report_file=fname)
logger.debug('Timestamp = %s' % timestamp)
if timestamp is not None:
return timestamp
raise Exception('No full backup found for given incremental on the specified NetBackup server')
def getRows(dbname, exec_sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, exec_sql)
results = curs.fetchall()
return results
def check_schema_exists(schema_name, dbname):
schemaname = pg.escape_string(schema_name)
schema_check_sql = "select * from pg_catalog.pg_namespace where nspname='%s';" % schemaname
if len(getRows(dbname, schema_check_sql)) < 1:
return False
return True
def unescape_string(string):
if string:
string = string.replace('\\\\', '\\').replace("''", "'")
return string
def isDoubleQuoted(string):
if len(string) > 2 and string[0] == '"' and string[-1] == '"':
return True
return False
def checkAndRemoveEnclosingDoubleQuote(string):
if isDoubleQuoted(string):
string = string[1 : len(string) - 1]
return string
def checkAndAddEnclosingDoubleQuote(string):
if not isDoubleQuoted(string):
string = '"' + string + '"'
return string
def escapeDoubleQuoteInSQLString(string, forceDoubleQuote=True):
"""
Accept a raw database, schema, or table name; escape any double quotes
inside the name and add enclosing double quotes by default.
"""
string = string.replace('"', '""')
if forceDoubleQuote:
string = '"' + string + '"'
return string
def removeEscapingDoubleQuoteInSQLString(string, forceDoubleQuote=True):
"""
Remove the escaping double quote in database/schema/table name.
"""
if string is None:
return string
string = string.replace('""', '"')
if forceDoubleQuote:
string = '"' + string + '"'
return string
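# A short round-trip example of the quoting helpers on a name that contains
# a double quote.
def _example_quote_helpers():
    quoted = escapeDoubleQuoteInSQLString('my"table')              # '"my""table"'
    inner = checkAndRemoveEnclosingDoubleQuote(quoted)             # 'my""table'
    return removeEscapingDoubleQuoteInSQLString(inner, forceDoubleQuote=False)  # 'my"table'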
def formatSQLString(rel_file, isTableName=False):
"""
Read the fully qualified schema or table names; if the items are table
names, split each into schema and table, and escape any double quotes
inside the names properly.
"""
relnames = []
if rel_file and os.path.exists(rel_file):
with open(rel_file, 'r') as fr:
lines = fr.read().strip('\n').split('\n')
for line in lines:
if isTableName:
schema, table = split_fqn(line)
schema = escapeDoubleQuoteInSQLString(schema)
table = escapeDoubleQuoteInSQLString(table)
relnames.append(schema + '.' + table)
else:
schema = escapeDoubleQuoteInSQLString(line)
relnames.append(schema)
if len(relnames) > 0:
write_lines_to_file(rel_file, relnames)
return rel_file
def split_fqn(fqn_name):
"""
Split a fully qualified table name into schema and table on the '.' separator.
"""
try:
schema, table = fqn_name.split('.')
except Exception as e:
logger.error("Failed to split name %s into schema and table, please check the format is schema.table" % fqn_name)
raise Exception('%s' % str(e))
return schema, table
def remove_file_on_segments(context, filename):
addresses = get_all_segment_addresses(context.master_port)
try:
cmd = 'rm -f %s' % filename
run_pool_command(addresses, cmd, context.batch_default, check_results=False)
except Exception as e:
logger.error("cleaning up file failed: %s" % e.__str__())
def get_table_info(line):
"""
It is hard to split correctly when a table/schema/user/tablespace name
contains the full text of one of the other field markers; this is very
unlikely, but if it happens, return None.
Since we only care about the table name, type, and schema name, stripping
the input is safe here.
line: contains the true (un-escaped) schema name, table name, and user name.
"""
COMMENT_EXPR = '-- Name: '
TYPE_EXPR = '; Type: '
SCHEMA_EXPR = '; Schema: '
OWNER_EXPR = '; Owner: '
TABLESPACE_EXPR = '; Tablespace: '
temp = line.strip('\n')
type_start = get_all_occurrences(TYPE_EXPR, temp)
schema_start = get_all_occurrences(SCHEMA_EXPR, temp)
owner_start = get_all_occurrences(OWNER_EXPR, temp)
tblspace_start = get_all_occurrences(TABLESPACE_EXPR, temp)
if len(type_start) != 1 or len(schema_start) != 1 or len(owner_start) != 1:
return (None, None, None, None)
name = temp[len(COMMENT_EXPR) : type_start[0]]
type = temp[type_start[0] + len(TYPE_EXPR) : schema_start[0]]
schema = temp[schema_start[0] + len(SCHEMA_EXPR) : owner_start[0]]
if not tblspace_start:
tblspace_start.append(None)
owner = temp[owner_start[0] + len(OWNER_EXPR) : tblspace_start[0]]
return (name, type, schema, owner)
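# A worked example of get_table_info on a hypothetical dump comment line.
def _example_table_info():
    line = '-- Name: sales; Type: TABLE; Schema: public; Owner: gpadmin\n'
    return get_table_info(line)  # ('sales', 'TABLE', 'public', 'gpadmin')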
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to test TFLite models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six import PY2
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.lite.python import convert_saved_model as _convert_saved_model
from tensorflow.lite.python import lite as _lite
from tensorflow.lite.python import util as _util
from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.saved_model import load as _load
from tensorflow.python.saved_model import loader as _loader
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
def get_filepath(filename, base_dir=None):
"""Returns the full path of the filename.
Args:
filename: Subdirectory and name of the model file.
base_dir: Base directory containing model file.
Returns:
str.
"""
if base_dir is None:
base_dir = "learning/brain/mobile/tflite_compat_models"
return os.path.join(_resource_loader.get_root_dir_with_all_resources(),
base_dir, filename)
def get_image(size):
"""Returns an image loaded into an np.ndarray with dims [1, size, size, 3].
Args:
size: Size of image.
Returns:
np.ndarray.
"""
img_filename = _resource_loader.get_path_to_datafile(
"testdata/grace_hopper.jpg")
img = image.load_img(img_filename, target_size=(size, size))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
return img_array
def _convert(converter, **kwargs):
"""Converts the model.
Args:
converter: TFLiteConverter object.
**kwargs: Additional arguments to be passed into the converter. Supported
flags are {"target_ops", "post_training_quantize",
"quantize_to_float16", "post_training_quantize_16x8", "model_input_size"}.
Returns:
The converted TFLite model in serialized format.
Raises:
ValueError: Invalid version number.
"""
if "target_ops" in kwargs:
converter.target_spec.supported_ops = kwargs["target_ops"]
if "post_training_quantize" in kwargs:
converter.optimizations = [_lite.Optimize.DEFAULT]
if kwargs.get("quantize_to_float16", False):
converter.target_spec.supported_types = [dtypes.float16]
if kwargs.get("post_training_quantize_16x8", False):
input_size = kwargs.get("model_input_size")
def _get_calib_data_func():
def representative_data_gen():
num_calibration = 20
for _ in range(num_calibration):
yield [
np.random.rand(
1,
input_size[0],
input_size[1],
input_size[2],
).astype(np.float32)
]
return representative_data_gen
converter.optimizations = [_lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = \
[_lite.OpsSet.\
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]
converter.representative_dataset = _get_calib_data_func()
return converter.convert()
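# A hedged sketch of driving _convert: build a converter from a hypothetical
# frozen graph (path and tensor names are assumptions, not real test assets)
# and apply post-training quantization.
def _example_convert_quantized():
  converter = _lite.TFLiteConverter.from_frozen_graph(
      "/tmp/frozen_graph.pb", ["input"], ["output"])
  return _convert(converter, post_training_quantize=True)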
def _check_model_quantized_to_16x8(tflite_model):
"""Checks that the activations are quantized into int16.
Args:
tflite_model: Serialized TensorFlow Lite model.
Raises:
ValueError: Activations with int16 type are not found.
"""
interpreter = _get_tflite_interpreter(tflite_model)
interpreter.allocate_tensors()
all_tensor_details = interpreter.get_tensor_details()
found_input = False
for tensor in all_tensor_details:
if "_int16" in tensor["name"]:
found_input = True
if tensor["dtype"] is not np.int16:
raise ValueError("Activations should be int16.")
# Check that we found activations in the correct type: int16
if not found_input:
raise ValueError("Could not find int16 activations.")
def _get_tflite_interpreter(tflite_model, input_shapes_resize=None):
"""Creates a TFLite interpreter with resized input tensors.
Args:
tflite_model: Serialized TensorFlow Lite model.
input_shapes_resize: A map where the key is the input tensor name and the
value is the shape of the input tensor. This resize happens after model
conversion, prior to calling allocate tensors. (default None)
Returns:
lite.Interpreter
"""
interpreter = _lite.Interpreter(model_content=tflite_model)
if input_shapes_resize:
input_details = interpreter.get_input_details()
input_details_map = {
detail["name"]: detail["index"] for detail in input_details
}
for name, shape in input_shapes_resize.items():
idx = input_details_map[name]
interpreter.resize_tensor_input(idx, shape)
return interpreter
def _get_input_data_map(tflite_model, input_data):
"""Generates a map of input data based on the TFLite model.
Args:
tflite_model: Serialized TensorFlow Lite model.
input_data: List of np.ndarray.
Returns:
{str: [np.ndarray]}.
"""
interpreter = _get_tflite_interpreter(tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
return {
input_tensor["name"]: data
for input_tensor, data in zip(input_details, input_data)
}
def _generate_random_input_data(tflite_model,
seed=None,
input_data_range=None,
input_shapes_resize=None):
"""Generates input data based on the input tensors in the TFLite model.
Args:
tflite_model: Serialized TensorFlow Lite model.
seed: Integer seed for the random generator. (default None)
input_data_range: A map where the key is the input tensor name and
the value is a tuple (min_val, max_val) which specifies the value range of
the corresponding input tensor. For example, '{'input1': (1, 5)}' means to
generate a random value for tensor `input1` within range [1.0, 5.0)
(half-inclusive). (default None)
input_shapes_resize: A map where the key is the input tensor name and the
value is the shape of the input tensor. This resize happens after model
conversion, prior to calling allocate tensors. (default None)
Returns:
([np.ndarray], {str : [np.ndarray]}).
"""
interpreter = _get_tflite_interpreter(tflite_model, input_shapes_resize)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
if seed:
np.random.seed(seed=seed)
# Generate random input data. If a tensor's value range is specified, say
# [a, b), then the generated value will be (b - a) * Unif[0.0, 1.0) + a,
# otherwise it's Unif[0.0, 1.0).
input_data = []
for input_tensor in input_details:
val = np.random.random_sample(input_tensor["shape"])
if (input_data_range is not None and
input_tensor["name"] in input_data_range):
val = (input_data_range[input_tensor["name"]][1] -
input_data_range[input_tensor["name"]][0]
) * val + input_data_range[input_tensor["name"]][0]
input_data.append(np.array(val, dtype=input_tensor["dtype"]))
input_data_map = _get_input_data_map(tflite_model, input_data)
return input_data, input_data_map
def _evaluate_tflite_model(tflite_model, input_data, input_shapes_resize=None):
"""Returns evaluation of input data on TFLite model.
Args:
tflite_model: Serialized TensorFlow Lite model.
input_data: List of np.ndarray.
input_shapes_resize: A map where the key is the input tensor name and the
value is the shape of the input tensor. This resize happens after model
conversion, prior to calling allocate tensors. (default None)
Returns:
List of np.ndarray.
"""
interpreter = _get_tflite_interpreter(tflite_model, input_shapes_resize)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
for input_tensor, tensor_data in zip(input_details, input_data):
interpreter.set_tensor(input_tensor["index"], tensor_data)
interpreter.invoke()
output_data = [
interpreter.get_tensor(output_tensor["index"])
for output_tensor in output_details
]
output_labels = [output_tensor["name"] for output_tensor in output_details]
return output_data, output_labels
def evaluate_frozen_graph(filename, input_arrays, output_arrays):
"""Returns a function that evaluates the frozen graph on input data.
Args:
filename: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
Returns:
Lambda function ([np.ndarray data] : [np.ndarray result]).
"""
with _file_io.FileIO(filename, "rb") as f:
file_content = f.read()
graph_def = _graph_pb2.GraphDef()
try:
graph_def.ParseFromString(file_content)
except (_text_format.ParseError, DecodeError):
if not isinstance(file_content, str):
if PY2:
file_content = file_content.encode("utf-8")
else:
file_content = file_content.decode("utf-8")
_text_format.Merge(file_content, graph_def)
graph = ops.Graph()
with graph.as_default():
_import_graph_def(graph_def, name="")
inputs = _util.get_tensors_from_tensor_names(graph, input_arrays)
outputs = _util.get_tensors_from_tensor_names(graph, output_arrays)
def run_session(input_data):
with _session.Session(graph=graph) as sess:
return sess.run(outputs, dict(zip(inputs, input_data)))
return run_session
def evaluate_saved_model(directory, tag_set, signature_key):
"""Returns a function that evaluates the SavedModel on input data.
Args:
directory: SavedModel directory to convert.
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
Returns:
Lambda function ([np.ndarray data] : [np.ndarray result]).
"""
with _session.Session().as_default() as sess:
if tag_set is None:
tag_set = set([_tag_constants.SERVING])
if signature_key is None:
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
meta_graph = _loader.load(sess, tag_set, directory)
signature_def = _convert_saved_model.get_signature_def(
meta_graph, signature_key)
inputs, outputs = _convert_saved_model.get_inputs_outputs(signature_def)
return lambda input_data: sess.run(outputs, dict(zip(inputs, input_data)))
def evaluate_keras_model(filename):
"""Returns a function that evaluates the tf.keras model on input data.
Args:
filename: Full filepath of HDF5 file containing the tf.keras model.
Returns:
Lambda function ([np.ndarray data] : [np.ndarray result]).
"""
keras_model = _keras.models.load_model(filename)
return lambda input_data: [keras_model.predict(input_data)]
def compare_models(tflite_model,
tf_eval_func,
input_shapes_resize=None,
input_data=None,
input_data_range=None,
tolerance=5):
"""Compares TensorFlow and TFLite models.
Unless the input data is provided, the models are compared with random data.
Args:
tflite_model: Serialized TensorFlow Lite model.
tf_eval_func: Lambda function that takes in input data and outputs the
results of the TensorFlow model ([np.ndarray data] : [np.ndarray result]).
input_shapes_resize: A map where the key is the input tensor name and the
value is the shape of the input tensor. This resize happens after model
conversion, prior to calling allocate tensors. (default None)
input_data: np.ndarray to pass into models during inference. (default None)
input_data_range: A map where the key is the input tensor name and
the value is a tuple (min_val, max_val) which specifies the value range of
the corresponding input tensor. For example, '{'input1': (1, 5)}' means to
generate a random value for tensor `input1` within range [1.0, 5.0)
(half-inclusive). (default None)
tolerance: Decimal place to check accuracy to. (default 5).
"""
if input_data is None:
input_data, _ = _generate_random_input_data(
tflite_model=tflite_model,
input_data_range=input_data_range,
input_shapes_resize=input_shapes_resize)
tf_results = tf_eval_func(input_data)
tflite_results, _ = _evaluate_tflite_model(
tflite_model, input_data, input_shapes_resize=input_shapes_resize)
for tf_result, tflite_result in zip(tf_results, tflite_results):
np.testing.assert_almost_equal(tf_result, tflite_result, tolerance)
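# A minimal end-to-end sketch under assumed paths and tensor names: convert a
# hypothetical frozen graph, then compare the TFLite model against the
# TensorFlow evaluation function on random input data.
def _example_compare_frozen_graph():
  filename = "/tmp/frozen_graph.pb"  # hypothetical model file
  converter = _lite.TFLiteConverter.from_frozen_graph(
      filename, ["input"], ["output"])
  tflite_model = _convert(converter)
  tf_eval_func = evaluate_frozen_graph(filename, ["input"], ["output"])
  compare_models(tflite_model, tf_eval_func)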
def compare_models_v2(tflite_model,
tf_eval_func,
input_data=None,
input_data_range=None,
tolerance=5):
"""Compares TensorFlow and TFLite models for TensorFlow 2.0.
Unless the input data is provided, the models are compared with random data.
Currently only 1 input and 1 output are supported by this function.
Args:
tflite_model: Serialized TensorFlow Lite model.
tf_eval_func: Function to evaluate TensorFlow model. Either a lambda
function that takes in input data and outputs the results or a TensorFlow
ConcreteFunction.
input_data: np.ndarray to pass into models during inference. (default None).
input_data_range: A map where the key is the input tensor name and
the value is a tuple (min_val, max_val) which specifies the value range of
the corresponding input tensor. For example, '{'input1': (1, 5)}' means to
generate a random value for tensor `input1` within range [1.0, 5.0)
(half-inclusive). (default None)
tolerance: Decimal place to check accuracy to. (default 5)
"""
# Convert the input data into a map.
if input_data is None:
input_data, input_data_map = _generate_random_input_data(
tflite_model=tflite_model, input_data_range=input_data_range)
else:
input_data_map = _get_input_data_map(tflite_model, input_data)
input_data_func_map = {
input_name: constant_op.constant(input_data)
for input_name, input_data in input_data_map.items()
}
if len(input_data) > 1:
tf_results = tf_eval_func(**input_data_func_map)
else:
tf_results = tf_eval_func(constant_op.constant(input_data[0]))
tflite_results, tflite_labels = _evaluate_tflite_model(
tflite_model, input_data)
# Convert the output TensorFlow results into an ordered list.
if isinstance(tf_results, dict):
if len(tf_results) == 1:
tf_results = [tf_results[list(tf_results.keys())[0]]]
else:
tf_results = [tf_results[tflite_label] for tflite_label in tflite_labels]
else:
tf_results = [tf_results]
for tf_result, tflite_result in zip(tf_results, tflite_results):
np.testing.assert_almost_equal(tf_result, tflite_result, tolerance)
def test_frozen_graph_quant(filename,
input_arrays,
output_arrays,
input_shapes=None,
**kwargs):
"""Sanity check to validate post quantize flag alters the graph.
This test does not check correctness of the converted model. It converts the
TensorFlow frozen graph to TFLite with and without the post_training_quantized
flag. It ensures some tensors have different types between the float and
quantized models in the case of an all TFLite model or mix-and-match model.
It ensures tensor types do not change in the case of an all Flex model.
Args:
filename: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
**kwargs: Additional arguments to be passed into the converter.
Raises:
ValueError: post_training_quantize flag doesn't act as intended.
"""
# Convert and load the float model.
converter = _lite.TFLiteConverter.from_frozen_graph(
filename, input_arrays, output_arrays, input_shapes)
tflite_model_float = _convert(converter, **kwargs)
interpreter_float = _get_tflite_interpreter(tflite_model_float)
interpreter_float.allocate_tensors()
float_tensors = interpreter_float.get_tensor_details()
# Convert and load the quantized model.
converter = _lite.TFLiteConverter.from_frozen_graph(filename, input_arrays,
output_arrays,
input_shapes)
tflite_model_quant = _convert(
converter, post_training_quantize=True, **kwargs)
interpreter_quant = _get_tflite_interpreter(tflite_model_quant)
interpreter_quant.allocate_tensors()
quant_tensors = interpreter_quant.get_tensor_details()
quant_tensors_map = {
tensor_detail["name"]: tensor_detail for tensor_detail in quant_tensors
}
# Check if weights are of different types in the float and quantized models.
num_tensors_float = len(float_tensors)
num_tensors_same_dtypes = sum(
float_tensor["dtype"] == quant_tensors_map[float_tensor["name"]]["dtype"]
for float_tensor in float_tensors)
has_quant_tensor = num_tensors_float != num_tensors_same_dtypes
# For the "flex" case, post_training_quantize should not alter the graph,
# unless we are quantizing to float16.
if ("target_ops" in kwargs and
not kwargs.get("quantize_to_float16", False) and
not kwargs.get("post_training_quantize_16x8", False) and
set(kwargs["target_ops"]) == set([_lite.OpsSet.SELECT_TF_OPS])):
if has_quant_tensor:
raise ValueError("--post_training_quantize flag unexpectedly altered the "
"full Flex mode graph.")
elif not has_quant_tensor:
raise ValueError("--post_training_quantize flag was unable to quantize the "
"graph as expected in TFLite and mix-and-match mode.")
def test_frozen_graph(filename,
input_arrays,
output_arrays,
input_shapes=None,
input_shapes_resize=None,
input_data=None,
input_data_range=None,
**kwargs):
"""Validates the TensorFlow frozen graph converts to a TFLite model.
Converts the TensorFlow frozen graph to TFLite and checks the accuracy of the
model on random data.
Args:
filename: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
input_shapes_resize: A map where the key is the input tensor name and the
value is the shape of the input tensor. This resize happens after model
conversion, prior to calling allocate tensors. (default None)
input_data: np.ndarray to pass into models during inference. (default None).
input_data_range: A map where the key is the input tensor name and
the value is a tuple (min_val, max_val) which specifies the value range of
the corresponding input tensor. For example, '{'input1': (1, 5)}' means to
generate a random value for tensor `input1` within range [1.0, 5.0)
(half-inclusive). (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
converter = _lite.TFLiteConverter.from_frozen_graph(
filename, input_arrays, output_arrays, input_shapes)
tflite_model = _convert(converter, **kwargs)
tf_eval_func = evaluate_frozen_graph(filename, input_arrays, output_arrays)
compare_models(
tflite_model,
tf_eval_func,
input_shapes_resize=input_shapes_resize,
input_data=input_data,
input_data_range=input_data_range)
def test_saved_model(directory,
input_shapes=None,
tag_set=None,
signature_key=None,
input_data=None,
input_data_range=None,
**kwargs):
"""Validates the TensorFlow SavedModel converts to a TFLite model.
Converts the TensorFlow SavedModel to TFLite and checks the accuracy of the
model on random data.
Args:
directory: SavedModel directory to convert.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
input_data: np.ndarray to pass into models during inference. (default None).
input_data_range: A map where the key is the input tensor name and
the value is a tuple (min_val, max_val) which specifies the value range of
the corresponding input tensor. For example, '{'input1': (1, 5)}' means to
generate a random value for tensor `input1` within range [1.0, 5.0)
(half-inclusive). (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
converter = _lite.TFLiteConverter.from_saved_model(
directory,
input_shapes=input_shapes,
tag_set=tag_set,
signature_key=signature_key)
tflite_model = _convert(converter, **kwargs)
# 5 decimal places by default
tolerance = 5
if kwargs.get("post_training_quantize_16x8", False):
_check_model_quantized_to_16x8(tflite_model)
# only 2 decimal places for full quantization
tolerance = 2
tf_eval_func = evaluate_saved_model(directory, tag_set, signature_key)
compare_models(
tflite_model,
tf_eval_func,
input_data=input_data,
input_data_range=input_data_range,
tolerance=tolerance)
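# Hedged example for ``test_saved_model`` (the directory path is hypothetical):
# compare a SavedModel export against its TFLite conversion, once with default
# settings and once with 16x8 post-training quantization, which also relaxes
# the comparison tolerance to 2 decimal places as implemented above.
#
#   test_saved_model("/tmp/my_saved_model", tag_set={"serve"})
#   test_saved_model("/tmp/my_saved_model", post_training_quantize_16x8=True)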
def test_saved_model_v2(directory,
tag_set=None,
signature_key=None,
input_data=None,
input_data_range=None,
**kwargs):
"""Validates the TensorFlow SavedModel converts to a TFLite model.
Converts the TensorFlow SavedModel to TFLite and checks the accuracy of the
model on random data.
Args:
directory: SavedModel directory to convert.
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
input_data: np.ndarray to pass into models during inference. (default None).
input_data_range: A map where the key is the input tensor name and
the value is a tuple (min_val, max_val) which specifies the value range of
the corresponding input tensor. For example, '{'input1': (1, 5)}' means to
generate a random value for tensor `input1` within range [1.0, 5.0)
(half-inclusive). (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
model = _load.load(directory, tags=tag_set)
if not signature_key:
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
concrete_func = model.signatures[signature_key]
converter = _lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = _convert(converter, **kwargs)
compare_models_v2(
tflite_model,
concrete_func,
input_data=input_data,
input_data_range=input_data_range)
def test_saved_model_v2_quant_float16(directory, **kwargs):
"""Validates the TensorFlow SavedModel converts to a TFLite model."""
converter = _lite.TFLiteConverterV2.from_saved_model(directory)
tflite_model_float = _convert(converter, version=2, **kwargs)
interpreter_float = _get_tflite_interpreter(tflite_model_float)
interpreter_float.allocate_tensors()
float_tensors = interpreter_float.get_tensor_details()
tflite_model_quant = _convert(
converter,
version=2,
post_training_quantize=True,
quantize_to_float16=True,
**kwargs)
interpreter_quant = _get_tflite_interpreter(tflite_model_quant)
interpreter_quant.allocate_tensors()
quant_tensors = interpreter_quant.get_tensor_details()
quant_tensors_map = {
tensor_detail["name"]: tensor_detail for tensor_detail in quant_tensors
}
# Check if weights are of different types in the float and quantized models.
num_tensors_float = len(float_tensors)
num_tensors_same_dtypes = sum(
float_tensor["dtype"] == quant_tensors_map[float_tensor["name"]]["dtype"]
for float_tensor in float_tensors)
has_quant_tensor = num_tensors_float != num_tensors_same_dtypes
if not has_quant_tensor:
raise ValueError("--post_training_quantize flag was unable to quantize the "
"graph as expected.")
def test_keras_model(filename,
input_arrays=None,
input_shapes=None,
input_data=None,
input_data_range=None,
**kwargs):
"""Validates the tf.keras model converts to a TFLite model.
Converts the tf.keras model to TFLite and checks the accuracy of the model on
random data.
Args:
filename: Full filepath of HDF5 file containing the tf.keras model.
input_arrays: List of input tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
input_data: np.ndarray to pass into models during inference. (default None).
input_data_range: A map where the key is the input tensor name and
the value is a tuple (min_val, max_val) which specifies the value range of
the corresponding input tensor. For example, '{'input1': (1, 5)}' means to
generate a random value for tensor `input1` within range [1.0, 5.0)
(half-inclusive). (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
converter = _lite.TFLiteConverter.from_keras_model_file(
filename, input_arrays=input_arrays, input_shapes=input_shapes)
tflite_model = _convert(converter, **kwargs)
tf_eval_func = evaluate_keras_model(filename)
compare_models(
tflite_model,
tf_eval_func,
input_data=input_data,
input_data_range=input_data_range)
def test_keras_model_v2(filename,
input_shapes=None,
input_data=None,
input_data_range=None,
**kwargs):
"""Validates the tf.keras model converts to a TFLite model.
Converts the tf.keras model to TFLite and checks the accuracy of the model on
random data.
Args:
filename: Full filepath of HDF5 file containing the tf.keras model.
input_shapes: List of list of integers representing input shapes in the
order of the tf.keras model's .input attribute (e.g., [[1, 16, 16, 3]]).
(default None)
input_data: np.ndarray to pass into models during inference. (default None).
input_data_range: A map where the key is the input tensor name and
the value is a tuple (min_val, max_val) which specifies the value range of
the corresponding input tensor. For example, '{'input1': (1, 5)}' means to
generate a random value for tensor `input1` within range [1.0, 5.0)
(half-inclusive). (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
keras_model = _keras.models.load_model(filename)
if input_shapes:
for tensor, shape in zip(keras_model.inputs, input_shapes):
tensor.set_shape(shape)
converter = _lite.TFLiteConverterV2.from_keras_model(keras_model)
tflite_model = _convert(converter, **kwargs)
tf_eval_func = evaluate_keras_model(filename)
compare_models_v2(
tflite_model,
tf_eval_func,
input_data=input_data,
input_data_range=input_data_range)
|
|
# coding=utf-8
"""
Module containing evaluation functions suitable for judging the performance of
a fitted LightFM model.
"""
import numpy as np
from ._lightfm_fast import CSRMatrix, calculate_auc_from_rank
__all__ = ["precision_at_k", "recall_at_k", "auc_score", "reciprocal_rank"]
def precision_at_k(
model,
test_interactions,
train_interactions=None,
k=10,
user_features=None,
item_features=None,
preserve_rows=False,
num_threads=1,
check_intersections=True,
):
"""
Measure the precision at k metric for a model: the fraction of known
positives in the first k positions of the ranked list of results.
A perfect score is 1.0.
Parameters
----------
model: LightFM instance
the fitted model to be evaluated
test_interactions: np.float32 csr_matrix of shape [n_users, n_items]
Non-zero entries representing known positives in the evaluation set.
train_interactions: np.float32 csr_matrix of shape [n_users, n_items], optional
Non-zero entries representing known positives in the train set. These
will be omitted from the score calculations to avoid re-recommending
known positives.
k: integer, optional
The k parameter.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
preserve_rows: boolean, optional
When False (default), the number of rows in the output will be equal
to the number of users with interactions in the evaluation set.
When True, the number of rows in the output will be equal to the
number of users.
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
check_intersections: bool, optional, True by default,
Only relevant when train_interactions are supplied.
A flag that signals whether the test and train matrices should be checked
for intersections to prevent optimistic ranks / wrong evaluation / bad data split.
Returns
-------
np.array of shape [n_users with interactions or n_users,]
Numpy array containing precision@k scores for each user. If there are
no interactions for a given user the returned precision will be 0.
"""
if num_threads < 1:
raise ValueError("Number of threads must be 1 or larger.")
ranks = model.predict_rank(
test_interactions,
train_interactions=train_interactions,
user_features=user_features,
item_features=item_features,
num_threads=num_threads,
check_intersections=check_intersections,
)
ranks.data = np.less(ranks.data, k, ranks.data)
precision = np.squeeze(np.array(ranks.sum(axis=1))) / k
if not preserve_rows:
precision = precision[test_interactions.getnnz(axis=1) > 0]
return precision
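# Minimal end-to-end sketch (hedged: uses the MovieLens helper bundled with
# LightFM; hyperparameters are illustrative, not tuned).
#
#   from lightfm import LightFM
#   from lightfm.datasets import fetch_movielens
#
#   data = fetch_movielens(min_rating=4.0)
#   model = LightFM(loss="warp")
#   model.fit(data["train"], epochs=10, num_threads=2)
#   print("precision@10:",
#         precision_at_k(model, data["test"],
#                        train_interactions=data["train"],
#                        k=10, num_threads=2).mean())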
def recall_at_k(
model,
test_interactions,
train_interactions=None,
k=10,
user_features=None,
item_features=None,
preserve_rows=False,
num_threads=1,
check_intersections=True,
):
"""
Measure the recall at k metric for a model: the number of positive items in
the first k positions of the ranked list of results divided by the number
of positive items in the test period. A perfect score is 1.0.
Parameters
----------
model: LightFM instance
the fitted model to be evaluated
test_interactions: np.float32 csr_matrix of shape [n_users, n_items]
Non-zero entries representing known positives in the evaluation set.
train_interactions: np.float32 csr_matrix of shape [n_users, n_items], optional
Non-zero entries representing known positives in the train set. These
will be omitted from the score calculations to avoid re-recommending
known positives.
k: integer, optional
The k parameter.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
preserve_rows: boolean, optional
When False (default), the number of rows in the output will be equal
to the number of users with interactions in the evaluation set.
When True, the number of rows in the output will be equal to the
number of users.
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
check_intersections: bool, optional, True by default,
Only relevant when train_interactions are supplied.
A flag that signals whether the test and train matrices should be checked
for intersections to prevent optimistic ranks / wrong evaluation / bad data split.
Returns
-------
np.array of shape [n_users with interactions or n_users,]
Numpy array containing recall@k scores for each user. If there are no
interactions for a given user having items in the test period, the
returned recall will be 0.
"""
if num_threads < 1:
raise ValueError("Number of threads must be 1 or larger.")
ranks = model.predict_rank(
test_interactions,
train_interactions=train_interactions,
user_features=user_features,
item_features=item_features,
num_threads=num_threads,
check_intersections=check_intersections,
)
ranks.data = np.less(ranks.data, k, ranks.data)
retrieved = np.squeeze(test_interactions.getnnz(axis=1))
hit = np.squeeze(np.array(ranks.sum(axis=1)))
if not preserve_rows:
hit = hit[test_interactions.getnnz(axis=1) > 0]
retrieved = retrieved[test_interactions.getnnz(axis=1) > 0]
return hit / retrieved
def auc_score(
model,
test_interactions,
train_interactions=None,
user_features=None,
item_features=None,
preserve_rows=False,
num_threads=1,
check_intersections=True,
):
"""
Measure the ROC AUC metric for a model: the probability that a randomly
chosen positive example has a higher score than a randomly chosen negative
example.
A perfect score is 1.0.
Parameters
----------
model: LightFM instance
the fitted model to be evaluated
test_interactions: np.float32 csr_matrix of shape [n_users, n_items]
Non-zero entries representing known positives in the evaluation set.
train_interactions: np.float32 csr_matrix of shape [n_users, n_items], optional
Non-zero entries representing known positives in the train set. These
will be omitted from the score calculations to avoid re-recommending
known positives.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
preserve_rows: boolean, optional
When False (default), the number of rows in the output will be equal
to the number of users with interactions in the evaluation set.
When True, the number of rows in the output will be equal to the
number of users.
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
check_intersections: bool, optional, True by default,
Only relevant when train_interactions are supplied.
A flag that signals whether the test and train matrices should be checked
for intersections to prevent optimistic ranks / wrong evaluation / bad data split.
Returns
-------
np.array of shape [n_users with interactions or n_users,]
Numpy array containing AUC scores for each user. If there are no
interactions for a given user the returned AUC will be 0.5.
"""
if num_threads < 1:
raise ValueError("Number of threads must be 1 or larger.")
ranks = model.predict_rank(
test_interactions,
train_interactions=train_interactions,
user_features=user_features,
item_features=item_features,
num_threads=num_threads,
check_intersections=check_intersections,
)
assert np.all(ranks.data >= 0)
auc = np.zeros(ranks.shape[0], dtype=np.float32)
if train_interactions is not None:
num_train_positives = np.squeeze(
np.array(train_interactions.getnnz(axis=1)).astype(np.int32)
)
else:
num_train_positives = np.zeros(test_interactions.shape[0], dtype=np.int32)
# The second argument is modified in-place, but
# here we don't care about the inconsistency
# introduced into the ranks matrix.
calculate_auc_from_rank(
CSRMatrix(ranks), num_train_positives, ranks.data, auc, num_threads
)
if not preserve_rows:
auc = auc[test_interactions.getnnz(axis=1) > 0]
return auc
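# Hedged sketch reusing the hypothetical ``model``/``data`` objects from the
# example above: excluding training positives via ``train_interactions`` and
# keeping ``check_intersections=True`` guards against optimistic AUC estimates.
#
#   auc = auc_score(model, data["test"],
#                   train_interactions=data["train"],
#                   check_intersections=True, num_threads=2)
#   print("mean AUC:", auc.mean())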
def reciprocal_rank(
model,
test_interactions,
train_interactions=None,
user_features=None,
item_features=None,
preserve_rows=False,
num_threads=1,
check_intersections=True,
):
"""
Measure the reciprocal rank metric for a model: 1 / the rank of the highest
ranked positive example. A perfect score is 1.0.
Parameters
----------
model: LightFM instance
the fitted model to be evaluated
test_interactions: np.float32 csr_matrix of shape [n_users, n_items]
Non-zero entries representing known positives in the evaluation set.
train_interactions: np.float32 csr_matrix of shape [n_users, n_items], optional
Non-zero entries representing known positives in the train set. These
will be omitted from the score calculations to avoid re-recommending
known positives.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
preserve_rows: boolean, optional
When False (default), the number of rows in the output will be equal
to the number of users with interactions in the evaluation set.
When True, the number of rows in the output will be equal to the
number of users.
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
check_intersections: bool, optional, True by default,
Only relevant when train_interactions are supplied.
A flag that signals whether the test and train matrices should be checked
for intersections to prevent optimistic ranks / wrong evaluation / bad data split.
Returns
-------
np.array of shape [n_users with interactions or n_users,]
Numpy array containing reciprocal rank scores for each user.
If there are no interactions for a given user the returned value will
be 0.0.
"""
if num_threads < 1:
raise ValueError("Number of threads must be 1 or larger.")
ranks = model.predict_rank(
test_interactions,
train_interactions=train_interactions,
user_features=user_features,
item_features=item_features,
num_threads=num_threads,
check_intersections=check_intersections,
)
ranks.data = 1.0 / (ranks.data + 1.0)
ranks = np.squeeze(np.array(ranks.max(axis=1).todense()))
if not preserve_rows:
ranks = ranks[test_interactions.getnnz(axis=1) > 0]
return ranks
|
|
"""
Manage Windows features via the ServerManager powershell module. Can install and
remove roles/features.
:maintainer: Shane Lee <slee@saltstack.com>
:platform: Windows Server 2008R2 or greater
:depends: win_servermanager.install
:depends: win_servermanager.remove
"""
# Import salt modules
import salt.utils.data
def __virtual__():
"""
Load only if win_servermanager is loaded
"""
if "win_servermanager.install" in __salt__:
return "win_servermanager"
return (False, "win_servermanager module could not be loaded")
def installed(
name,
features=None,
recurse=False,
restart=False,
source=None,
exclude=None,
**kwargs
):
"""
Install the windows feature. To install a single feature, use the ``name``
parameter. To install multiple features, use the ``features`` parameter.
.. note::
        Some features require a reboot after installation or removal. If so,
        other features cannot be installed until the server is restarted!
Args:
name (str):
Short name of the feature (the right column in
win_servermanager.list_available). This can be a single feature or a
string of features in a comma delimited list (no spaces)
.. note::
A list is not allowed in the name parameter of any state. Use
the ``features`` parameter if you want to pass the features as a
list
features (Optional[list]):
A list of features to install. If this is passed it will be used
instead of the ``name`` parameter.
.. versionadded:: 2018.3.0
recurse (Optional[bool]):
            Install all sub-features as well. If the feature is installed but
            one of its sub-features is not, setting this will install the
            missing sub-features. This argument was previously renamed from
            ``force``. To ensure backwards compatibility ``force`` will
            continue to work, but please update your states to use the
            preferred ``recurse`` arg.
source (Optional[str]):
Path to the source files if missing from the target system. None
means that the system will use windows update services to find the
required files. Default is None
restart (Optional[bool]):
Restarts the computer when installation is complete, if required by
the role/feature installed. Default is False
exclude (Optional[str]):
The name of the feature to exclude when installing the named
feature. This can be a single feature, a string of features in a
comma-delimited list (no spaces), or a list of features.
.. warning::
As there is no exclude option for the ``Add-WindowsFeature``
or ``Install-WindowsFeature`` PowerShell commands the features
named in ``exclude`` will be installed with other sub-features
and will then be removed. **If the feature named in ``exclude``
is not a sub-feature of one of the installed items it will still
be removed.**
Example:
Do not use the role or feature names mentioned in the PKGMGR
documentation. To get a list of available roles and features run the
following command:
.. code-block:: bash
salt <minion_name> win_servermanager.list_available
Use the name in the right column of the results.
.. code-block:: yaml
# Installs the IIS Web Server Role (Web-Server)
IIS-WebServerRole:
win_servermanager.installed:
- recurse: True
- name: Web-Server
# Install multiple features, exclude the Web-Service
install_multiple_features:
win_servermanager.installed:
- recurse: True
- features:
- RemoteAccess
- XPS-Viewer
- SNMP-Service
- exclude:
- Web-Server
"""
if "force" in kwargs:
kwargs.pop("force")
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
# Check if features is not passed, use name. Split commas
if features is None:
features = name.split(",")
# Make sure features is a list, split commas
if not isinstance(features, list):
features = features.split(",")
# Determine if the feature is installed
old = __salt__["win_servermanager.list_installed"]()
cur_feat = []
for feature in features:
if feature not in old:
ret["changes"][feature] = "Will be installed recurse={}".format(recurse)
elif recurse:
ret["changes"][feature] = "Already installed but might install sub-features"
else:
cur_feat.append(feature)
if cur_feat:
cur_feat.insert(0, "The following features are already installed:")
ret["comment"] = "\n- ".join(cur_feat)
if not ret["changes"]:
return ret
if __opts__["test"]:
ret["result"] = None
return ret
# Install the features
status = __salt__["win_servermanager.install"](
features, recurse=recurse, restart=restart, source=source, exclude=exclude
)
ret["result"] = status["Success"]
# Show items failed to install
fail_feat = []
new_feat = []
rem_feat = []
for feature in status["Features"]:
# Features that failed to install or be removed
if not status["Features"][feature].get("Success", True):
fail_feat.append("- {}".format(feature))
# Features that installed
elif "(exclude)" not in status["Features"][feature]["Message"]:
new_feat.append("- {}".format(feature))
# Show items that were removed because they were part of `exclude`
elif "(exclude)" in status["Features"][feature]["Message"]:
rem_feat.append("- {}".format(feature))
if fail_feat:
fail_feat.insert(0, "Failed to install the following:")
if new_feat:
new_feat.insert(0, "Installed the following:")
if rem_feat:
rem_feat.insert(0, "Removed the following (exclude):")
ret["comment"] = "\n".join(fail_feat + new_feat + rem_feat)
# Get the changes
new = __salt__["win_servermanager.list_installed"]()
ret["changes"] = salt.utils.data.compare_dicts(old, new)
return ret
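# Illustrative return value when this state runs with test=True for a feature
# that is not yet installed (the feature name is hypothetical; the strings
# follow the logic above):
#
#   {"name": "Web-Server",
#    "result": None,
#    "changes": {"Web-Server": "Will be installed recurse=False"},
#    "comment": ""}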
def removed(name, features=None, remove_payload=False, restart=False):
"""
    Remove the windows feature. To remove a single feature, use the ``name``
parameter. To remove multiple features, use the ``features`` parameter.
Args:
name (str):
Short name of the feature (the right column in
win_servermanager.list_available). This can be a single feature or a
string of features in a comma-delimited list (no spaces)
.. note::
A list is not allowed in the name parameter of any state. Use
the ``features`` parameter if you want to pass the features as a
list
features (Optional[list]):
A list of features to remove. If this is passed it will be used
instead of the ``name`` parameter.
.. versionadded:: 2018.3.0
remove_payload (Optional[bool]):
True will cause the feature to be removed from the side-by-side
store. To install the feature in the future you will need to
specify the ``source``
restart (Optional[bool]):
Restarts the computer when uninstall is complete if required by the
role/feature uninstall. Default is False
.. note::
        Some features require a reboot after uninstall. If so, the feature will
        not be completely uninstalled until the server is restarted.
Example:
Do not use the role or feature names mentioned in the PKGMGR
documentation. To get a list of available roles and features run the
following command:
.. code-block:: bash
salt <minion_name> win_servermanager.list_available
Use the name in the right column of the results.
.. code-block:: yaml
        # Uninstall the IIS Web Server Role (Web-Server)
IIS-WebserverRole:
win_servermanager.removed:
- name: Web-Server
# Uninstall multiple features, reboot if required
uninstall_multiple_features:
win_servermanager.removed:
- features:
- RemoteAccess
            - XPS-Viewer
- SNMP-Service
- restart: True
"""
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
# Check if features is not passed, use name. Split commas
if features is None:
features = name.split(",")
# Make sure features is a list, split commas
if not isinstance(features, list):
features = features.split(",")
# Determine if the feature is installed
old = __salt__["win_servermanager.list_installed"]()
rem_feat = []
for feature in features:
if feature in old:
ret["changes"][feature] = "Will be removed"
else:
rem_feat.append(feature)
if rem_feat:
rem_feat.insert(0, "The following features are not installed:")
ret["comment"] = "\n- ".join(rem_feat)
if not ret["changes"]:
return ret
if __opts__["test"]:
ret["result"] = None
return ret
# Remove the features
status = __salt__["win_servermanager.remove"](
features, remove_payload=remove_payload, restart=restart
)
ret["result"] = status["Success"]
# Some items failed to uninstall
fail_feat = []
rem_feat = []
for feature in status["Features"]:
# Use get because sometimes 'Success' isn't defined such as when the
# feature is already uninstalled
if not status["Features"][feature].get("Success", True):
# Show items that failed to uninstall
fail_feat.append("- {}".format(feature))
else:
# Show items that uninstalled
rem_feat.append("- {}".format(feature))
if fail_feat:
fail_feat.insert(0, "Failed to remove the following:")
if rem_feat:
rem_feat.insert(0, "Removed the following:")
ret["comment"] = "\n".join(fail_feat + rem_feat)
# Get the changes
new = __salt__["win_servermanager.list_installed"]()
ret["changes"] = salt.utils.data.compare_dicts(old, new)
return ret
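# Hedged CLI sketch (the minion id is hypothetical): exercising these states
# directly with ``state.single`` before wiring them into an SLS file.
#
#   salt 'winminion' state.single win_servermanager.installed name=Web-Server recurse=True
#   salt 'winminion' state.single win_servermanager.removed name=Web-Server test=True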
|
|
#!/usr/bin/python3
# Filename: simRobot_grid.py
# This is the main file used to simulate a Robot in a grid world.
__author__ = 'Siddharth Pramod'
__email__ = 'spramod1@umbc.edu'
import random
import gui_grid
from math import pi, sin, cos
from collections import deque
from pprint import pprint
from scipy import io
inf = float('inf')
class World():
""" A class to describe a Robot World."""
def __init__(self, name, size_x, size_y):
""" :type name: str
:param name: name for the World
:type size_x: int
:param size_x: Size of the World in the x-dimension
:type size_y: int
:param size_y: Size of the World in the y-dimension
"""
self.name = name
self.xl = 0
self.xr = size_x
self.yu = 0
self.yd = size_y
self.wallList = {}
self.currentWallId = 0
self.display = gui_grid.WorldDisplay(width = size_x, height = size_y)
print('World instantiated, \n Name: {0}'
'\n Dims: ({1} x {2})'.format(self.name, self.xr, self.yd))
def __del__(self):
print('Ending world {0}'.format(self.name))
def buildWall(self, x, y):
""" Build a wall at location x, y"""
wall = Wall(x, y)
self.display.showWall(wall)
self.wallList[self.currentWallId] = (x, y)
self.currentWallId += 1
return None
class Wall():
""" A class describing a Wall/Impassable Obstruction."""
def __init__(self, x, y):
""" :param x: location of the Wall.
:param y: location of the Wall."""
self.x = x
self.y = y
class Robot():
""" Class to define a robot."""
def __init__(self, idn, world):
""" :type idn: int
:param idn: ID number
:type world: World
:param world: World object that the Robot is to be initialized in
"""
self.idn = idn
self.world = world
self.pos_x = random.randint(world.xl, world.xr) # Set random position
self.pos_y = random.randint(world.yu, world.yd)
self.pos_or = random.randint(0, 3) # Set random orientation
self.ranges = [0. for i in range(8)] # Eight direction sonar
self.getRanges()
self.records = deque([])
world.display.showRobot(self)
print('Robot instantiated in world \'{0}\', \n ID: {1}'.format(self.world.name, self.idn))
self.read_pos()
def __del__(self):
print('Robot {0} removed from World \'{1}\''.format(self.idn, self.world.name))
def read_pos(self):
print('Robot {0} is at: ({1},{2},{3}) \n Ranges:{4}'
.format(self.idn, self.pos_x, self.pos_y, self.pos_or, self.ranges))
def left(self, n):
""" Move n steps left."""
self.pos_or = (self.pos_or - n) % 4
self.getRanges()
return None
def right(self, n):
""" Move n steps right."""
self.pos_or = (self.pos_or + n) % 4
self.getRanges()
return None
def forward(self, n):
""" Move n steps forward."""
rad = pi / 2 * self.pos_or
if (self.pos_x + n * cos(rad), self.pos_y + n * sin(rad)) not in self.world.wallList.values():
self.pos_x += n * cos(rad)
self.pos_y += n * sin(rad)
if self.pos_x > self.world.xr: # ensure robot doesn't step out of world
self.pos_x = self.world.xr
elif self.pos_x < self.world.xl:
self.pos_x = self.world.xl
if self.pos_y > self.world.yd:
self.pos_y = self.world.yd
elif self.pos_y < self.world.yu:
self.pos_y = self.world.yu
# else:
# print ('Wall at ({0},{1})'.format(self.pos_x + n * cos(rad), self.pos_y + n * sin(rad)))
self.pos_x = round(self.pos_x)
self.pos_y = round(self.pos_y)
self.getRanges()
return None
def getRanges(self):
""" Get ranges to the nearest wall in 8 equally spaced directions - N, E, S, W, NE, SE, SW, NW.
Useful to simulate a SONAR."""
ranges = [inf for i in range(8)]
for i in range(4): # First for North/East/South/West ranges
rad = pi / 2 * ((self.pos_or + i) % 4)
d = 0
found = False
while not found:
if (round(self.pos_x + d * cos(rad)), round(self.pos_y + d * sin(rad))) in self.world.wallList.values():
ranges[i] = d # Since this is a grid world, else it would be the sum of sin & cos components
found = True
d += 1
if ((self.pos_x + d * cos(rad)) < self.world.xl or (self.pos_x + d * cos(rad)) > self.world.xr or
(self.pos_y + d * sin(rad)) < self.world.yu or (self.pos_y + d * sin(rad)) > self.world.yd):
break
# print (i,d,rad)
for i in range(4): # Next for NE/SE/SW/NW ranges
rad = pi / 2 * ((self.pos_or + i) % 4)
d = 0
found = False
while not found:
if (round(self.pos_x + d * (cos(rad) + sin(rad))), round(self.pos_y + d * (sin(rad) - cos(rad)))) in self.world.wallList.values():
ranges[4+i] = 2 * d # Since this is a grid world, else it would be the sum of sin & cos components
found = True
d += 1
if ((self.pos_x + d * cos(rad)) < self.world.xl or (self.pos_x + d * cos(rad)) > self.world.xr or
(self.pos_y + d * sin(rad)) < self.world.yu or (self.pos_y + d * sin(rad)) > self.world.yd):
break
self.ranges = ranges
return None
def recordRanges(self):
""" To maintain a record of range percepts."""
self.records.append(self.ranges)
return None
def brownian(self, n):
""" Perform brownian motion for n steps"""
for i in range(n):
random.choice([self.right, self.left, self.forward])(1)
self.recordRanges()
self.world.display.showRobot(self)
self.writeRecords2Mat('./records.mat')
return None
def writeRecords2Mat(self, filename):
""" Save the range percep records as a .mat file."""
io.savemat(filename, mdict={'X': self.records})
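# Hedged usage sketch outside the GUI control loop below: build a small walled
# world, drive the robot programmatically and inspect its sonar percepts
# (dimensions and moves are illustrative).
#
#   w = World('Demo', 20, 20)
#   for j in range(21):                      # outer boundary walls
#       w.buildWall(j, 0); w.buildWall(j, 20)
#       w.buildWall(0, j); w.buildWall(20, j)
#   r = Robot(1, w)
#   r.forward(3); r.right(1); r.forward(2)
#   print(r.ranges)                          # eight sonar ranges after the moves
#   r.recordRanges(); r.writeRecords2Mat('./demo_records.mat')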
if __name__ == '__main__':
w = World('Simple', 51, 31) # Init World
for j in range(52): # Top & bottom boundaries
w.buildWall(j, 0)
w.buildWall(j, 31)
for j in range(32): # Left & Right boundaries
w.buildWall(0, j)
w.buildWall(51, j)
for j in range(10, 25): # West Wall
w.buildWall(10, j)
for j in range(10, 40): # North & south walls
w.buildWall(j, 10)
w.buildWall(j, 25)
for j in range(10, 15): # center wall top
w.buildWall(25, j)
for j in range(21, 26): # center wall bottom
w.buildWall(25, j)
r = Robot(1, w) # Init Robot
w.display.controlRobot(r) # Display robot
# r.brownian(1000) # Uncomment to perform Brownian motion
# pprint(list(r.records)) # Uncomment to print record of range percepts
    w.display.root.mainloop() # Run the GUI main loop to control the Robot manually; nothing below this line runs
|
|
"""Config flow for the Huawei LTE platform."""
from collections import OrderedDict
import logging
from typing import Optional
from urllib.parse import urlparse
from huawei_lte_api.AuthorizedConnection import AuthorizedConnection
from huawei_lte_api.Client import Client
from huawei_lte_api.Connection import Connection
from huawei_lte_api.exceptions import (
LoginErrorPasswordWrongException,
LoginErrorUsernamePasswordOverrunException,
LoginErrorUsernamePasswordWrongException,
LoginErrorUsernameWrongException,
ResponseErrorException,
)
from requests.exceptions import Timeout
from url_normalize import url_normalize
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
CONF_URL,
CONF_USERNAME,
)
from homeassistant.core import callback
# see https://github.com/PyCQA/pylint/issues/3202 about the DOMAIN's pylint issue
from .const import CONNECTION_TIMEOUT, DEFAULT_DEVICE_NAME, DEFAULT_NOTIFY_SERVICE_NAME
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
class ConfigFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle Huawei LTE config flow."""
VERSION = 2
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get options flow."""
return OptionsFlowHandler(config_entry)
async def _async_show_user_form(self, user_input=None, errors=None):
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
OrderedDict(
(
(
vol.Required(
CONF_URL,
default=user_input.get(
CONF_URL,
# https://github.com/PyCQA/pylint/issues/3167
self.context.get( # pylint: disable=no-member
CONF_URL, ""
),
),
),
str,
),
(
vol.Optional(
CONF_USERNAME, default=user_input.get(CONF_USERNAME, "")
),
str,
),
(
vol.Optional(
CONF_PASSWORD, default=user_input.get(CONF_PASSWORD, "")
),
str,
),
)
)
),
errors=errors or {},
)
async def async_step_import(self, user_input=None):
"""Handle import initiated config flow."""
return await self.async_step_user(user_input)
def _already_configured(self, user_input):
"""See if we already have a router matching user input configured."""
existing_urls = {
url_normalize(entry.data[CONF_URL], default_scheme="http")
for entry in self._async_current_entries()
}
return user_input[CONF_URL] in existing_urls
async def async_step_user(self, user_input=None):
"""Handle user initiated config flow."""
if user_input is None:
return await self._async_show_user_form()
errors = {}
# Normalize URL
user_input[CONF_URL] = url_normalize(
user_input[CONF_URL], default_scheme="http"
)
if "://" not in user_input[CONF_URL]:
errors[CONF_URL] = "invalid_url"
return await self._async_show_user_form(
user_input=user_input, errors=errors
)
if self._already_configured(user_input):
return self.async_abort(reason="already_configured")
conn = None
def logout():
if hasattr(conn, "user"):
try:
conn.user.logout()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not logout", exc_info=True)
def try_connect(username: Optional[str], password: Optional[str]) -> Connection:
"""Try connecting with given credentials."""
if username or password:
conn = AuthorizedConnection(
user_input[CONF_URL],
username=username,
password=password,
timeout=CONNECTION_TIMEOUT,
)
else:
try:
conn = AuthorizedConnection(
user_input[CONF_URL],
username="",
password="",
timeout=CONNECTION_TIMEOUT,
)
user_input[CONF_USERNAME] = ""
user_input[CONF_PASSWORD] = ""
except ResponseErrorException:
_LOGGER.debug(
"Could not login with empty credentials, proceeding unauthenticated",
exc_info=True,
)
conn = Connection(user_input[CONF_URL], timeout=CONNECTION_TIMEOUT)
del user_input[CONF_USERNAME]
del user_input[CONF_PASSWORD]
return conn
def get_router_title(conn: Connection) -> str:
"""Get title for router."""
title = None
client = Client(conn)
try:
info = client.device.basic_information()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not get device.basic_information", exc_info=True)
else:
title = info.get("devicename")
if not title:
try:
info = client.device.information()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not get device.information", exc_info=True)
else:
title = info.get("DeviceName")
return title or DEFAULT_DEVICE_NAME
username = user_input.get(CONF_USERNAME)
password = user_input.get(CONF_PASSWORD)
try:
conn = await self.hass.async_add_executor_job(
try_connect, username, password
)
except LoginErrorUsernameWrongException:
errors[CONF_USERNAME] = "incorrect_username"
except LoginErrorPasswordWrongException:
errors[CONF_PASSWORD] = "incorrect_password"
except LoginErrorUsernamePasswordWrongException:
errors[CONF_USERNAME] = "incorrect_username_or_password"
except LoginErrorUsernamePasswordOverrunException:
errors["base"] = "login_attempts_exceeded"
except ResponseErrorException:
_LOGGER.warning("Response error", exc_info=True)
errors["base"] = "response_error"
except Timeout:
_LOGGER.warning("Connection timeout", exc_info=True)
errors[CONF_URL] = "connection_timeout"
except Exception: # pylint: disable=broad-except
_LOGGER.warning("Unknown error connecting to device", exc_info=True)
errors[CONF_URL] = "unknown_connection_error"
if errors:
await self.hass.async_add_executor_job(logout)
return await self._async_show_user_form(
user_input=user_input, errors=errors
)
# pylint: disable=no-member
title = self.context.get("title_placeholders", {}).get(
CONF_NAME
) or await self.hass.async_add_executor_job(get_router_title, conn)
await self.hass.async_add_executor_job(logout)
return self.async_create_entry(title=title, data=user_input)
async def async_step_ssdp(self, discovery_info):
"""Handle SSDP initiated config flow."""
await self.async_set_unique_id(discovery_info[ssdp.ATTR_UPNP_UDN])
self._abort_if_unique_id_configured()
        # Attempt to distinguish from other non-LTE Huawei router devices; at
        # least some of the devices we are interested in have a "Mobile Wi-Fi"
        # friendlyName.
if "mobile" not in discovery_info.get(ssdp.ATTR_UPNP_FRIENDLY_NAME, "").lower():
return self.async_abort(reason="not_huawei_lte")
# https://github.com/PyCQA/pylint/issues/3167
url = self.context[CONF_URL] = url_normalize( # pylint: disable=no-member
discovery_info.get(
ssdp.ATTR_UPNP_PRESENTATION_URL,
f"http://{urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION]).hostname}/",
)
)
if any(
url == flow["context"].get(CONF_URL) for flow in self._async_in_progress()
):
return self.async_abort(reason="already_in_progress")
user_input = {CONF_URL: url}
if self._already_configured(user_input):
return self.async_abort(reason="already_configured")
# pylint: disable=no-member
self.context["title_placeholders"] = {
CONF_NAME: discovery_info.get(ssdp.ATTR_UPNP_FRIENDLY_NAME)
}
return await self._async_show_user_form(user_input)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Huawei LTE options flow."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
# Recipients are persisted as a list, but handled as comma separated string in UI
if user_input is not None:
# Preserve existing options, for example *_from_yaml markers
data = {**self.config_entry.options, **user_input}
if not isinstance(data[CONF_RECIPIENT], list):
data[CONF_RECIPIENT] = [
x.strip() for x in data[CONF_RECIPIENT].split(",")
]
return self.async_create_entry(title="", data=data)
data_schema = vol.Schema(
{
vol.Optional(
CONF_NAME,
default=self.config_entry.options.get(
CONF_NAME, DEFAULT_NOTIFY_SERVICE_NAME
),
): str,
vol.Optional(
CONF_RECIPIENT,
default=", ".join(
self.config_entry.options.get(CONF_RECIPIENT, [])
),
): str,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
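# Illustrative round trip through the options flow above (values hypothetical;
# keys are shown with the usual values of CONF_NAME and CONF_RECIPIENT): the UI
# collects recipients as a comma separated string, and async_step_init stores a
# list.
#
#   user_input = {"name": "huawei_lte", "recipient": "+15551234, +15555678"}
#   # -> stored options: {"name": "huawei_lte",
#   #                     "recipient": ["+15551234", "+15555678"]}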
|
|
from __future__ import division,print_function
import sys,re,os
import logging
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
import numpy as np
from astropy.coordinates import SkyCoord,Angle
import numpy.random as rand
import matplotlib.pyplot as plt
import pandas as pd
from astropy import units as u
from astropy.units.quantity import Quantity
from astropy import constants as const
MSUN = const.M_sun.cgs.value
AU = const.au.cgs.value
DAY = 86400
G = const.G.cgs.value
else:
np = None
SkyCoord, Angle = (None, None)
rand = None
plt = None
pd = None
u = None
Quantity = None
const = None
MSUN, AU, DAY, G = (None, None, None, None)
from .utils import semimajor,random_spherepos,orbitproject,orbit_posvel
from ..plotutils import setfig
from ..hashutils import hashcombine, hashdf
class TripleOrbitPopulation(object):
"""
Stars 2 and 3 orbit each other (short orbit), far from star 1 (long orbit)
This object defines the orbits of a triple star system,
with orbits calculated assuming the "long" orbit does not perturb
the "short" orbit, which will not be true in the long run, but should
be true over short timescales as long as ``Plong >> Pshort``.
A :class:`TripleOrbitPopulation` is essentially made up of two
:class:`OrbitPopulation` objects: one for the "long" orbit
and one for the "short."
:param M1,M2,M3:
Masses of stars. Stars 2 and 3 are in a short orbit, far away from star 1.
If not :class:`astropy.units.Quantity` objects, then assumed to be
in solar mass units. May be single value or array-like.
:param Plong,Pshort:
Orbital Periods. Plong is orbital period of 2+3 and 1; Pshort is orbital
period of 2 and 3. If not :class:`astropy.units.Quantity` objects,
assumed to be in days. Can be single value or array-like.
N.B. If any item in Pshort happens to be
longer than the corresponding item in Plong, they will be switched.
:param ecclong,eccshort: (optional)
Eccentricities. Same story (long vs. short). Default=0 (circular).
Can be single value or array-like.
:param n: (optional)
Number of systems to simulate (if ``M1``, ``M2``, ``M3`` aren't
arrays of size > 1 already).
:param mean_anomaly_short,mean_anomaly_long: (optional)
Mean anomalies. This is only passed if you need to restore a
particular specific configuration (i.e., a particular saved simulation),
e.g., as done by :func:`TripleOrbitPopulation.from_df`.
If not provided, then randomized on (0, 2pi).
:param obsx_short,obsy_short,obsz_short: (optional)
"Observer" positions for the short orbit. Also only passed for purposes
of restoring configuration.
:param obsx_long,obsy_long,obsz_long: (optional)
"Observer" positions for long orbit. Also only passed for purposes of
restoring configuration.
:param obspos_short,obspos_long: (optional)
"Observer positions for short and long orbits, provided
as :class:`astropy.SkyCoord` objects. These will replace
obsx_short/long, obsy_short/long, obsz_short/long parameters if present.
"""
def __init__(self,M1,M2,M3,Plong,Pshort,ecclong=0,eccshort=0,n=None,
mean_anomaly_long=None,obsx_long=None,obsy_long=None,obsz_long=None,
obspos_long=None,
mean_anomaly_short=None,obsx_short=None,
obsy_short=None,obsz_short=None,
obspos_short=None):
Pshort, Plong = (np.minimum(Pshort,Plong), np.maximum(Pshort,Plong))
#if Plong < Pshort:
# Pshort,Plong = (Plong, Pshort)
self.orbpop_long = OrbitPopulation(M1,M2+M3,Plong,ecc=ecclong,n=n,
mean_anomaly=mean_anomaly_long,
obsx=obsx_long,obsy=obsy_long,obsz=obsz_long)
self.orbpop_short = OrbitPopulation(M2,M3,Pshort,ecc=eccshort,n=n,
mean_anomaly=mean_anomaly_short,
obsx=obsx_short,obsy=obsy_short,obsz=obsz_short)
def __hash__(self):
return hashcombine(self.orbpop_long, self.orbpop_short)
@property
def RV(self):
"""
Instantaneous RV of star 1 with respect to system center-of-mass
"""
return self.RV_1
@property
def RV_1(self):
"""
Instantaneous RV of star 1 with respect to system center-of-mass
"""
return self.orbpop_long.RV * (self.orbpop_long.M2 / (self.orbpop_long.M1 + self.orbpop_long.M2))
@property
def RV_2(self):
"""
Instantaneous RV of star 2 with respect to system center-of-mass
"""
return -self.orbpop_long.RV * (self.orbpop_long.M1 /
(self.orbpop_long.M1 + self.orbpop_long.M2)) +\
self.orbpop_short.RV_com1
@property
def RV_3(self):
"""
Instantaneous RV of star 3 with respect to system center-of-mass
"""
return -self.orbpop_long.RV * (self.orbpop_long.M1 / (self.orbpop_long.M1 + self.orbpop_long.M2)) +\
self.orbpop_short.RV_com2
@property
def Rsky(self):
"""
Projected separation of star 2+3 pair from star 1 [projected AU]
"""
return self.orbpop_long.Rsky
def dRV(self,dt):
"""
Returns difference in RVs (separated by time dt) of star 1.
:param dt:
Time separation for which to compute RV change. If not an
:class:`astropy.units.Quantity` object, then assumed to be in days.
"""
return self.dRV_1(dt)
def dRV_1(self,dt):
"""
Returns difference in RVs (separated by time dt) of star 1.
:param dt:
Time separation for which to compute RV change. If not an
:class:`astropy.units.Quantity` object, then assumed to be in days.
"""
return self.orbpop_long.dRV(dt,com=True)
def dRV_2(self,dt):
"""
Returns difference in RVs (separated by time dt) of star 2.
:param dt:
Time separation for which to compute RV change. If not an
:class:`astropy.units.Quantity` object, then assumed to be in days.
"""
return -self.orbpop_long.dRV(dt) * (self.orbpop_long.M1/(self.orbpop_long.M1 + self.orbpop_long.M2)) +\
self.orbpop_short.dRV(dt,com=True)
def dRV_3(self,dt):
"""
Returns difference in RVs (separated by time dt) of star 3.
:param dt:
Time separation for which to compute RV change. If not an
:class:`astropy.units.Quantity` object, then assumed to be in days.
"""
return -self.orbpop_long.dRV(dt) * (self.orbpop_long.M1/(self.orbpop_long.M1 + self.orbpop_long.M2)) -\
self.orbpop_short.dRV(dt) * (self.orbpop_short.M1/(self.orbpop_short.M1 + self.orbpop_short.M2))
def save_hdf(self,filename,path=''):
"""
Save to HDF5 file in desired path.
"""
self.orbpop_long.save_hdf(filename,'{}/long'.format(path))
self.orbpop_short.save_hdf(filename,'{}/short'.format(path))
def __add__(self, other):
if type(self) != type(other):
raise TypeError('Can only add like types of TripleOrbitPopulation')
newdf_long = pd.concat((self.orbpop_long.dataframe, other.orbpop_long.dataframe))
newdf_short = pd.concat((self.orbpop_short.dataframe, other.orbpop_short.dataframe))
return TripleOrbitPopulation_FromDF(newdf_long, newdf_short)
@classmethod
def from_df(cls, df_long, df_short):
"""
Builds TripleOrbitPopulation from DataFrame
``DataFrame`` objects must be of appropriate form to pass
to :func:`OrbitPopulation.from_df`.
:param df_long, df_short:
:class:`pandas.DataFrame` objects to pass to
:func:`OrbitPopulation.from_df`.
"""
pop = cls(1,1,1,1,1) #dummy population
pop.orbpop_long = OrbitPopulation.from_df(df_long)
pop.orbpop_short = OrbitPopulation.from_df(df_short)
return pop
@classmethod
def load_hdf(cls, filename, path=''):
"""
Load TripleOrbitPopulation from saved .h5 file.
:param filename:
HDF file name.
:param path:
Path within HDF file where data is stored.
"""
df_long = pd.read_hdf(filename,'{}/long/df'.format(path))
df_short = pd.read_hdf(filename,'{}/short/df'.format(path))
return cls.from_df(df_long, df_short)
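# Hedged usage sketch (masses in solar masses, periods in days; all numbers
# are illustrative): simulate 1000 hierarchical triples and inspect the RV
# drift of the primary over 100 days.
#
#   trip = TripleOrbitPopulation(1.0, 0.8, 0.5, Plong=2000., Pshort=15., n=1000)
#   print(trip.Rsky[:5])          # projected separations of the 2+3 pair from star 1
#   print(trip.dRV_1(100.)[:5])   # RV change of star 1 after 100 days
#   trip.save_hdf('triples.h5')   # state can later be restored with load_hdf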
class OrbitPopulation(object):
"""Population of orbits.
:param M1,M2:
Primary and secondary masses (if not ``Quantity``,
assumed to be in solar masses). Can be ``float``, array-like
or ``Quantity``.
:param P:
Orbital period(s) (if not ``Quantity``, assumed to be in days)
:type P:
``float``, array-like or ``Quantity``.
:param ecc: (``float`` or array-like, optional)
Eccentricities.
:param n: (optional)
Number of instances to simulate. If not provided, then this number
will be the length of ``M2`` (or ``P``) provided.
:param mean_anomaly: (optional)
Mean anomalies of orbits. Usually this will just be set randomly,
but can be provided to initialize a particular state (e.g., when
restoring an :class:`OrbitPopulation` object from a saved state).
:param obsx, obsy, obsz: (optional)
"Observer" positions to define coordinates. Will be set randomly
if not provided.
:param obspos: (optional)
"Observer" positions may be set with a ``SkyCoord`` object (replaces
obsx, obsy, obsz)
:type obspos:
:class:`astropy.coordinates.SkyCoord`
"""
def __init__(self,M1,M2,P,ecc=0,n=None,
mean_anomaly=None,obsx=None,obsy=None,obsz=None,
obspos=None):
if type(M1) != Quantity:
M1 = Quantity(M1, unit='M_sun')
if type(M2) != Quantity:
M2 = Quantity(M2, unit='M_sun')
if type(P) != Quantity:
P = Quantity(P, unit='day')
if n is None:
if M2.size==1:
n = P.size
else:
n = M2.size
self.M1 = M1
self.M2 = M2
self.N = n
self.P = P
if np.size(ecc) == 1:
ecc = np.ones(n)*ecc
self.ecc = ecc
mred = M1*M2/(M1+M2)
self.semimajor = semimajor(P,mred) #AU
self.mred = mred
if mean_anomaly is None:
M = rand.uniform(0,2*np.pi,size=n)
else:
M = mean_anomaly
self.M = M
#coordinates of random observers
if obspos is None:
if obsx is None:
self.obspos = random_spherepos(n)
else:
self.obspos = SkyCoord(obsx,obsy,obsz,representation='cartesian')
else:
self.obspos = obspos
#get positions, velocities relative to M1
position,velocity = orbit_posvel(self.M,self.ecc,self.semimajor.value,
self.mred.value,
self.obspos)
self.position = position
self.velocity = velocity
def __add__(self, other):
if type(self) != type(other):
raise TypeError('Can only add like types of OrbitPopulation')
newdf = pd.concat((self.dataframe, other.dataframe))
return OrbitPopulation_FromDF(newdf)
def __hash__(self):
return hashdf(self.dataframe)
@property
def Rsky(self):
"""
Sky separation of stars, in projected AU.
"""
return np.sqrt(self.position.x**2 + self.position.y**2)
@property
def RV(self):
"""
Relative radial velocities of two stars
"""
return self.velocity.z
@property
def RV_com1(self):
"""
RVs of star 1 relative to center-of-mass
"""
return self.RV * (self.M2 / (self.M1 + self.M2))
@property
def RV_com2(self):
"""
RVs of star 2 relative to center-of-mass
"""
return -self.RV * (self.M1 / (self.M1 + self.M2))
def dRV(self,dt,com=False):
"""Change in RV of star 1 for time separation dt (default=days)
:param dt:
Time separation for which to compute RV change. If not a ``Quantity``,
then assumed to be in days.
:type dt:
float, array-like, or ``Quantity``
:param com: (``bool``, optional)
If ``True``, then return dRV of star 1 in center-of-mass frame.
:return dRV:
Change in radial velocity over time ``dt``.
"""
if type(dt) != Quantity:
dt *= u.day
        # Mean motion computed with astropy units (const.G, mred, and semimajor all carry units).
        mean_motions = np.sqrt(const.G*(self.mred)/(self.semimajor)**3)
newM = self.M + mean_motions * dt
pos,vel = orbit_posvel(newM,self.ecc,self.semimajor.value,
self.mred.value,
self.obspos)
if com:
return (vel.z - self.RV) * (self.M2 / (self.M1 + self.M2))
else:
return vel.z-self.RV
def RV_timeseries(self,ts,recalc=False):
"""
Radial Velocity time series for star 1 at given times ts.
:param ts:
Times. If not ``Quantity``, assumed to be in days.
:type ts:
array-like or ``Quantity``
:param recalc: (optional)
If ``False``, then if called with the exact same ``ts``
as last call, it will return cached calculation.
"""
if type(ts) != Quantity:
ts *= u.day
        if not recalc and hasattr(self,'_RV_measurements'):
            if (ts == self.ts).all():
                return self._RV_measurements
RVs = Quantity(np.zeros((len(ts),self.N)),unit='km/s')
for i,t in enumerate(ts):
RVs[i,:] = self.dRV(t,com=True)
self._RV_measurements = RVs
self.ts = ts
return RVs
@property
def dataframe(self):
"""
Summary DataFrame of OrbitPopulation
Used to save/restore state.
"""
if not hasattr(self,'_dataframe'):
obspos = self.obspos.represent_as('cartesian')
obsx, obsy, obsz = (obspos.x,obspos.y,obspos.z)
df = pd.DataFrame({'M1':self.M1,
'M2':self.M2,
'P':self.P,
'ecc':self.ecc,
'mean_anomaly':self.M,
'obsx':obsx,
'obsy':obsy,
'obsz':obsz})
self._dataframe = df
return self._dataframe
def scatterplot(self,fig=None,figsize=(7,7),ms=0.5,
rmax=None,log=False,**kwargs):
"""
Makes a scatter plot of projected X-Y sky separation
:param fig: (optional)
Passed to :func:`plotutils.setfig`
:param figsize: (optional)
Size of figure (in).
:param ms: (optional)
Marker size
:param rmax: (optional)
Maximum projected radius to plot.
:param log: (optional)
Whether to plot with log scale.
:param **kwargs:
Additional keyword arguments passed to ``plt.plot``.
"""
setfig(fig,figsize=figsize)
plt.plot(self.position.x.value,self.position.y.value,'o',ms=ms,**kwargs)
plt.xlabel('projected separation [AU]')
plt.ylabel('projected separation [AU]')
if rmax is not None:
plt.xlim((-rmax,rmax))
plt.ylim((-rmax,rmax))
if log:
plt.xscale('log')
plt.yscale('log')
def save_hdf(self,filename,path=''):
"""
Saves all relevant data to .h5 file; so state can be restored.
"""
self.dataframe.to_hdf(filename,'{}/df'.format(path))
@classmethod
def from_df(cls, df):
"""Creates an OrbitPopulation from a DataFrame.
:param df:
:class:`pandas.DataFrame` object. Must contain the following
columns: ``['M1','M2','P','ecc','mean_anomaly','obsx','obsy','obsz']``,
i.e., as what is accessed via :attr:`OrbitPopulation.dataframe`.
:return:
:class:`OrbitPopulation`.
"""
return cls(df['M1'], df['M2'], df['P'],
ecc=df['ecc'], mean_anomaly=df['mean_anomaly'],
obsx=df['obsx'], obsy=df['obsy'], obsz=df['obsz'])
@classmethod
def load_hdf(cls, filename, path=''):
"""Loads OrbitPopulation from HDF file.
:param filename:
HDF file
:param path:
Path within HDF file store where :class:`OrbitPopulation` is saved.
"""
df = pd.read_hdf(filename,'{}/df'.format(path))
return cls.from_df(df)
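# Illustrative sketch (not part of the original module): how the save/load
# round-trip above is expected to fit together.  The constructor signature
# OrbitPopulation(M1, M2, P, ecc=...) is inferred from ``from_df`` and is an
# assumption, not a tested recipe.
def _example_orbitpop_roundtrip(filename='orbits.h5'):
    """Build a small population, save it to HDF5, and restore it."""
    M1 = np.ones(100)                    # primary masses [Msun]
    M2 = 0.5 * np.ones(100)              # secondary masses [Msun]
    P = np.random.random(100) * 1000.    # periods [days]
    pop = OrbitPopulation(M1, M2, P, ecc=0)
    pop.save_hdf(filename, path='orbits')
    restored = OrbitPopulation.load_hdf(filename, path='orbits')
    return pop, restored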
class BinaryGrid(OrbitPopulation):
def __init__(self, M1, qmin=0.1, qmax=1, Pmin=0.5, Pmax=365, N=1e5, logP=True, eccfn=None):
"""A grid of companions to primary, in mass ratio and period space.
:param M1:
Primary mass [solar masses].
:type M1:
``float``
:param qmin,qmax: (optional)
Minimum and maximum mass ratios.
:param Pmin,Pmax: (optional)
Min/max periods in days.
:param N: (optional)
Total number of simulations. Default = 10^5.
:param logP: (optional)
Whether to grid in log-period. If ``False``, then linear.
:param eccfn: (optional)
Function that returns eccentricity as a function of period.
If ``None``, then eccentricity will be zero.
:type eccfn:
callable
"""
        N = int(N)
        M1s = np.ones(N)*M1
M2s = (rand.random(size=N)*(qmax-qmin) + qmin)*M1s
if logP:
Ps = 10**(rand.random(size=N)*((np.log10(Pmax) - np.log10(Pmin))) + np.log10(Pmin))
else:
Ps = rand.random(size=N)*(Pmax - Pmin) + Pmin
if eccfn is None:
eccs = 0
else:
eccs = eccfn(Ps)
self.eccfn = eccfn
OrbitPopulation.__init__(self,M1s,M2s,Ps,ecc=eccs)
def RV_RMSgrid(self,ts,res=20,mres=None,Pres=None,
conf=0.95,measured_rms=None,drv=0,
plot=True,fig=None,contour=True,sigma=1):
"""Writes a grid of RV RMS values, assuming observations at given times.
Caveat Emptor: Written a long time ago, and
hasn't really been tested.
:param ts:
Times of observations
:param res, mres, Pres: (optional)
Resolution of grids. ``res`` relates to both mass and period;
otherwise ``mres`` and ``Pres`` can be set separately.
:param conf: (optional)
Confidence at which to exclude regions. Used if ``measured_rms``
is ``None``.
:param measured_rms: (optional)
Measured RV RMS, if applicable [not sure exactly how this is used]
:param drv: (optional)
Uncertainties in RV to simulate.
:param plot: (optional)
Whether to plot result.
:param fig: (optional)
Passed to :func:`plotutils.setfig`.
:param contour: (optional)
Whether to plot contours.
:param sigma: (optional)
Level at which to exclude, based on ``measured_rms``.
"""
RVs = self.RV_timeseries(ts)
RVs += rand.normal(size=np.size(RVs)).reshape(RVs.shape)*drv
rms = RVs.std(axis=0)
if mres is None:
mres = res
if Pres is None:
Pres = res
mbins = np.linspace(self.M2.min(),self.M2.max(),mres+1)
Pbins = np.logspace(np.log10(self.P.min()),np.log10(self.P.max()),Pres+1)
logPbins = np.log10(Pbins)
mbin_centers = (mbins[:-1] + mbins[1:])/2.
logPbin_centers = (logPbins[:-1] + logPbins[1:])/2.
minds = np.digitize(self.M2,mbins)
Pinds = np.digitize(self.P,Pbins)
pctiles = np.zeros((mres,Pres))
ns = np.zeros((mres,Pres))
for i in np.arange(mres):
for j in np.arange(Pres):
w = np.where((minds==i+1) & (Pinds==j+1))
these = rms[w]
                n = np.size(these)
ns[i,j] = n
if measured_rms is not None:
pctiles[i,j] = (these > sigma*measured_rms).sum()/float(n)
else:
inds = np.argsort(these)
pctiles[i,j] = these[inds][int((1-conf)*n)]
Ms,logPs = np.meshgrid(mbin_centers,logPbin_centers)
if plot:
setfig(fig)
if contour:
mbin_centers = (mbins[:-1] + mbins[1:])/2.
logPbins = np.log10(Pbins)
logPbin_centers = (logPbins[:-1] + logPbins[1:])/2.
if measured_rms is not None:
levels = [0.68,0.95,0.99]
else:
levels = np.arange(0,20,2)
c = plt.contour(logPbin_centers,mbin_centers,pctiles,levels=levels,colors='k')
plt.clabel(c, fontsize=10, inline=1)
else:
extent = [np.log10(self.P.min()),np.log10(self.P.max()),self.M2.min(),self.M2.max()]
im = plt.imshow(pctiles,cmap='Greys',extent=extent,aspect='auto')
fig = plt.gcf()
ax = plt.gca()
if measured_rms is None:
cbarticks = np.arange(0,21,2)
else:
cbarticks = np.arange(0,1.01,0.1)
cbar = fig.colorbar(im, ticks=cbarticks)
plt.xlabel('Log P')
plt.ylabel('M2')
return mbins,Pbins,pctiles,ns
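# Illustrative sketch (assumption, not part of the original module): building a
# BinaryGrid and getting per-system RV scatter via RV_timeseries, rather than
# RV_RMSgrid, which the docstring above flags as largely untested.  Units
# follow dRV/RV_timeseries (days in, km/s out).
def _example_binarygrid_rvs():
    grid = BinaryGrid(1.0, qmin=0.2, qmax=1.0, Pmin=1., Pmax=1000., N=int(1e4))
    ts = np.array([0., 30., 60., 120., 365.]) * u.day   # observation epochs
    rvs = grid.RV_timeseries(ts)        # Quantity, shape (len(ts), N)
    return rvs.std(axis=0)              # per-system RV scatter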
|
|
# pylint: disable=invalid-name
"""
Model domains
=============
Regular latitude/longitude
--------------------------
For regular latitude/longitude grids a simple bounding box test is all
that is required to determine if a point lies inside the domain.
Irregular boundaries
--------------------
Regional ocean models with irregular boundaries can perform an additional
point in polygon check to determine if a point is inside the domain.
Global models
-------------
Ocean models of global extent typically have a southern boundary, since
Antarctica is a land mass covering the South Pole. A North/South extent
check may be sufficient to determine whether a point belongs to the domain or
not.
"""
try:
    from itertools import izip
except ImportError:  # Python 3: itertools.izip was removed; built-in zip is lazy
    izip = zip
import numpy as np
def inside(grid_longitudes,
grid_latitudes,
observed_longitudes,
observed_latitudes,
kind="regular"):
"""Detect points inside a model domain
Apply geometry techniques to determine if an observation is inside a model
domain. Strategies included can handle regular lon/lat, global latitude
bands and polygonal grids.
======= ===============================================
Key Description
======= ===============================================
regular model grids bounded by a longitude/latitude box
band model grid bounded north/south
polygon general case where model boundary treated as
polygon
======= ===============================================
:param grid_longitudes: 2D array shaped (X, Y)
:param grid_latitudes: 2D array shaped (X, Y)
:param observed_longitudes: 1D array shaped (N,)
:param observed_latitudes: 1D array shaped (N,)
:param kind: string indicating which kind of model grid is being used
:returns: boolean array where True indicates observation is inside domain
"""
if kind.lower() in ["regular", "box", "lonlat"]:
return Box.from2d(grid_longitudes,
grid_latitudes).inside(observed_longitudes,
observed_latitudes)
elif kind.lower() in ["band", "latitude_band"]:
return LatitudeBand.from_latitudes(grid_latitudes).inside(observed_latitudes)
elif kind.lower() in ["polygon", "irregular", "rotated"]:
return Polygon(grid_longitudes,
grid_latitudes).inside(observed_longitudes,
observed_latitudes)
class LatitudeBand(object):
"""Latitude band domain
Positions within two latitude parallels are considered inside the domain.
:param minimum: southernmost latitude
:param maximum: northernmost latitude
"""
def __init__(self, minimum, maximum):
self.minimum = minimum
self.maximum = maximum
@classmethod
def from_latitudes(cls, latitudes):
"""Construct latitude band from latitude array"""
return cls(np.ma.min(latitudes),
np.ma.max(latitudes))
def inside(self, latitudes):
"""Determine observations inside grid"""
return ((latitudes >= self.minimum) &
(latitudes <= self.maximum))
class Polygon(object):
"""General ocean grid definition
Arbitrary shaped non-intersecting polygon domain. Uses
a combination of bounding box and point in polygon
algorithms to decide if a point is inside the domain.
:param longitudes: 2D array shaped (X, Y)
:param latitudes: 2D array shaped (X, Y)
"""
def __init__(self, longitudes, latitudes):
self.bounding_box = Box.from2d(longitudes,
latitudes)
self.point_in_polygon = PointInPolygon.from2d(longitudes,
latitudes)
def inside(self, longitudes, latitudes):
"""Check observations are contained within domain
:param longitudes: x-coordinate to test
:param latitudes: y-coordinate to test
:returns: logical indicating coordinates contained in polygon
"""
longitudes = np.asarray(longitudes, dtype="d")
latitudes = np.asarray(latitudes, dtype="d")
if longitudes.ndim == 0:
return self.point_in_polygon.inside(longitudes,
latitudes)
# Optimal vector algorithm
        result = np.zeros_like(longitudes, dtype=bool)
in_box = self.bounding_box.inside(longitudes, latitudes)
result[in_box] = self.point_in_polygon.inside(longitudes[in_box],
latitudes[in_box])
return result
class Box(object):
"""Bounding box surrounding collection of vertices"""
def __init__(self, xmin, xmax, ymin, ymax):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
@classmethod
def from2d(cls, longitudes, latitudes):
"""Construct from 2D coordinates"""
return cls(np.min(longitudes),
np.max(longitudes),
np.min(latitudes),
np.max(latitudes))
def inside(self, x, y):
"""Check point lies inside box
:param x: x-coordinate to test
:param y: y-coordinate to test
:returns: logical indicating coordinates contained in box
"""
return ((x <= self.xmax) &
(x >= self.xmin) &
(y <= self.ymax) &
(y >= self.ymin))
def boundary(values):
"""Extract boundary from grid
A boundary of a grid shaped N x M consists of 2N + 2M - 4 points.
2 rows, 2 columns minus 4 corners that have been double counted.
:param values: 2D array shaped (N, M)
:returns: array shaped (B, 2) where B is the number of points on the
boundary (2N + 2M - 4).
"""
values = np.asarray(values)
return np.asarray(list(values[:, 0]) +
list(values[-1, 1:-1]) +
list(values[::-1, -1]) +
list(values[0, -2:0:-1]),
dtype="d")
def point_in_polygon(x, y, xp, yp):
"""Point in polygon algorithm"""
return PointInPolygon(x, y).inside(xp, yp)
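# Illustrative sketch (not part of the original module): point_in_polygon on a
# simple right triangle with vertices (0, 0), (1, 0) and (0, 1); the first test
# point lies inside, the second outside.
def _example_triangle_check():
    xs = np.array([0., 1., 0.])
    ys = np.array([0., 0., 1.])
    return (point_in_polygon(xs, ys, 0.25, 0.25),   # True
            point_in_polygon(xs, ys, 1.0, 1.0))     # False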
class PointInPolygon(object):
"""Point in polygon search algorithm
    The algorithm proceeds as follows:
- Detect faces that contain the y-coordinate of the test point
- Find x-coordinate (nodes) for the faces at the test y-coordinate
- Count nodes either side of the test point
- An odd number of nodes on both sides means the point is inside
- If the test point is a node (lies on boundary) it is also
counted as inside
    In cases where two faces meet, the lower face is counted as having a node.
This convention removes double counting. As a result,
points at the top of the polygon need to be treated separately.
:param x: array of x coordinates of polygon vertices
:param y: array of y coordinates of polygon vertices
"""
def __init__(self, x, y):
self.x, self.y = np.asarray(x), np.asarray(y)
# Define valid line segments
x1, y1, x2, y2 = as_segments(self.x, self.y)
self.x1, self.y1, self.x2, self.y2 = remove_horizontal(x1, y1, x2, y2)
# Detect intervals containing f(x) = yp
self.y_min, self.y_max = order_intervals(self.y1, self.y2)
# Determine y-axis grid limit
self.y_limit = np.max([self.y1, self.y2])
@classmethod
def from2d(cls, longitudes, latitudes):
"""Construct point in polygon search from 2D longitudes and latitudes
Conveniently maps array boundaries to polygon definition
:param longitudes: array shape (X, Y)
:param latitudes: array shape (X, Y)
"""
return cls(boundary(longitudes), boundary(latitudes))
def inside(self, xp, yp):
"""Check point(s) lie inside polygon"""
xp, yp = np.asarray(xp), np.asarray(yp)
if xp.ndim == 0:
return self._scalar_inside(xp, yp)
return self._vector_inside(xp, yp)
def _vector_inside(self, xp, yp):
return np.array([self._scalar_inside(x, y) for x, y in izip(xp, yp)],
                        dtype=bool)
def _scalar_inside(self, xp, yp):
# Apply algorithm to points at top of domain
if yp == self.y_limit:
nodes = self.x[self.y == self.y_limit]
return self.between_nodes(nodes, xp)
# Detect intervals containing f(x) = yp
points = interval_contains(self.y_min, self.y_max, yp)
# Check that nodes exist
if len(self.x1[points]) == 0:
return False
# Find x-values corresponding to yp for each segment
nodes = solve(self.x1[points],
self.y1[points],
self.x2[points],
self.y2[points],
yp)
return self.between_nodes(nodes, xp)
@staticmethod
def between_nodes(nodes, position):
"""Check position in relation to node positions
A point is inside the domain for one of two reasons, either:
- there are an odd number of nodes on either side of the point
- the point is on the boundary (is a node position)
:returns: True if position is in domain
"""
# Include solutions on boundary
if position in nodes:
return True
# Count nodes left/right of xp
return (odd(count_below(nodes, position)) and
odd(count_above(nodes, position)))
def as_segments(x, y):
"""Convert coordinates representing polygon to segments"""
return x, y, cycle(x), cycle(y)
def remove_horizontal(x1, y1, x2, y2):
"""Remove segments with zero slope"""
keep = y1 != y2
return x1[keep], y1[keep], x2[keep], y2[keep]
def cycle(values):
"""Shift array view in a cyclic manner"""
return np.append(values[1:], values[0])
def order_intervals(left, right):
"""Rearrange intervals into ascending order
:param left: left interval values
:param right: right interval values
:returns: (minimum, maximum) arrays sorted into lower/upper values
"""
return np.min([left, right], axis=0), np.max([left, right], axis=0)
def interval_contains(minimum, maximum, value):
"""Determine if interval contains point
.. note:: zero sized intervals do not contain points
.. note:: interval is closed to the left and open on the right
"""
minimum, maximum = np.asarray(minimum), np.asarray(maximum)
return (minimum <= value) & (value < maximum)
def solve(x1, y1, x2, y2, y):
"""Solve equation of line for x given y
This is the inverse of the usual approach to solving a linear equation.
Linear equations can be solved forward for y or backward for x using the
same form of equation, y0 + (dy / dx) * (x - x0). In this case with
y and x switched, the equation reads x0 + (dx / dy) * (y - y0).
:returns: value that satisfies line defined by (x1, y1), (x2, y2)
"""
dxdy = (x2 - x1) / (y2 - y1)
return x1 + (dxdy * (y - y1))
def count_below(values, threshold):
"""Count number of values lying below some threshold"""
return (values < threshold).sum()
def count_above(values, threshold):
"""Count number of values lying above some threshold"""
return (values > threshold).sum()
def odd(number):
"""Determine if number is odd"""
return (number % 2) == 1
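# Illustrative sketch (assumption, not part of the original module): checking a
# couple of observation points against a tiny 3x3 regular grid with each of the
# ``kind`` strategies described in ``inside`` above.
def _example_inside_checks():
    lons, lats = np.meshgrid(np.linspace(0., 10., 3), np.linspace(50., 60., 3))
    obs_lons = np.array([5., 20.])
    obs_lats = np.array([55., 55.])
    flags_box = inside(lons, lats, obs_lons, obs_lats, kind="regular")
    flags_band = inside(lons, lats, obs_lons, obs_lats, kind="band")
    flags_poly = inside(lons, lats, obs_lons, obs_lats, kind="polygon")
    return flags_box, flags_band, flags_poly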
|
|
from gym.envs.registration import registry, register, make, spec
# Algorithmic
# ----------------------------------------
register(
id='Copy-v0',
entry_point='gym.envs.algorithmic:CopyEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='RepeatCopy-v0',
entry_point='gym.envs.algorithmic:RepeatCopyEnv',
max_episode_steps=200,
reward_threshold=75.0,
)
register(
id='ReversedAddition-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 2},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='ReversedAddition3-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 3},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='DuplicatedInput-v0',
entry_point='gym.envs.algorithmic:DuplicatedInputEnv',
max_episode_steps=200,
reward_threshold=9.0,
)
register(
id='Reverse-v0',
entry_point='gym.envs.algorithmic:ReverseEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
# Classic
# ----------------------------------------
register(
id='CartPole-v0',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=200,
reward_threshold=195.0,
)
register(
id='CartPole-v1',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=500,
reward_threshold=475.0,
)
register(
id='MountainCar-v0',
entry_point='gym.envs.classic_control:MountainCarEnv',
max_episode_steps=200,
reward_threshold=-110.0,
)
register(
id='MountainCarContinuous-v0',
entry_point='gym.envs.classic_control:Continuous_MountainCarEnv',
max_episode_steps=999,
reward_threshold=90.0,
)
register(
id='Pendulum-v0',
entry_point='gym.envs.classic_control:PendulumEnv',
max_episode_steps=200,
)
register(
id='Acrobot-v1',
entry_point='gym.envs.classic_control:AcrobotEnv',
max_episode_steps=500,
)
# Box2d
# ----------------------------------------
register(
id='LunarLander-v2',
entry_point='gym.envs.box2d:LunarLander',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='LunarLanderContinuous-v2',
entry_point='gym.envs.box2d:LunarLanderContinuous',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='BipedalWalker-v2',
entry_point='gym.envs.box2d:BipedalWalker',
max_episode_steps=1600,
reward_threshold=300,
)
register(
id='BipedalWalkerHardcore-v2',
entry_point='gym.envs.box2d:BipedalWalkerHardcore',
max_episode_steps=2000,
reward_threshold=300,
)
register(
id='CarRacing-v0',
entry_point='gym.envs.box2d:CarRacing',
max_episode_steps=1000,
reward_threshold=900,
)
# Toy Text
# ----------------------------------------
register(
id='Blackjack-v0',
entry_point='gym.envs.toy_text:BlackjackEnv',
)
register(
id='FrozenLake-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '4x4'},
max_episode_steps=100,
reward_threshold=0.78, # optimum = .8196
)
register(
id='FrozenLake8x8-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '8x8'},
max_episode_steps=200,
reward_threshold=0.99, # optimum = 1
)
register(
id='NChain-v0',
entry_point='gym.envs.toy_text:NChainEnv',
max_episode_steps=1000,
)
register(
id='Roulette-v0',
entry_point='gym.envs.toy_text:RouletteEnv',
max_episode_steps=100,
)
register(
id='Taxi-v2',
entry_point='gym.envs.toy_text.taxi:TaxiEnv',
reward_threshold=8, # optimum = 8.46
max_episode_steps=200,
)
register(
id='GuessingGame-v0',
entry_point='gym.envs.toy_text.guessing_game:GuessingGame',
max_episode_steps=200,
)
register(
id='HotterColder-v0',
entry_point='gym.envs.toy_text.hotter_colder:HotterColder',
max_episode_steps=200,
)
# Mujoco
# ----------------------------------------
# 2D
register(
id='Reacher-v1',
entry_point='gym.envs.mujoco:ReacherEnv',
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id='InvertedPendulum-v1',
entry_point='gym.envs.mujoco:InvertedPendulumEnv',
max_episode_steps=1000,
reward_threshold=950.0,
)
register(
id='InvertedDoublePendulum-v1',
entry_point='gym.envs.mujoco:InvertedDoublePendulumEnv',
max_episode_steps=1000,
reward_threshold=9100.0,
)
register(
id='HalfCheetah-v1',
entry_point='gym.envs.mujoco:HalfCheetahEnv',
max_episode_steps=1000,
reward_threshold=4800.0,
)
register(
id='Hopper-v1',
entry_point='gym.envs.mujoco:HopperEnv',
max_episode_steps=1000,
reward_threshold=3800.0,
)
register(
id='Swimmer-v1',
entry_point='gym.envs.mujoco:SwimmerEnv',
max_episode_steps=1000,
reward_threshold=360.0,
)
register(
id='Walker2d-v1',
max_episode_steps=1000,
entry_point='gym.envs.mujoco:Walker2dEnv',
)
register(
id='Ant-v1',
entry_point='gym.envs.mujoco:AntEnv',
max_episode_steps=1000,
reward_threshold=6000.0,
)
register(
id='Humanoid-v1',
entry_point='gym.envs.mujoco:HumanoidEnv',
max_episode_steps=1000,
)
register(
id='HumanoidStandup-v1',
entry_point='gym.envs.mujoco:HumanoidStandupEnv',
max_episode_steps=1000,
)
# Custom Mujoco
# ----------------------------------------
## V0: reach reward 0.4, grey
register(
id="Box3dReachPixel-v0",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGrey",
max_episode_steps=200,
)
## V1: reach reward 0.1, grey
register(
id="Box3dReachPixel-v1",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyHarder",
max_episode_steps=200,
)
## V2: no reward, 6 boxes, grey
register(
id="Box3dReachPixel-v2",
entry_point="gym.envs.mujoco:Box3dMulMulObjConAvoidPixelGreyEnv",
max_episode_steps=1000,
)
## V3: reach rew 0.1 with 4 obs
register(
id="Box3dReachPixel-v3",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyHarderMulAct",
max_episode_steps=100,
)
## V4: Two cam, 1 box, 0.1 reward
register(
id="Box3dReachPixel-v4",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyHarderTwoCam",
max_episode_steps=200,
)
## V5: Two cam, 1 box, 0.1 reward, 4 step
register(
id="Box3dReachPixel-v5",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyHarderTwoCamMulAct",
max_episode_steps=200,
)
## V6: Two cam, 1 box, 0.1 reward, 2 step
register(
id="Box3dReachPixel-v6",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyHarderTwoCamMulActLess",
max_episode_steps=200,
)
## V7: Two cam, 6 box, no reward, 2 step
register(
id="Box3dReachPixel-v7",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyMulMulTwoCamMulActLess",
max_episode_steps=1000,
)
## V8: Two cam, 6 box, no reward, 4 step
register(
id="Box3dReachPixel-v8",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyMulMulTwoCamMulAct",
max_episode_steps=1000,
)
## V9: Two cam, 6 box, contact reward, 2 step
register(
id="Box3dReachPixel-v9",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyMulMulContactTwoCamMulActLess",
max_episode_steps=200,
)
## V10: Two cam, 1 box, reach reward, 4 step but 2 obs
register(
id="Box3dReachPixel-v10",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyHarderTwoCamMulActLessRepeatTwo",
max_episode_steps=200,
)
## V11: Two cam, 6 box, contact reward, 4 step
register(
id="Box3dReachPixel-v11",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyMulMulContactTwoCamMulAct",
max_episode_steps=200,
)
# V17: Two cam, 1 box, contact reward, 2 step
register(
id="Box3dReachPixel-v17",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyMulMulContactTwoCamAct",
max_episode_steps=200,
)
## V12: Two cam, 6 box, contact reward, 4 step, env_info output joint pos (key: joint_pos)
register(
id="Box3dReachPixel-v12",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyMulMulContactTwoCamMulActFusion",
max_episode_steps=200,
)
## V13: Two cam, 6 box, contact reward, 4 step, no mass for objects
register(
id="Box3dReachPixel-v13",
entry_point="gym.envs.mujoco:Box3dFixedReachEnvPixelGreyMulMulContactTwoCamMulActNoMas",
max_episode_steps=200,
)
## V14: Two cam, 1 box, contact reward
register(
id="Box3dReachPixel-v14",
entry_point="gym.envs.mujoco:Box3dFixedReachPixelMulMulObjConAvoidEnvOne",
max_episode_steps=200,
)
# V15: Two cam, 1 box, 0.4 reach reward, 4 step
register(
id="Box3dReachPixel-v15",
entry_point="gym.envs.mujoco:Box3dFixedReachPixelMulMulObjConAvoidEnvOneEasy",
max_episode_steps=200,
)
# V16: Two cam, 1 box, 0.4 reach reward, 2 step
register(
id="Box3dReachPixel-v16",
entry_point="gym.envs.mujoco:Box3dFixedReachPixelMulObjConAvoidEnvOneEasy",
max_episode_steps=200,
)
# ========= UP: PIXEL = ## = DOWN: STATE ======== #
## V18: contact reward, 10 step
register(
id="Box3dReach-v18",
entry_point="gym.envs.mujoco:Box3dFixedReachSixBoxEnvMulContactTwoCam10Step",
max_episode_steps=1000,
)
## V16: contact reward, no box velocities
register(
id="Box3dReach-v16",
entry_point="gym.envs.mujoco:Box3dFixedReachSixBoxEnvMulContactTwoCamNoBoxVel",
max_episode_steps=1000,
)
## V17: contact reward, 4 action repeat
register(
id="Box3dReach-v17",
entry_point="gym.envs.mujoco:Box3dFixedReachSixBoxEnvMulContactTwoCam4Step",
max_episode_steps=1000,
)
## V12: contact reward
register(
id="Box3dReach-v12",
entry_point="gym.envs.mujoco:Box3dFixedReachSixBoxEnvMulContactTwoCam",
max_episode_steps=1000,
)
## V11: contact reward, 1 box
register(
id="Box3dReach-v11",
entry_point="gym.envs.mujoco:Box3dFixedReachMulMulObjConAvoidEnvOne",
max_episode_steps=200,
)
## V10: no reward, 6 boxes with small random init
register(
id="Box3dReach-v10",
entry_point="gym.envs.mujoco:Box3dFixedReachMulMulObjConAvoidEnv",
max_episode_steps=1000,
)
## V9: no reward, 3 boxes with large random init
register(
id="Box3dReach-v9",
entry_point="gym.envs.mujoco:Box3dFixedReachMulObjConAvoidMoreEnv",
max_episode_steps=1000,
)
## V8: no reward, 3 boxes with previous frame velocity as input
register(
id="Box3dReach-v8",
entry_point="gym.envs.mujoco:Box3dFixedReachMulObjPrevVelEnv",
max_episode_steps=1000,
)
## V7: no reward, 3 boxes with contact checking
register(
id="Box3dReach-v7",
entry_point="gym.envs.mujoco:Box3dFixedReachMulObjConAvoidEnv",
max_episode_steps=1000,
)
## V6: no reward, 3 boxes
register(
id="Box3dReach-v6",
entry_point="gym.envs.mujoco:Box3dFixedReachMulObjEnv",
max_episode_steps=1000,
)
## test: 0.1 reach reward, 1 box, 10/4/2 step
register(
id="Box3dReach-v13",
entry_point="gym.envs.mujoco:Box3dFixedReachHarderEnv4Step",
max_episode_steps=200,
)
register(
id="Box3dReach-v14",
entry_point="gym.envs.mujoco:Box3dFixedReachHarderEnv2Step",
max_episode_steps=200,
)
## V4: 0.1 reach reward, 1 box
register(
id="Box3dReach-v4",
entry_point="gym.envs.mujoco:Box3dFixedReachHarderEnv",
max_episode_steps=200,
)
register(
id="Box3dReach-v5",
entry_point="gym.envs.mujoco:Box3dFixedReachHardestEnv",
max_episode_steps=200,
)
register(
id="Box3dReach-v3",
entry_point="gym.envs.mujoco:Box3dContactReachEnv",
max_episode_steps=200,
)
register(
id="Box3dReach-v2",
entry_point="gym.envs.mujoco:Box3dFixedReachEnv",
max_episode_steps=200,
)
register(
id='Box3dReach-v0',
entry_point='gym.envs.mujoco:Box3dReachPosEnv',
max_episode_steps=100,
)
register(
id='Box3dReach-v1',
entry_point='gym.envs.mujoco:Box3dReachEnv',
max_episode_steps=100,
)
register(
id='Box3dGrasp-v0',
entry_point='gym.envs.mujoco:Box3dGraspEnv',
max_episode_steps=1000,
)
register(
id='Box3dNoReward-v0',
entry_point='gym.envs.mujoco:Box3dNoRewardEnv',
max_episode_steps=200,
)
# Atari
# ----------------------------------------
# # print ', '.join(["'{}'".format(name.split('.')[0]) for name in atari_py.list_games()])
for game in ['air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
'bank_heist', 'battle_zone', 'beam_rider', 'berzerk', 'bowling', 'boxing', 'breakout', 'carnival',
'centipede', 'chopper_command', 'crazy_climber', 'demon_attack', 'double_dunk',
'elevator_action', 'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
'ice_hockey', 'jamesbond', 'journey_escape', 'kangaroo', 'krull', 'kung_fu_master',
'montezuma_revenge', 'ms_pacman', 'name_this_game', 'phoenix', 'pitfall', 'pong', 'pooyan',
'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest', 'skiing',
'solaris', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
'venture', 'video_pinball', 'wizard_of_wor', 'yars_revenge', 'zaxxon']:
for obs_type in ['image', 'ram']:
# space_invaders should yield SpaceInvaders-v0 and SpaceInvaders-ram-v0
name = ''.join([g.capitalize() for g in game.split('_')])
if obs_type == 'ram':
name = '{}-ram'.format(name)
nondeterministic = False
if game == 'elevator_action' and obs_type == 'ram':
# ElevatorAction-ram-v0 seems to yield slightly
# non-deterministic observations about 10% of the time. We
# should track this down eventually, but for now we just
# mark it as nondeterministic.
nondeterministic = True
register(
id='{}-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'repeat_action_probability': 0.25},
max_episode_steps=10000,
nondeterministic=nondeterministic,
)
register(
id='{}-v3'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
# Standard Deterministic (as in the original DeepMind paper)
if game == 'space_invaders':
frameskip = 3
else:
frameskip = 4
# Use a deterministic frame skip.
register(
id='{}Deterministic-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip, 'repeat_action_probability': 0.25},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id='{}Deterministic-v3'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id='{}NoFrameskip-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1, 'repeat_action_probability': 0.25}, # A frameskip of 1 means we get every frame
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
# No frameskip. (Atari has no entropy source, so these are
# deterministic environments.)
register(
id='{}NoFrameskip-v3'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1}, # A frameskip of 1 means we get every frame
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
# Board games
# ----------------------------------------
register(
id='Go9x9-v0',
entry_point='gym.envs.board_game:GoEnv',
kwargs={
'player_color': 'black',
'opponent': 'pachi:uct:_2400',
'observation_type': 'image3c',
'illegal_move_mode': 'lose',
'board_size': 9,
},
    # The pachi player seems not to be deterministic given a fixed seed.
# (Reproduce by running 'import gym; h = gym.make('Go9x9-v0'); h.seed(1); h.reset(); h.step(15); h.step(16); h.step(17)' a few times.)
#
# This is probably due to a computation time limit.
nondeterministic=True,
)
register(
id='Go19x19-v0',
entry_point='gym.envs.board_game:GoEnv',
kwargs={
'player_color': 'black',
'opponent': 'pachi:uct:_2400',
'observation_type': 'image3c',
'illegal_move_mode': 'lose',
'board_size': 19,
},
nondeterministic=True,
)
register(
id='Hex9x9-v0',
entry_point='gym.envs.board_game:HexEnv',
kwargs={
'player_color': 'black',
'opponent': 'random',
'observation_type': 'numpy3c',
'illegal_move_mode': 'lose',
'board_size': 9,
},
)
# Debugging
# ----------------------------------------
register(
id='OneRoundDeterministicReward-v0',
entry_point='gym.envs.debugging:OneRoundDeterministicRewardEnv',
local_only=True
)
register(
id='TwoRoundDeterministicReward-v0',
entry_point='gym.envs.debugging:TwoRoundDeterministicRewardEnv',
local_only=True
)
register(
id='OneRoundNondeterministicReward-v0',
entry_point='gym.envs.debugging:OneRoundNondeterministicRewardEnv',
local_only=True
)
register(
id='TwoRoundNondeterministicReward-v0',
entry_point='gym.envs.debugging:TwoRoundNondeterministicRewardEnv',
local_only=True,
)
# Parameter tuning
# ----------------------------------------
register(
id='ConvergenceControl-v0',
entry_point='gym.envs.parameter_tuning:ConvergenceControl',
)
register(
id='CNNClassifierTraining-v0',
entry_point='gym.envs.parameter_tuning:CNNClassifierTraining',
)
# Safety
# ----------------------------------------
# interpretability envs
register(
id='PredictActionsCartpole-v0',
entry_point='gym.envs.safety:PredictActionsCartpoleEnv',
max_episode_steps=200,
)
register(
id='PredictObsCartpole-v0',
entry_point='gym.envs.safety:PredictObsCartpoleEnv',
max_episode_steps=200,
)
# semi_supervised envs
# probably the easiest:
register(
id='SemisuperPendulumNoise-v0',
entry_point='gym.envs.safety:SemisuperPendulumNoiseEnv',
max_episode_steps=200,
)
# somewhat harder because of higher variance:
register(
id='SemisuperPendulumRandom-v0',
entry_point='gym.envs.safety:SemisuperPendulumRandomEnv',
max_episode_steps=200,
)
# probably the hardest because you only get a constant number of rewards in total:
register(
id='SemisuperPendulumDecay-v0',
entry_point='gym.envs.safety:SemisuperPendulumDecayEnv',
max_episode_steps=200,
)
# off_switch envs
register(
id='OffSwitchCartpole-v0',
entry_point='gym.envs.safety:OffSwitchCartpoleEnv',
max_episode_steps=200,
)
register(
id='OffSwitchCartpoleProb-v0',
entry_point='gym.envs.safety:OffSwitchCartpoleProbEnv',
max_episode_steps=200,
)
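# Illustrative sketch (not part of the original registry): environments
# registered above are constructed by id via ``make`` (imported from
# gym.envs.registration at the top of this file).  A short random-policy
# rollout against CartPole-v0, assuming the classic reset/step/close Env API.
def _example_rollout(env_id='CartPole-v0', episodes=1):
    env = make(env_id)
    for _ in range(episodes):
        observation = env.reset()
        done = False
        while not done:
            action = env.action_space.sample()
            observation, reward, done, info = env.step(action)
    env.close()
    return observation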
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Implementation of non-blocking server.
The main idea of the server is to receive and send requests
only from the main thread.
The thread pool should be sized for concurrent tasks, not
maximum connections.
"""
import logging
import select
import socket
import struct
import threading
from collections import deque
from six.moves import queue
from thrift.transport import TTransport
from thrift.protocol.TBinaryProtocol import TBinaryProtocolFactory
__all__ = ['TNonblockingServer']
logger = logging.getLogger(__name__)
class Worker(threading.Thread):
"""Worker is a small helper to process incoming connection."""
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
"""Process queries from task queue, stop if processor is None."""
while True:
try:
processor, iprot, oprot, otrans, callback = self.queue.get()
if processor is None:
break
processor.process(iprot, oprot)
callback(True, otrans.getvalue())
except Exception:
logger.exception("Exception while processing request", exc_info=True)
callback(False, b'')
WAIT_LEN = 0
WAIT_MESSAGE = 1
WAIT_PROCESS = 2
SEND_ANSWER = 3
CLOSED = 4
def locked(func):
"""Decorator which locks self.lock."""
def nested(self, *args, **kwargs):
self.lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self.lock.release()
return nested
def socket_exception(func):
"""Decorator close object on socket.error."""
def read(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except socket.error:
logger.debug('ignoring socket exception', exc_info=True)
self.close()
return read
class Message(object):
def __init__(self, offset, len_, header):
self.offset = offset
self.len = len_
self.buffer = None
self.is_header = header
@property
def end(self):
return self.offset + self.len
class Connection(object):
"""Basic class is represented connection.
It can be in state:
WAIT_LEN --- connection is reading request len.
WAIT_MESSAGE --- connection is reading request.
WAIT_PROCESS --- connection has just read whole request and
waits for call ready routine.
SEND_ANSWER --- connection is sending answer string (including length
of answer).
CLOSED --- socket was closed and connection should be deleted.
"""
def __init__(self, new_socket, wake_up):
self.socket = new_socket
self.socket.setblocking(False)
self.status = WAIT_LEN
self.len = 0
self.received = deque()
self._reading = Message(0, 4, True)
self._rbuf = b''
self._wbuf = b''
self.lock = threading.Lock()
self.wake_up = wake_up
self.remaining = False
@socket_exception
def read(self):
"""Reads data from stream and switch state."""
assert self.status in (WAIT_LEN, WAIT_MESSAGE)
assert not self.received
buf_size = 8192
first = True
done = False
while not done:
read = self.socket.recv(buf_size)
rlen = len(read)
done = rlen < buf_size
self._rbuf += read
if first and rlen == 0:
if self.status != WAIT_LEN or self._rbuf:
logger.error('could not read frame from socket')
else:
logger.debug('read zero length. client might have disconnected')
self.close()
while len(self._rbuf) >= self._reading.end:
if self._reading.is_header:
mlen, = struct.unpack('!i', self._rbuf[:4])
self._reading = Message(self._reading.end, mlen, False)
self.status = WAIT_MESSAGE
else:
self._reading.buffer = self._rbuf
self.received.append(self._reading)
self._rbuf = self._rbuf[self._reading.end:]
self._reading = Message(0, 4, True)
first = False
if self.received:
self.status = WAIT_PROCESS
break
self.remaining = not done
@socket_exception
def write(self):
"""Writes data from socket and switch state."""
assert self.status == SEND_ANSWER
sent = self.socket.send(self._wbuf)
if sent == len(self._wbuf):
self.status = WAIT_LEN
self._wbuf = b''
self.len = 0
else:
self._wbuf = self._wbuf[sent:]
@locked
def ready(self, all_ok, message):
"""Callback function for switching state and waking up main thread.
        This function is the only function which can be called asynchronously.
        ready() can switch the Connection to one of three states:
        WAIT_LEN if the request was oneway.
        SEND_ANSWER if the request was processed normally.
        CLOSED if the request threw an unexpected exception.
        It also wakes up the main thread.
"""
assert self.status == WAIT_PROCESS
if not all_ok:
self.close()
self.wake_up()
return
self.len = 0
if len(message) == 0:
# it was a oneway request, do not write answer
self._wbuf = b''
self.status = WAIT_LEN
else:
self._wbuf = struct.pack('!i', len(message)) + message
self.status = SEND_ANSWER
self.wake_up()
@locked
def is_writeable(self):
"""Return True if connection should be added to write list of select"""
return self.status == SEND_ANSWER
# it's not necessary, but...
@locked
def is_readable(self):
"""Return True if connection should be added to read list of select"""
return self.status in (WAIT_LEN, WAIT_MESSAGE)
@locked
def is_closed(self):
"""Returns True if connection is closed."""
return self.status == CLOSED
def fileno(self):
"""Returns the file descriptor of the associated socket."""
return self.socket.fileno()
def close(self):
"""Closes connection"""
self.status = CLOSED
self.socket.close()
class TNonblockingServer(object):
"""Non-blocking server."""
def __init__(self,
processor,
lsocket,
inputProtocolFactory=None,
outputProtocolFactory=None,
threads=10):
self.processor = processor
self.socket = lsocket
self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
self.out_protocol = outputProtocolFactory or self.in_protocol
self.threads = int(threads)
self.clients = {}
self.tasks = queue.Queue()
self._read, self._write = socket.socketpair()
self.prepared = False
self._stop = False
def setNumThreads(self, num):
"""Set the number of worker threads that should be created."""
# implement ThreadPool interface
assert not self.prepared, "Can't change number of threads after start"
self.threads = num
def prepare(self):
"""Prepares server for serve requests."""
if self.prepared:
return
self.socket.listen()
for _ in range(self.threads):
thread = Worker(self.tasks)
thread.setDaemon(True)
thread.start()
self.prepared = True
def wake_up(self):
"""Wake up main thread.
        The server usually blocks in a select call, which we need to interrupt.
        The simplest way is to use a socketpair: select always waits to read
        from the first socket of the pair, so writing anything to the second
        socket wakes the main thread up.
"""
self._write.send(b'1')
def stop(self):
"""Stop the server.
This method causes the serve() method to return. stop() may be invoked
from within your handler, or from another thread.
After stop() is called, serve() will return but the server will still
be listening on the socket. serve() may then be called again to resume
processing requests. Alternatively, close() may be called after
serve() returns to close the server socket and shutdown all worker
threads.
"""
self._stop = True
self.wake_up()
def _select(self):
"""Does select on open connections."""
readable = [self.socket.handle.fileno(), self._read.fileno()]
writable = []
remaining = []
for i, connection in list(self.clients.items()):
if connection.is_readable():
readable.append(connection.fileno())
if connection.remaining or connection.received:
remaining.append(connection.fileno())
if connection.is_writeable():
writable.append(connection.fileno())
if connection.is_closed():
del self.clients[i]
if remaining:
return remaining, [], [], False
else:
return select.select(readable, writable, readable) + (True,)
def handle(self):
"""Handle requests.
WARNING! You must call prepare() BEFORE calling handle()
"""
assert self.prepared, "You have to call prepare before handle"
rset, wset, xset, selected = self._select()
for readable in rset:
if readable == self._read.fileno():
                # we don't care about the data, just clear the readable flag
self._read.recv(1024)
elif readable == self.socket.handle.fileno():
try:
client = self.socket.accept()
if client:
self.clients[client.handle.fileno()] = Connection(client.handle,
self.wake_up)
except socket.error:
logger.debug('error while accepting', exc_info=True)
else:
connection = self.clients[readable]
if selected:
connection.read()
if connection.received:
connection.status = WAIT_PROCESS
msg = connection.received.popleft()
itransport = TTransport.TMemoryBuffer(msg.buffer, msg.offset)
otransport = TTransport.TMemoryBuffer()
iprot = self.in_protocol.getProtocol(itransport)
oprot = self.out_protocol.getProtocol(otransport)
self.tasks.put([self.processor, iprot, oprot,
otransport, connection.ready])
for writeable in wset:
self.clients[writeable].write()
for oob in xset:
self.clients[oob].close()
del self.clients[oob]
def close(self):
"""Closes the server."""
for _ in range(self.threads):
self.tasks.put([None, None, None, None, None])
self.socket.close()
self.prepared = False
def serve(self):
"""Serve requests.
Serve requests forever, or until stop() is called.
"""
self._stop = False
self.prepare()
while not self._stop:
self.handle()
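# Illustrative sketch (assumption, not part of the original module): starting a
# TNonblockingServer for an already-constructed Thrift processor.  The
# processor would normally come from a generated service module, which is not
# shown here.
def _example_serve(processor, port=9090, threads=4):
    from thrift.transport import TSocket
    transport = TSocket.TServerSocket(port=port)
    server = TNonblockingServer(processor, transport, threads=threads)
    try:
        server.serve()      # blocks until stop() is called from another thread
    finally:
        server.close()      # shut down worker threads and the listen socket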
|
|
# orm/loading.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to convert database
rows into object instances and associated state.
the functions here are called primarily by Query, Mapper,
as well as some of the attribute loading strategies.
"""
from __future__ import absolute_import
from .. import util
from . import attributes, exc as orm_exc
from ..sql import util as sql_util
from . import strategy_options
from .util import _none_set, state_str
from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE
from .. import exc as sa_exc
import collections
_new_runid = util.counter()
def instances(query, cursor, context):
"""Return an ORM result as an iterator."""
context.runid = _new_runid()
filtered = query._has_mapper_entities
single_entity = len(query._entities) == 1 and \
query._entities[0].supports_single_entity
if filtered:
if single_entity:
filter_fn = id
else:
def filter_fn(row):
return tuple(
id(item)
if ent.use_id_for_hash
else item
for ent, item in zip(query._entities, row)
)
try:
(process, labels) = \
list(zip(*[
query_entity.row_processor(query,
context, cursor)
for query_entity in query._entities
]))
if not single_entity:
keyed_tuple = util.lightweight_named_tuple('result', labels)
while True:
context.partials = {}
if query._yield_per:
fetch = cursor.fetchmany(query._yield_per)
if not fetch:
break
else:
fetch = cursor.fetchall()
if single_entity:
proc = process[0]
rows = [proc(row) for row in fetch]
else:
rows = [keyed_tuple([proc(row) for proc in process])
for row in fetch]
if filtered:
rows = util.unique_list(rows, filter_fn)
for row in rows:
yield row
if not query._yield_per:
break
except Exception as err:
cursor.close()
util.raise_from_cause(err)
@util.dependencies("sqlalchemy.orm.query")
def merge_result(querylib, query, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session."""
session = query.session
if load:
# flush current contents if we expect to load data
session._autoflush()
autoflush = session.autoflush
try:
session.autoflush = False
single_entity = len(query._entities) == 1
if single_entity:
if isinstance(query._entities[0], querylib._MapperEntity):
result = [session._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load, _recursive={})
for instance in iterator]
else:
result = list(iterator)
else:
mapped_entities = [i for i, e in enumerate(query._entities)
if isinstance(e, querylib._MapperEntity)]
result = []
keys = [ent._label_name for ent in query._entities]
keyed_tuple = util.lightweight_named_tuple('result', keys)
for row in iterator:
newrow = list(row)
for i in mapped_entities:
if newrow[i] is not None:
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load, _recursive={})
result.append(keyed_tuple(newrow))
return iter(result)
finally:
session.autoflush = autoflush
def get_from_identity(session, key, passive):
"""Look up the given key in the given session's identity map,
check the object for expired state if found.
"""
instance = session.identity_map.get(key)
if instance is not None:
state = attributes.instance_state(instance)
# expired - ensure it still exists
if state.expired:
if not passive & attributes.SQL_OK:
# TODO: no coverage here
return attributes.PASSIVE_NO_RESULT
elif not passive & attributes.RELATED_OBJECT_OK:
# this mode is used within a flush and the instance's
# expired state will be checked soon enough, if necessary
return instance
try:
state._load_expired(state, passive)
except orm_exc.ObjectDeletedError:
session._remove_newly_deleted([state])
return None
return instance
else:
return None
def load_on_ident(query, key,
refresh_state=None, lockmode=None,
only_load_props=None):
"""Load the given identity key from the database."""
if key is not None:
ident = key[1]
else:
ident = None
if refresh_state is None:
q = query._clone()
q._get_condition()
else:
q = query._clone()
if ident is not None:
mapper = query._mapper_zero()
(_get_clause, _get_params) = mapper._get_clause
# None present in ident - turn those comparisons
# into "IS NULL"
if None in ident:
nones = set([
_get_params[col].key for col, value in
zip(mapper.primary_key, ident) if value is None
])
_get_clause = sql_util.adapt_criterion_to_null(
_get_clause, nones)
_get_clause = q._adapt_clause(_get_clause, True, False)
q._criterion = _get_clause
params = dict([
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(ident, mapper.primary_key)
])
q._params = params
if lockmode is not None:
version_check = True
q = q.with_lockmode(lockmode)
elif query._for_update_arg is not None:
version_check = True
q._for_update_arg = query._for_update_arg
else:
version_check = False
q._get_options(
populate_existing=bool(refresh_state),
version_check=version_check,
only_load_props=only_load_props,
refresh_state=refresh_state)
q._order_by = None
try:
return q.one()
except orm_exc.NoResultFound:
return None
def _setup_entity_query(
context, mapper, query_entity,
path, adapter, column_collection,
with_polymorphic=None, only_load_props=None,
polymorphic_discriminator=None, **kw):
if with_polymorphic:
poly_properties = mapper._iterate_polymorphic_properties(
with_polymorphic)
else:
poly_properties = mapper._polymorphic_properties
quick_populators = {}
path.set(
context.attributes,
"memoized_setups",
quick_populators)
for value in poly_properties:
if only_load_props and \
value.key not in only_load_props:
continue
value.setup(
context,
query_entity,
path,
adapter,
only_load_props=only_load_props,
column_collection=column_collection,
memoized_populators=quick_populators,
**kw
)
if polymorphic_discriminator is not None and \
polymorphic_discriminator \
is not mapper.polymorphic_on:
if adapter:
pd = adapter.columns[polymorphic_discriminator]
else:
pd = polymorphic_discriminator
column_collection.append(pd)
def _instance_processor(
mapper, context, result, path, adapter,
only_load_props=None, refresh_state=None,
polymorphic_discriminator=None,
_polymorphic_from=None):
"""Produce a mapper level row processor callable
which processes rows into mapped instances."""
# note that this method, most of which exists in a closure
# called _instance(), resists being broken out, as
# attempts to do so tend to add significant function
# call overhead. _instance() is the most
# performance-critical section in the whole ORM.
pk_cols = mapper.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
identity_class = mapper._identity_class
populators = collections.defaultdict(list)
props = mapper._prop_set
if only_load_props is not None:
props = props.intersection(
mapper._props[k] for k in only_load_props)
quick_populators = path.get(
context.attributes, "memoized_setups", _none_set)
for prop in props:
if prop in quick_populators:
# this is an inlined path just for column-based attributes.
col = quick_populators[prop]
if col is _DEFER_FOR_STATE:
populators["new"].append(
(prop.key, prop._deferred_column_loader))
elif col is _SET_DEFERRED_EXPIRED:
# note that in this path, we are no longer
# searching in the result to see if the column might
# be present in some unexpected way.
populators["expire"].append((prop.key, False))
else:
if adapter:
col = adapter.columns[col]
getter = result._getter(col)
if getter:
populators["quick"].append((prop.key, getter))
else:
# fall back to the ColumnProperty itself, which
# will iterate through all of its columns
# to see if one fits
prop.create_row_processor(
context, path, mapper, result, adapter, populators)
else:
prop.create_row_processor(
context, path, mapper, result, adapter, populators)
propagate_options = context.propagate_options
if propagate_options:
load_path = context.query._current_path + path \
if context.query._current_path.path else path
session_identity_map = context.session.identity_map
populate_existing = context.populate_existing or mapper.always_refresh
load_evt = bool(mapper.class_manager.dispatch.load)
refresh_evt = bool(mapper.class_manager.dispatch.refresh)
instance_state = attributes.instance_state
instance_dict = attributes.instance_dict
session_id = context.session.hash_key
version_check = context.version_check
runid = context.runid
if refresh_state:
refresh_identity_key = refresh_state.key
if refresh_identity_key is None:
# super-rare condition; a refresh is being called
# on a non-instance-key instance; this is meant to only
# occur within a flush()
refresh_identity_key = \
mapper._identity_key_from_state(refresh_state)
else:
refresh_identity_key = None
if mapper.allow_partial_pks:
is_not_primary_key = _none_set.issuperset
else:
is_not_primary_key = _none_set.intersection
def _instance(row):
# determine the state that we'll be populating
if refresh_identity_key:
# fixed state that we're refreshing
state = refresh_state
instance = state.obj()
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = True
loaded_instance = False
else:
# look at the row, see if that identity is in the
# session, or we have to create a new one
identitykey = (
identity_class,
tuple([row[column] for column in pk_cols])
)
instance = session_identity_map.get(identitykey)
if instance is not None:
# existing instance
state = instance_state(instance)
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = not isnew
loaded_instance = False
if version_check and not currentload:
_validate_version_id(mapper, state, dict_, row, adapter)
else:
# create a new instance
# check for non-NULL values in the primary key columns,
# else no entity is returned for the row
if is_not_primary_key(identitykey[1]):
return None
isnew = True
currentload = True
loaded_instance = True
instance = mapper.class_manager.new_instance()
dict_ = instance_dict(instance)
state = instance_state(instance)
state.key = identitykey
# attach instance to session.
state.session_id = session_id
session_identity_map._add_unpresent(state, identitykey)
# populate. this looks at whether this state is new
# for this load or was existing, and whether or not this
# row is the first row with this identity.
if currentload or populate_existing:
# full population routines. Objects here are either
# just created, or we are doing a populate_existing
if isnew and propagate_options:
state.load_options = propagate_options
state.load_path = load_path
_populate_full(
context, row, state, dict_, isnew,
loaded_instance, populate_existing, populators)
if isnew:
if loaded_instance and load_evt:
state.manager.dispatch.load(state, context)
elif refresh_evt:
state.manager.dispatch.refresh(
state, context, only_load_props)
if populate_existing or state.modified:
if refresh_state and only_load_props:
state._commit(dict_, only_load_props)
else:
state._commit_all(dict_, session_identity_map)
else:
# partial population routines, for objects that were already
# in the Session, but a row matches them; apply eager loaders
# on existing objects, etc.
unloaded = state.unloaded
isnew = state not in context.partials
if not isnew or unloaded or populators["eager"]:
# state is having a partial set of its attributes
# refreshed. Populate those attributes,
# and add to the "context.partials" collection.
to_load = _populate_partial(
context, row, state, dict_, isnew,
unloaded, populators)
if isnew:
if refresh_evt:
state.manager.dispatch.refresh(
state, context, to_load)
state._commit(dict_, to_load)
return instance
if mapper.polymorphic_map and not _polymorphic_from and not refresh_state:
# if we are doing polymorphic, dispatch to a different _instance()
# method specific to the subclass mapper
_instance = _decorate_polymorphic_switch(
_instance, context, mapper, result, path,
polymorphic_discriminator, adapter)
return _instance
def _populate_full(
context, row, state, dict_, isnew,
loaded_instance, populate_existing, populators):
if isnew:
# first time we are seeing a row with this identity.
state.runid = context.runid
for key, getter in populators["quick"]:
dict_[key] = getter(row)
if populate_existing:
for key, set_callable in populators["expire"]:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
else:
for key, set_callable in populators["expire"]:
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
populator(state, dict_, row)
else:
# have already seen rows with this identity.
for key, populator in populators["existing"]:
populator(state, dict_, row)
def _populate_partial(
context, row, state, dict_, isnew,
unloaded, populators):
if not isnew:
to_load = context.partials[state]
for key, populator in populators["existing"]:
if key in to_load:
populator(state, dict_, row)
else:
to_load = unloaded
context.partials[state] = to_load
for key, getter in populators["quick"]:
if key in to_load:
dict_[key] = getter(row)
for key, set_callable in populators["expire"]:
if key in to_load:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["eager"]:
if key not in unloaded:
populator(state, dict_, row)
return to_load
def _validate_version_id(mapper, state, dict_, row, adapter):
version_id_col = mapper.version_id_col
if version_id_col is None:
return
if adapter:
version_id_col = adapter.columns[version_id_col]
if mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col) != row[version_id_col]:
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
"does not match database-loaded version id '%s'."
% (state_str(state), mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col),
row[version_id_col]))
def _decorate_polymorphic_switch(
instance_fn, context, mapper, result, path,
polymorphic_discriminator, adapter):
if polymorphic_discriminator is not None:
polymorphic_on = polymorphic_discriminator
else:
polymorphic_on = mapper.polymorphic_on
if polymorphic_on is None:
return instance_fn
if adapter:
polymorphic_on = adapter.columns[polymorphic_on]
def configure_subclass_mapper(discriminator):
try:
sub_mapper = mapper.polymorphic_map[discriminator]
except KeyError:
raise AssertionError(
"No such polymorphic_identity %r is defined" %
discriminator)
else:
if sub_mapper is mapper:
return None
return _instance_processor(
sub_mapper, context, result,
path, adapter, _polymorphic_from=mapper)
polymorphic_instances = util.PopulateDict(
configure_subclass_mapper
)
def polymorphic_instance(row):
discriminator = row[polymorphic_on]
if discriminator is not None:
_instance = polymorphic_instances[discriminator]
if _instance:
return _instance(row)
return instance_fn(row)
return polymorphic_instance
def load_scalar_attributes(mapper, state, attribute_names):
"""initiate a column-based attribute refresh operation."""
# assert mapper is _state_mapper(state)
session = state.session
if not session:
raise orm_exc.DetachedInstanceError(
"Instance %s is not bound to a Session; "
"attribute refresh operation cannot proceed" %
(state_str(state)))
has_key = bool(state.key)
result = False
if mapper.inherits and not mapper.concrete:
# because we are using Core to produce a select() that we
# pass to the Query, we aren't calling setup() for mapped
# attributes; in 1.0 this means deferred attrs won't get loaded
# by default
statement = mapper._optimized_get_statement(state, attribute_names)
if statement is not None:
result = load_on_ident(
session.query(mapper).
options(
strategy_options.Load(mapper).undefer("*")
).from_statement(statement),
None,
only_load_props=attribute_names,
refresh_state=state
)
if result is False:
if has_key:
identity_key = state.key
else:
# this codepath is rare - only valid when inside a flush, and the
# object is becoming persistent but hasn't yet been assigned
# an identity_key.
# check here to ensure we have the attrs we need.
pk_attrs = [mapper._columntoproperty[col].key
for col in mapper.primary_key]
if state.expired_attributes.intersection(pk_attrs):
raise sa_exc.InvalidRequestError(
"Instance %s cannot be refreshed - it's not "
" persistent and does not "
"contain a full primary key." % state_str(state))
identity_key = mapper._identity_key_from_state(state)
if (_none_set.issubset(identity_key) and
not mapper.allow_partial_pks) or \
_none_set.issuperset(identity_key):
util.warn_limited(
"Instance %s to be refreshed doesn't "
"contain a full primary key - can't be refreshed "
"(and shouldn't be expired, either).",
state_str(state))
return
result = load_on_ident(
session.query(mapper),
identity_key,
refresh_state=state,
only_load_props=attribute_names)
# if instance is pending, a refresh operation
# may not complete (even if PK attributes are assigned)
if has_key and result is None:
raise orm_exc.ObjectDeletedError(state)
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import abstractmethod
from builtins import object
from hashlib import sha1
from twitter.common.collections import OrderedSet
from pants.base.deprecated import deprecated
from pants.base.hash_utils import stable_json_hash
from pants.util.meta import AbstractClass
from pants.util.strutil import ensure_binary
def combine_hashes(hashes):
"""A simple helper function to combine other hashes. Sorts the hashes before rolling them in."""
hasher = sha1()
for h in sorted(hashes):
h = ensure_binary(h)
hasher.update(h)
return hasher.hexdigest()
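# Illustrative note (not from the original source): because the hashes are sorted
# before being rolled into the sha1, combine_hashes is order-insensitive, e.g.
#   combine_hashes(['a' * 40, 'b' * 40]) == combine_hashes(['b' * 40, 'a' * 40])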
class PayloadField(AbstractClass):
"""An immutable, hashable structure to be mixed into Payload instances.
:API: public
"""
_fingerprint_memo = None
def fingerprint(self):
"""A memoized sha1 hexdigest hashing the contents of this PayloadField
The fingerprint returns either a bytestring or None. If the return is None, consumers of the
fingerprint may choose to elide this PayloadField from their combined hash computation.
:API: public
"""
if self._fingerprint_memo is None:
self._fingerprint_memo = self._compute_fingerprint()
return self._fingerprint_memo
def mark_dirty(self):
"""Invalidates the memoized fingerprint for this field.
Exposed for testing.
:API: public
"""
self._fingerprint_memo = None
@abstractmethod
def _compute_fingerprint(self):
"""This method will be called and the result memoized for ``PayloadField.fingerprint``."""
pass
@property
def value(self):
"""
:API: public
"""
return self
class FingerprintedMixin(object):
"""Mixin this class to make your class suitable for passing to FingerprintedField.
:API: public
"""
def fingerprint(self):
"""Override this method to implement a fingerprint for your class.
:API: public
:returns: a sha1 hexdigest hashing the contents of this structure."""
raise NotImplementedError()
class FingerprintedField(PayloadField):
"""Use this field to fingerprint any class that mixes in FingerprintedMixin.
The caller must ensure that the class properly implements fingerprint()
to hash the contents of the object.
:API: public
"""
def __init__(self, value):
self._value = value
def _compute_fingerprint(self):
return self._value.fingerprint()
@property
def value(self):
return self._value
class PythonRequirementsField(frozenset, PayloadField):
"""A frozenset subclass that mixes in PayloadField.
Must be initialized with an iterable of PythonRequirement instances.
:API: public
"""
def _compute_fingerprint(self):
def fingerprint_iter():
for req in self:
hash_items = (
repr(req._requirement),
req._repository,
req._name,
req._use_2to3,
req.compatibility,
)
yield stable_json_hash(hash_items)
return combine_hashes(fingerprint_iter())
class ExcludesField(OrderedSet, PayloadField):
"""An OrderedSet subclass that mixes in PayloadField.
Must be initialized with an iterable of Excludes instances.
:API: public
"""
def _compute_fingerprint(self):
return stable_json_hash(tuple(repr(exclude) for exclude in self))
class JarsField(tuple, PayloadField):
"""A tuple subclass that mixes in PayloadField.
Must be initialized with an iterable of JarDependency instances.
:API: public
"""
def _compute_fingerprint(self):
return stable_json_hash(tuple(jar.cache_key() for jar in self))
class PrimitiveField(PayloadField):
"""A general field for primitive types.
As long as the contents are JSON representable, their hash can be stably inferred.
:API: public
"""
def __init__(self, underlying=None):
self._underlying = underlying
@property
def value(self):
return self._underlying
def _compute_fingerprint(self):
return stable_json_hash(self._underlying)
class PrimitivesSetField(PayloadField):
"""A general field for order-insensitive sets of primitive, ordered types.
As long as the underlying elements are JSON representable and have a consistent sort order,
their hash can be stably inferred. An underlying value of `None` is preserved to allow for
"unset" fields: to default to an empty list/set instead, pass one to the constructor.
:API: public
"""
def __init__(self, underlying=None):
self._underlying = tuple(sorted(set(underlying))) if underlying is not None else None
@property
def value(self):
return self._underlying
def _compute_fingerprint(self):
return stable_json_hash(self._underlying)
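# Illustrative note: PrimitivesSetField normalizes duplicates and ordering, so
#   PrimitivesSetField(['b', 'a', 'a']).fingerprint() == PrimitivesSetField(['a', 'b']).fingerprint()
# while an unset field stays distinguishable: PrimitivesSetField(None).value is None,
# whereas PrimitivesSetField([]).value == ().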
class SetOfPrimitivesField(PayloadField):
"""A general field for order-insensitive sets of primitive, ordered types.
As long as the underlying elements are JSON representable and have a consistent sort order,
their hash can be stably inferred.
:API: public
"""
@deprecated(removal_version='1.11.0.dev0',
hint_message='Use PrimitivesSetField, which preserves `None`/unset fields.')
def __init__(self, underlying=None):
self._underlying = tuple(sorted(set(underlying or [])))
@property
def value(self):
return self._underlying
def _compute_fingerprint(self):
return stable_json_hash(self._underlying)
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import ast
import copy
import uuid
import requests
from requests.auth import HTTPBasicAuth
from oslo_config import cfg
from six.moves.urllib import parse as urlparse # pylint: disable=import-error
from st2common.runners.base import ActionRunner
from st2common.runners.base import get_metadata as get_runner_metadata
from st2common import __version__ as st2_version
from st2common import log as logging
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_STATUS_FAILED
from st2common.constants.action import LIVEACTION_STATUS_TIMED_OUT
from st2common.util.jsonify import json_decode
from st2common.util.jsonify import json_encode
import six
from six.moves import range
__all__ = ["HttpRunner", "HTTPClient", "get_runner", "get_metadata"]
LOG = logging.getLogger(__name__)
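# HTTP status codes in the 2xx range (200-206) that are treated as a successful run.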
SUCCESS_STATUS_CODES = [code for code in range(200, 207)]
# Lookup constants for runner params
RUNNER_ON_BEHALF_USER = "user"
RUNNER_URL = "url"
RUNNER_HEADERS = "headers" # Debatable whether this should be action params.
RUNNER_COOKIES = "cookies"
RUNNER_ALLOW_REDIRECTS = "allow_redirects"
RUNNER_HTTP_PROXY = "http_proxy"
RUNNER_HTTPS_PROXY = "https_proxy"
RUNNER_VERIFY_SSL_CERT = "verify_ssl_cert"
RUNNER_USERNAME = "username"
RUNNER_PASSWORD = "password"
RUNNER_URL_HOSTS_BLACKLIST = "url_hosts_blacklist"
RUNNER_URL_HOSTS_WHITELIST = "url_hosts_whitelist"
# Lookup constants for action params
ACTION_AUTH = "auth"
ACTION_BODY = "body"
ACTION_TIMEOUT = "timeout"
ACTION_METHOD = "method"
ACTION_QUERY_PARAMS = "params"
FILE_NAME = "file_name"
FILE_CONTENT = "file_content"
FILE_CONTENT_TYPE = "file_content_type"
RESPONSE_BODY_PARSE_FUNCTIONS = {"application/json": json_decode}
class HttpRunner(ActionRunner):
def __init__(self, runner_id):
super(HttpRunner, self).__init__(runner_id=runner_id)
self._on_behalf_user = cfg.CONF.system_user.user
self._timeout = 60
def pre_run(self):
super(HttpRunner, self).pre_run()
LOG.debug(
'Entering HttpRunner.pre_run() for liveaction_id="%s"', self.liveaction_id
)
self._on_behalf_user = self.runner_parameters.get(
RUNNER_ON_BEHALF_USER, self._on_behalf_user
)
self._url = self.runner_parameters.get(RUNNER_URL, None)
self._headers = self.runner_parameters.get(RUNNER_HEADERS, {})
self._cookies = self.runner_parameters.get(RUNNER_COOKIES, None)
self._allow_redirects = self.runner_parameters.get(
RUNNER_ALLOW_REDIRECTS, False
)
self._username = self.runner_parameters.get(RUNNER_USERNAME, None)
self._password = self.runner_parameters.get(RUNNER_PASSWORD, None)
self._http_proxy = self.runner_parameters.get(RUNNER_HTTP_PROXY, None)
self._https_proxy = self.runner_parameters.get(RUNNER_HTTPS_PROXY, None)
self._verify_ssl_cert = self.runner_parameters.get(RUNNER_VERIFY_SSL_CERT, None)
self._url_hosts_blacklist = self.runner_parameters.get(
RUNNER_URL_HOSTS_BLACKLIST, []
)
self._url_hosts_whitelist = self.runner_parameters.get(
RUNNER_URL_HOSTS_WHITELIST, []
)
def run(self, action_parameters):
client = self._get_http_client(action_parameters)
if self._url_hosts_blacklist and self._url_hosts_whitelist:
msg = (
'"url_hosts_blacklist" and "url_hosts_whitelist" parameters are mutually '
"exclusive. Only one should be provided."
)
raise ValueError(msg)
try:
result = client.run()
except requests.exceptions.Timeout as e:
result = {"error": six.text_type(e)}
status = LIVEACTION_STATUS_TIMED_OUT
else:
status = HttpRunner._get_result_status(result.get("status_code", None))
return (status, result, None)
def _get_http_client(self, action_parameters):
body = action_parameters.get(ACTION_BODY, None)
timeout = float(action_parameters.get(ACTION_TIMEOUT, self._timeout))
method = action_parameters.get(ACTION_METHOD, None)
params = action_parameters.get(ACTION_QUERY_PARAMS, None)
auth = action_parameters.get(ACTION_AUTH, {})
file_name = action_parameters.get(FILE_NAME, None)
file_content = action_parameters.get(FILE_CONTENT, None)
file_content_type = action_parameters.get(FILE_CONTENT_TYPE, None)
# Include our user agent and action name so requests can be tracked back
headers = copy.deepcopy(self._headers) if self._headers else {}
headers["User-Agent"] = "st2/v%s" % (st2_version)
headers["X-Stanley-Action"] = self.action_name
if file_name and file_content:
files = {}
if file_content_type:
value = (file_content, file_content_type)
else:
value = file_content
files[file_name] = value
else:
files = None
proxies = {}
if self._http_proxy:
proxies["http"] = self._http_proxy
if self._https_proxy:
proxies["https"] = self._https_proxy
return HTTPClient(
url=self._url,
method=method,
body=body,
params=params,
headers=headers,
cookies=self._cookies,
auth=auth,
timeout=timeout,
allow_redirects=self._allow_redirects,
proxies=proxies,
files=files,
verify=self._verify_ssl_cert,
username=self._username,
password=self._password,
url_hosts_blacklist=self._url_hosts_blacklist,
url_hosts_whitelist=self._url_hosts_whitelist,
)
@staticmethod
def _get_result_status(status_code):
return (
LIVEACTION_STATUS_SUCCEEDED
if status_code in SUCCESS_STATUS_CODES
else LIVEACTION_STATUS_FAILED
)
class HTTPClient(object):
def __init__(
self,
url=None,
method=None,
body="",
params=None,
headers=None,
cookies=None,
auth=None,
timeout=60,
allow_redirects=False,
proxies=None,
files=None,
verify=False,
username=None,
password=None,
url_hosts_blacklist=None,
url_hosts_whitelist=None,
):
if url is None:
raise Exception("URL must be specified.")
if method is None:
if files or body:
method = "POST"
else:
method = "GET"
headers = headers or {}
normalized_headers = self._normalize_headers(headers=headers)
if body and "content-length" not in normalized_headers:
headers["Content-Length"] = str(len(body))
self.url = url
self.method = method
self.headers = headers
self.body = body
self.params = params
self.cookies = cookies
self.auth = auth
self.timeout = timeout
self.allow_redirects = allow_redirects
self.proxies = proxies
self.files = files
self.verify = verify
self.username = username
self.password = password
self.url_hosts_blacklist = url_hosts_blacklist or []
self.url_hosts_whitelist = url_hosts_whitelist or []
if self.url_hosts_blacklist and self.url_hosts_whitelist:
msg = (
'"url_hosts_blacklist" and "url_hosts_whitelist" parameters are mutually '
"exclusive. Only one should be provided."
)
raise ValueError(msg)
def run(self):
results = {}
resp = None
json_content = self._is_json_content()
# Check if the provided URL is blacklisted
is_url_blacklisted = self._is_url_blacklisted(url=self.url)
if is_url_blacklisted:
raise ValueError('URL "%s" is blacklisted' % (self.url))
is_url_whitelisted = self._is_url_whitelisted(url=self.url)
if not is_url_whitelisted:
raise ValueError('URL "%s" is not whitelisted' % (self.url))
try:
if json_content:
# cast params (body) to dict
data = self._cast_object(self.body)
try:
data = json_encode(data)
except ValueError:
msg = "Request body (%s) can't be parsed as JSON" % (data)
raise ValueError(msg)
else:
data = self.body
if self.username or self.password:
self.auth = HTTPBasicAuth(self.username, self.password)
            # Ensure data is bytes since that's what requests expects
if isinstance(data, six.text_type):
data = data.encode("utf-8")
resp = requests.request(
self.method,
self.url,
params=self.params,
data=data,
headers=self.headers,
cookies=self.cookies,
auth=self.auth,
timeout=self.timeout,
allow_redirects=self.allow_redirects,
proxies=self.proxies,
files=self.files,
verify=self.verify,
)
headers = dict(resp.headers)
body, parsed = self._parse_response_body(headers=headers, body=resp.text)
results["status_code"] = resp.status_code
results["body"] = body
results["parsed"] = parsed # flag which indicates if body has been parsed
results["headers"] = headers
return results
except Exception as e:
LOG.exception("Exception making request to remote URL: %s, %s", self.url, e)
raise
finally:
if resp:
resp.close()
def _parse_response_body(self, headers, body):
"""
:param body: Response body.
:type body: ``str``
:return: (parsed body, flag which indicates if body has been parsed)
:rtype: (``object``, ``bool``)
"""
body = body or ""
headers = self._normalize_headers(headers=headers)
content_type = headers.get("content-type", None)
parsed = False
if not content_type:
return (body, parsed)
# The header can also contain charset which we simply discard
content_type = content_type.split(";")[0]
parse_func = RESPONSE_BODY_PARSE_FUNCTIONS.get(content_type, None)
if not parse_func:
return (body, parsed)
LOG.debug("Parsing body with content type: %s", content_type)
try:
body = parse_func(body)
except Exception:
LOG.exception("Failed to parse body")
else:
parsed = True
return (body, parsed)
def _normalize_headers(self, headers):
"""
Normalize the header keys by lowercasing all the keys.
"""
result = {}
for key, value in headers.items():
result[key.lower()] = value
return result
def _is_json_content(self):
normalized = self._normalize_headers(self.headers)
return normalized.get("content-type", None) == "application/json"
def _cast_object(self, value):
if isinstance(value, str) or isinstance(value, six.text_type):
try:
return json_decode(value)
except:
return ast.literal_eval(value)
else:
return value
def _is_url_blacklisted(self, url):
"""
Verify if the provided URL is blacklisted via url_hosts_blacklist runner parameter.
"""
if not self.url_hosts_blacklist:
# Blacklist is empty
return False
host = self._get_host_from_url(url=url)
if host in self.url_hosts_blacklist:
return True
return False
def _is_url_whitelisted(self, url):
"""
Verify if the provided URL is whitelisted via url_hosts_whitelist runner parameter.
"""
if not self.url_hosts_whitelist:
return True
host = self._get_host_from_url(url=url)
if host in self.url_hosts_whitelist:
return True
return False
def _get_host_from_url(self, url):
"""
Return sanitized host (netloc) value from the provided url.
"""
parsed = urlparse.urlparse(url)
# Remove port and []
host = parsed.netloc.replace("[", "").replace("]", "")
if parsed.port is not None:
host = host.replace(":%s" % (parsed.port), "")
return host
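# Illustrative standalone usage of HTTPClient (the URL and header below are just
# an example, not part of the runner contract):
#
#   client = HTTPClient(url='https://example.com/api', method='GET',
#                       headers={'Accept': 'application/json'}, timeout=10)
#   result = client.run()
#   # result carries 'status_code', 'body', 'parsed' and 'headers'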
def get_runner():
return HttpRunner(str(uuid.uuid4()))
def get_metadata():
return get_runner_metadata("http_runner")[0]
|
|
from __future__ import unicode_literals
import os
import unittest
import tempfile
import shutil
import time
import mock
import webtest
from pyramid.config import Configurator
NOT_SET = object()
class TestGenshiTemplateRendererIntegration(unittest.TestCase):
def make_app(self, config_decorator=None, settings=None):
settings = settings or {}
config = Configurator(settings=settings)
config.include('pyramid_genshi')
if config_decorator is not None:
config.include(config_decorator)
app = config.make_wsgi_app()
testapp = webtest.TestApp(app)
return testapp
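    # Illustrative: the settings dict can carry the genshi options exercised by the
    # tests below, e.g. make_app(settings={'genshi.method': 'xhtml'}).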
def make_minimal_app(
self,
template='fixtures/minimal.genshi',
values=NOT_SET,
):
"""Make a minimal app for rendering given template and values
"""
if values is NOT_SET:
values = {}
def minimal(request):
return values
def add_config(config):
config.add_view(minimal, renderer=template)
testapp = self.make_app(add_config)
return testapp
def test_simple(self):
testapp = self.make_minimal_app(
template='fixtures/simple.genshi',
values=dict(name='foobar'),
)
resp = testapp.get('/')
self.assertEqual(resp.text, '<div>\nfoobar\n</div>')
def test_render_method_and_format(self):
testapp = self.make_minimal_app()
def assert_render_method(method, expected):
testapp.app.registry.settings['genshi.method'] = method
resp = testapp.get('/')
self.assertEqual(resp.text, expected)
assert_render_method(
'xml',
'<div xmlns="http://www.w3.org/1999/xhtml">\n</div>',
)
assert_render_method(
'xhtml',
'<div xmlns="http://www.w3.org/1999/xhtml">\n</div>',
)
assert_render_method('text', '\n')
def assert_render_format(format, expected):
testapp.app.registry.settings['genshi.default_format'] = format
resp = testapp.get('/')
self.assertEqual(resp.text, expected)
assert_render_format(
'xml',
'<div xmlns="http://www.w3.org/1999/xhtml">\n</div>',
)
assert_render_format(
'xhtml',
'<div xmlns="http://www.w3.org/1999/xhtml">\n</div>',
)
assert_render_format('text', '\n')
def test_render_doctype(self):
testapp = self.make_minimal_app()
def assert_doctype(doctype, expected):
testapp.app.registry.settings['genshi.default_doctype'] = doctype
resp = testapp.get('/')
self.assertEqual(resp.text, expected)
assert_doctype(
'html5',
'<!DOCTYPE html>\n<div>\n</div>'
)
assert_doctype(
'xhtml',
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n'
'<div>\n</div>'
)
def test_render_encoding(self):
testapp = self.make_minimal_app('fixtures/chinese.genshi')
def assert_encoding(encoding, expected):
testapp.app.registry.settings['genshi.default_encoding'] = encoding
resp = testapp.get('/')
self.assertEqual(resp.body, expected)
assert_encoding(
'utf8',
b'<div>\n\xe4\xb8\xad\xe6\x96\x87\xe5\xad\x97\n</div>',
)
assert_encoding(
'cp950',
b'<div>\n\xa4\xa4\xa4\xe5\xa6r\n</div>',
)
@mock.patch('pyramid.i18n.Localizer.translate')
def test_i18n_msg(self, translate_method):
testapp = self.make_minimal_app('fixtures/i18n_msg.genshi')
def translate(msg):
if msg == 'Hello':
return 'Hola'
return msg
translate_method.side_effect = translate
resp = testapp.get('/')
self.assertEqual(resp.text, '<div>Hola World</div>')
@mock.patch('pyramid.i18n.Localizer.translate')
def test_default_domain(self, translate_method):
translate_method.side_effect = lambda text: text
testapp = self.make_minimal_app('fixtures/i18n_msg.genshi')
testapp.app.registry.settings['genshi.default_domain'] = 'test_domain'
testapp.get('/')
self.assertEqual(translate_method.call_count, 2)
ts1 = translate_method.call_args_list[0][0][0]
ts2 = translate_method.call_args_list[1][0][0]
self.assertEqual(ts1.domain, 'test_domain')
self.assertEqual(ts2.domain, 'test_domain')
@unittest.skip('Known bug, wont fix currently')
@mock.patch('pyramid.i18n.Localizer.translate')
def test_i18n_domain(self, translate_method):
translate_method.side_effect = lambda text: text
testapp = self.make_minimal_app('fixtures/i18n_domain.genshi')
testapp.app.registry.settings['genshi.default_domain'] = 'my_domain'
testapp.get('/')
self.assertEqual(translate_method.call_count, 2)
ts1 = translate_method.call_args_list[0][0][0]
ts2 = translate_method.call_args_list[1][0][0]
self.assertEqual(ts1.domain, 'test_domain')
        # TODO: this _('xxx') call should also be in test_domain,
        # but since our _ method cannot access the genshi context,
        # it ends up in the wrong domain; we may want to address this later.
#
# A temporary solution would be
#
# _('xxx', domain='test_domain')
#
self.assertEqual(ts2.domain, 'test_domain')
def test_render_with_wrong_argument(self):
testapp = self.make_minimal_app(values=None)
with self.assertRaises(ValueError):
testapp.get('/')
def test_render_assert_path_include(self):
testapp = self.make_minimal_app('fixtures/asset_include.genshi')
resp = testapp.get('/')
self.assertIn('replaced', resp.text)
def test_render_relative_path_include(self):
testapp = self.make_minimal_app('fixtures/relative_include.genshi')
resp = testapp.get('/')
self.assertIn('replaced', resp.text)
def test_render_asset_include_auto_reload(self):
tmp_dir = tempfile.mkdtemp()
fixtures_dir = os.path.join(os.path.dirname(__file__), 'fixtures')
try:
included = """
<div xmlns="http://www.w3.org/1999/xhtml"
xmlns:py="http://genshi.edgewall.org/"
py:strip="True"
>
<py:match path="span">
<span>replaced</span>
</py:match>
</div>
"""
included_path = os.path.join(fixtures_dir, '_updated_included.genshi')
with open(included_path, 'wt') as tmpl_file:
tmpl_file.write(included)
asset_include = """
<div xmlns="http://www.w3.org/1999/xhtml"
xmlns:py="http://genshi.edgewall.org/"
xmlns:xi="http://www.w3.org/2001/XInclude"
>
<xi:include href="tests:fixtures/_updated_included.genshi" />
<span>To be replaced</span>
</div>
"""
asset_include_path = os.path.join(tmp_dir, 'asset_include.genshi')
with open(asset_include_path, 'wt') as tmpl_file:
tmpl_file.write(asset_include)
testapp = self.make_minimal_app(asset_include_path)
resp = testapp.get('/')
self.assertIn('replaced', resp.text)
# Notice: we need to sleep for a while, otherwise the modification
            # time of the file will be the same
time.sleep(1)
included = """
<div xmlns="http://www.w3.org/1999/xhtml"
xmlns:py="http://genshi.edgewall.org/"
py:strip="True"
>
<py:match path="span">
<span>updated</span>
</py:match>
</div>
"""
with open(included_path, 'wt') as tmpl_file:
tmpl_file.write(included)
resp = testapp.get('/')
self.assertIn('updated', resp.text)
finally:
shutil.rmtree(tmp_dir)
if os.path.exists(included_path):
os.remove(included_path)
|
|
"""
Support and standalone functions for Robust Linear Models
References
----------
PJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York, 1981.
R Venables, B Ripley. 'Modern Applied Statistics in S'
Springer, New York, 2002.
"""
import numpy as np
from scipy.stats import norm as Gaussian
import norms
from scikits.statsmodels import tools
def mad(a, c=Gaussian.ppf(3/4.), axis=0): # c \approx .6745
"""
The Median Absolute Deviation along given axis of an array
Parameters
----------
a : array-like
Input array.
c : float, optional
The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),
which is approximately .6745.
axis : int, optional
        The default is 0.
Returns
-------
mad : float
`mad` = median(abs(`a`))/`c`
"""
a = np.asarray(a)
return np.median((np.fabs(a))/c, axis=axis)
def stand_mad(a, c=Gaussian.ppf(3/4.), axis=0):
"""
The standardized Median Absolute Deviation along given axis of an array.
Parameters
----------
a : array-like
Input array.
c : float, optional
The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),
which is approximately .6745.
axis : int, optional
        The default is 0.
Returns
-------
mad : float
`mad` = median(abs(`a`-median(`a`))/`c`
"""
a = np.asarray(a)
d = np.median(a, axis = axis)
d = tools.unsqueeze(d, axis, a.shape)
return np.median(np.fabs(a - d)/c, axis = axis)
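# Illustrative check: for a = [1., 2., 3., 4., 5.] the deviations from the median
# are [2, 1, 0, 1, 2], so stand_mad(a) == 1/0.6745, approximately 1.4826 -- the
# usual consistency factor that puts the MAD on the scale of a Gaussian standard
# deviation.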
class Huber(object):
"""
Huber's proposal 2 for estimating location and scale jointly.
Parameters
----------
c : float, optional
        Threshold for the psi function (chi = psi**2). Default value is 1.5.
tol : float, optional
Tolerance for convergence. Default value is 1e-08.
    maxiter : int, optional
Maximum number of iterations. Default value is 30.
norm : scikits.statsmodels.robust.norms.RobustNorm, optional
A robust norm used in M estimator of location. If None,
the location estimator defaults to a one-step
fixed point version of the M-estimator using Huber's T.
    Methods
    -------
    call
        Return joint estimates of Huber's scale and location.
Examples
--------
>>> import numpy as np
>>> import scikits.statsmodels as sm
>>> chem_data = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
... 3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7, 3.7, 3.7,
... 3.77, 5.28, 28.95])
>>> sm.robust.scale.huber(chem_data)
(array(3.2054980819923693), array(0.67365260010478967))
"""
def __init__(self, c=1.5, tol=1.0e-08, maxiter=30, norm=None):
self.c = c
self.maxiter = maxiter
self.tol = tol
self.norm = norm
tmp = 2 * Gaussian.cdf(c) - 1
self.gamma = tmp + c**2 * (1 - tmp) - 2 * c * Gaussian.pdf(c)
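        # gamma = E[psi_c(Z)**2] for Z ~ N(0, 1), where psi_c(x) = clip(x, -c, c):
        # (tmp - 2*c*Gaussian.pdf(c)) is E[Z**2; |Z| <= c] and c**2*(1 - tmp)
        # covers the clipped tails; it is the consistency correction used in the
        # scale update in _estimate_both below.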
def __call__(self, a, mu=None, initscale=None, axis=0):
"""
Compute Huber's proposal 2 estimate of scale, using an optional
initial value of scale and an optional estimate of mu. If mu
is supplied, it is not reestimated.
Parameters
----------
a : array
1d array
mu : float or None, optional
If the location mu is supplied then it is not reestimated.
Default is None, which means that it is estimated.
initscale : float or None, optional
A first guess on scale. If initscale is None then the standardized
median absolute deviation of a is used.
Notes
-----
`Huber` minimizes the function
sum(psi((a[i]-mu)/scale)**2)
as a function of (mu, scale), where
psi(x) = np.clip(x, -self.c, self.c)
"""
a = np.asarray(a)
if mu is None:
n = a.shape[0] - 1
mu = np.median(a, axis=axis)
est_mu = True
else:
n = a.shape[0]
mu = mu
est_mu = False
if initscale is None:
scale = stand_mad(a, axis=axis)
else:
scale = initscale
scale = tools.unsqueeze(scale, axis, a.shape)
mu = tools.unsqueeze(mu, axis, a.shape)
return self._estimate_both(a, scale, mu, axis, est_mu, n)
def _estimate_both(self, a, scale, mu, axis, est_mu, n):
"""
Estimate scale and location simultaneously with the following
pseudo_loop:
while not_converged:
mu, scale = estimate_location(a, scale, mu), estimate_scale(a, scale, mu)
where estimate_location is an M-estimator and estimate_scale implements
the check used in Section 5.5 of Venables & Ripley
"""
for _ in range(self.maxiter):
# Estimate the mean along a given axis
if est_mu:
if self.norm is None:
# This is a one-step fixed-point estimator
# if self.norm == norms.HuberT
# It should be faster than using norms.HuberT
nmu = np.clip(a, mu-self.c*scale,
mu+self.c*scale).sum(axis) / a.shape[axis]
else:
nmu = norms.estimate_location(a, scale, self.norm, axis, mu,
self.maxiter, self.tol)
else:
# Effectively, do nothing
nmu = mu.squeeze()
nmu = tools.unsqueeze(nmu, axis, a.shape)
subset = np.less_equal(np.fabs((a - mu)/scale), self.c)
card = subset.sum(axis)
nscale = np.sqrt(np.sum(subset * (a - nmu)**2, axis) \
/ (n * self.gamma - (a.shape[axis] - card) * self.c**2))
nscale = tools.unsqueeze(nscale, axis, a.shape)
test1 = np.alltrue(np.less_equal(np.fabs(scale - nscale),
nscale * self.tol))
test2 = np.alltrue(np.less_equal(np.fabs(mu - nmu), nscale*self.tol))
if not (test1 and test2):
mu = nmu; scale = nscale
else:
return nmu.squeeze(), nscale.squeeze()
raise ValueError('joint estimation of location and scale failed to converge in %d iterations' % self.maxiter)
huber = Huber()
class HuberScale(object):
"""
Huber's scaling for fitting robust linear models.
Huber's scale is intended to be used as the scale estimate in the
IRLS algorithm and is slightly different than the `Huber` class.
Parameters
----------
d : float, optional
d is the tuning constant for Huber's scale. Default is 2.5
tol : float, optional
The convergence tolerance
    maxiter : int, optional
The maximum number of iterations. The default is 30.
Methods
-------
call
        Returns Huber's scale computed as below
Notes
--------
    Huber's scale is the iterative solution to
    scale_(i+1)**2 = 1/(n*h)*sum(chi(r/sigma_i))*sigma_i**2
    where the Huber function is
    chi(x) = (x**2)/2   for |x| < d
    chi(x) = (d**2)/2   for |x| >= d
    and the Huber constant is
    h = (n-p)/n*(d**2 + (1-d**2)*scipy.stats.norm.cdf(d)
        - .5 - d/sqrt(2*pi)*exp(-0.5*d**2))
"""
def __init__(self, d=2.5, tol=1e-08, maxiter=30):
self.d = d
self.tol = tol
self.maxiter = maxiter
def __call__(self, df_resid, nobs, resid):
h = (df_resid)/nobs*(self.d**2 + (1-self.d**2)*\
Gaussian.cdf(self.d)-.5 - self.d/(np.sqrt(2*np.pi))*\
np.exp(-.5*self.d**2))
s = stand_mad(resid)
subset = lambda x: np.less(np.fabs(resid/x),self.d)
chi = lambda s: subset(s)*(resid/s)**2/2+(1-subset(s))*(self.d**2/2)
scalehist = [np.inf,s]
niter = 1
while (np.abs(scalehist[niter-1] - scalehist[niter])>self.tol \
and niter < self.maxiter):
nscale = np.sqrt(1/(nobs*h)*np.sum(chi(scalehist[-1]))*\
scalehist[-1]**2)
scalehist.append(nscale)
niter += 1
if niter == self.maxiter:
            raise ValueError("Huber's scale failed to converge")
return scalehist[-1]
hubers_scale = HuberScale()
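# Illustrative sketch (hypothetical names): given residuals `resid` from a fit
# with `k` estimated parameters,
#   scale = hubers_scale(df_resid=len(resid) - k, nobs=len(resid), resid=resid)
# returns Huber's scale, intended as the scale estimate inside an IRLS loop.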
|
|
"""
Wireless Sensor Tags platform support.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/wirelesstag/
"""
import logging
from requests.exceptions import HTTPError, ConnectTimeout
import voluptuous as vol
from homeassistant.const import (
ATTR_BATTERY_LEVEL, ATTR_VOLTAGE, CONF_USERNAME, CONF_PASSWORD)
import homeassistant.helpers.config_validation as cv
from homeassistant import util
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.dispatcher import (
dispatcher_send)
REQUIREMENTS = ['wirelesstagpy==0.4.0']
_LOGGER = logging.getLogger(__name__)
# strength of signal in dBm
ATTR_TAG_SIGNAL_STRENGTH = 'signal_strength'
# indicates if tag is out of range or not
ATTR_TAG_OUT_OF_RANGE = 'out_of_range'
# power consumption as a percentage of the tag receiver's maximum power
ATTR_TAG_POWER_CONSUMPTION = 'power_consumption'
NOTIFICATION_ID = 'wirelesstag_notification'
NOTIFICATION_TITLE = "Wireless Sensor Tag Setup"
DOMAIN = 'wirelesstag'
DEFAULT_ENTITY_NAMESPACE = 'wirelesstag'
# template for signal - first parameter is tag_id,
# second, tag manager mac address
SIGNAL_TAG_UPDATE = 'wirelesstag.tag_info_updated_{}_{}'
# template for signal - tag_id, sensor type and
# tag manager mac address
SIGNAL_BINARY_EVENT_UPDATE = 'wirelesstag.binary_event_updated_{}_{}_{}'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
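# Example configuration.yaml entry matching CONFIG_SCHEMA above (credentials are
# placeholders):
#
#   wirelesstag:
#     username: YOUR_WIRELESSTAG_EMAIL
#     password: YOUR_WIRELESSTAG_PASSWORD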
class WirelessTagPlatform:
"""Principal object to manage all registered in HA tags."""
def __init__(self, hass, api):
"""Designated initializer for wirelesstags platform."""
self.hass = hass
self.api = api
self.tags = {}
self._local_base_url = None
@property
def tag_manager_macs(self):
"""Return list of tag managers mac addresses in user account."""
return self.api.mac_addresses
def load_tags(self):
"""Load tags from remote server."""
self.tags = self.api.load_tags()
return self.tags
def arm(self, switch):
"""Arm entity sensor monitoring."""
func_name = 'arm_{}'.format(switch.sensor_type)
arm_func = getattr(self.api, func_name)
if arm_func is not None:
arm_func(switch.tag_id, switch.tag_manager_mac)
def disarm(self, switch):
"""Disarm entity sensor monitoring."""
func_name = 'disarm_{}'.format(switch.sensor_type)
disarm_func = getattr(self.api, func_name)
if disarm_func is not None:
disarm_func(switch.tag_id, switch.tag_manager_mac)
def make_notifications(self, binary_sensors, mac):
"""Create configurations for push notifications."""
_LOGGER.info("Creating configurations for push notifications.")
configs = []
bi_url = self.binary_event_callback_url
for bi_sensor in binary_sensors:
configs.extend(bi_sensor.event.build_notifications(bi_url, mac))
update_url = self.update_callback_url
from wirelesstagpy import NotificationConfig as NC
update_config = NC.make_config_for_update_event(update_url, mac)
configs.append(update_config)
return configs
def install_push_notifications(self, binary_sensors):
"""Register local push notification from tag manager."""
_LOGGER.info("Registering local push notifications.")
for mac in self.tag_manager_macs:
configs = self.make_notifications(binary_sensors, mac)
# install notifications for all tags in tag manager
# specified by mac
result = self.api.install_push_notification(0, configs, True, mac)
if not result:
self.hass.components.persistent_notification.create(
"Error: failed to install local push notifications <br />",
title="Wireless Sensor Tag Setup Local Push Notifications",
notification_id="wirelesstag_failed_push_notification")
else:
_LOGGER.info("Installed push notifications for all\
tags in %s.", mac)
@property
def local_base_url(self):
"""Define base url of hass in local network."""
if self._local_base_url is None:
self._local_base_url = "http://{}".format(util.get_local_ip())
port = self.hass.config.api.port
if port is not None:
self._local_base_url += ':{}'.format(port)
return self._local_base_url
@property
def update_callback_url(self):
"""Return url for local push notifications(update event)."""
return '{}/api/events/wirelesstag_update_tags'.format(
self.local_base_url)
@property
def binary_event_callback_url(self):
"""Return url for local push notifications(binary event)."""
return '{}/api/events/wirelesstag_binary_event'.format(
self.local_base_url)
def handle_update_tags_event(self, event):
"""Handle push event from wireless tag manager."""
_LOGGER.info("push notification for update arrived: %s", event)
try:
tag_id = event.data.get('id')
mac = event.data.get('mac')
dispatcher_send(
self.hass,
SIGNAL_TAG_UPDATE.format(tag_id, mac),
event)
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error("Unable to handle tag update event:\
%s error: %s", str(event), str(ex))
def handle_binary_event(self, event):
"""Handle push notifications for binary (on/off) events."""
_LOGGER.info("Push notification for binary event arrived: %s", event)
try:
tag_id = event.data.get('id')
event_type = event.data.get('type')
mac = event.data.get('mac')
dispatcher_send(
self.hass,
SIGNAL_BINARY_EVENT_UPDATE.format(tag_id, event_type, mac),
event)
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error("Unable to handle tag binary event:\
%s error: %s", str(event), str(ex))
def setup(hass, config):
"""Set up the Wireless Sensor Tag component."""
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
try:
from wirelesstagpy import (WirelessTags, WirelessTagsException)
wirelesstags = WirelessTags(username=username, password=password)
platform = WirelessTagPlatform(hass, wirelesstags)
platform.load_tags()
hass.data[DOMAIN] = platform
except (ConnectTimeout, HTTPError, WirelessTagsException) as ex:
_LOGGER.error("Unable to connect to wirelesstag.net service: %s",
str(ex))
hass.components.persistent_notification.create(
"Error: {}<br />"
"Please restart hass after fixing this."
"".format(ex),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID)
return False
# listen to custom events
hass.bus.listen('wirelesstag_update_tags',
hass.data[DOMAIN].handle_update_tags_event)
hass.bus.listen('wirelesstag_binary_event',
hass.data[DOMAIN].handle_binary_event)
return True
class WirelessTagBaseSensor(Entity):
"""Base class for HA implementation for Wireless Sensor Tag."""
def __init__(self, api, tag):
"""Initialize a base sensor for Wireless Sensor Tag platform."""
self._api = api
self._tag = tag
self._uuid = self._tag.uuid
self.tag_id = self._tag.tag_id
self.tag_manager_mac = self._tag.tag_manager_mac
self._name = self._tag.name
self._state = None
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def principal_value(self):
"""Return base value.
        Subclasses need to override this based on the type of sensor.
"""
return 0
def updated_state_value(self):
"""Return formatted value.
The default implementation formats principal value.
"""
return self.decorate_value(self.principal_value)
# pylint: disable=no-self-use
def decorate_value(self, value):
"""Decorate input value to be well presented for end user."""
return '{:.1f}'.format(value)
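    # e.g. decorate_value(21.456) -> '21.5' (one decimal place by default;
    # subclasses may override this formatting).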
@property
def available(self):
"""Return True if entity is available."""
return self._tag.is_alive
def update(self):
"""Update state."""
if not self.should_poll:
return
updated_tags = self._api.load_tags()
updated_tag = updated_tags[self._uuid]
if updated_tag is None:
_LOGGER.error('Unable to update tag: "%s"', self.name)
return
self._tag = updated_tag
self._state = self.updated_state_value()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_BATTERY_LEVEL: int(self._tag.battery_remaining*100),
ATTR_VOLTAGE: '{:.2f}V'.format(self._tag.battery_volts),
ATTR_TAG_SIGNAL_STRENGTH: '{}dBm'.format(
self._tag.signal_strength),
ATTR_TAG_OUT_OF_RANGE: not self._tag.is_in_range,
ATTR_TAG_POWER_CONSUMPTION: '{:.2f}%'.format(
self._tag.power_consumption)
}
|
|
# coding: utf-8
"""
Cloudbreak API
    Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing Hadoop clusters of arbitrary size to be spun up on different cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class V3workspaceIdrdsconfigsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
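    # Illustrative usage (assumes a configured ApiClient / authentication):
    #
    #   api = V3workspaceIdrdsconfigsApi()
    #   configs = api.list_rds_configs_by_workspace(workspace_id=1)
    #
    # Each operation below also accepts a `callback` kwarg to run asynchronously.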
def create_rds_config_in_workspace(self, workspace_id, **kwargs):
"""
create RDS config in workspace
        An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_rds_config_in_workspace(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:param RdsConfig body:
:return: RDSConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_rds_config_in_workspace_with_http_info(workspace_id, **kwargs)
else:
(data) = self.create_rds_config_in_workspace_with_http_info(workspace_id, **kwargs)
return data
def create_rds_config_in_workspace_with_http_info(self, workspace_id, **kwargs):
"""
create RDS config in workspace
        An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_rds_config_in_workspace_with_http_info(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:param RdsConfig body:
:return: RDSConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_rds_config_in_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `create_rds_config_in_workspace`")
collection_formats = {}
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v3/{workspaceId}/rdsconfigs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RDSConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_rds_config_in_workspace(self, workspace_id, name, **kwargs):
"""
delete RDS config by name in workspace
        An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_rds_config_in_workspace(workspace_id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:param str name: (required)
:return: RDSConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_rds_config_in_workspace_with_http_info(workspace_id, name, **kwargs)
else:
(data) = self.delete_rds_config_in_workspace_with_http_info(workspace_id, name, **kwargs)
return data
def delete_rds_config_in_workspace_with_http_info(self, workspace_id, name, **kwargs):
"""
delete RDS config by name in workspace
        An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_rds_config_in_workspace_with_http_info(workspace_id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:param str name: (required)
:return: RDSConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_rds_config_in_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `delete_rds_config_in_workspace`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_rds_config_in_workspace`")
collection_formats = {}
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v3/{workspaceId}/rdsconfigs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RDSConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_rds_config_in_workspace(self, workspace_id, name, **kwargs):
"""
get RDS config by name in workspace
        An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_rds_config_in_workspace(workspace_id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:param str name: (required)
:return: RDSConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_rds_config_in_workspace_with_http_info(workspace_id, name, **kwargs)
else:
(data) = self.get_rds_config_in_workspace_with_http_info(workspace_id, name, **kwargs)
return data
def get_rds_config_in_workspace_with_http_info(self, workspace_id, name, **kwargs):
"""
get RDS config by name in workspace
        An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_rds_config_in_workspace_with_http_info(workspace_id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:param str name: (required)
:return: RDSConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_rds_config_in_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_rds_config_in_workspace`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_rds_config_in_workspace`")
collection_formats = {}
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v3/{workspaceId}/rdsconfigs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RDSConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_rds_request_from_name_in_workspace(self, workspace_id, name, **kwargs):
"""
get request in workspace
        An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_rds_request_from_name_in_workspace(workspace_id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:param str name: (required)
:return: RdsConfig
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_rds_request_from_name_in_workspace_with_http_info(workspace_id, name, **kwargs)
else:
(data) = self.get_rds_request_from_name_in_workspace_with_http_info(workspace_id, name, **kwargs)
return data
def get_rds_request_from_name_in_workspace_with_http_info(self, workspace_id, name, **kwargs):
"""
get request in workspace
        An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_rds_request_from_name_in_workspace_with_http_info(workspace_id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:param str name: (required)
:return: RdsConfig
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_rds_request_from_name_in_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_rds_request_from_name_in_workspace`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_rds_request_from_name_in_workspace`")
collection_formats = {}
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v3/{workspaceId}/rdsconfigs/{name}/request', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RdsConfig',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_rds_configs_by_workspace(self, workspace_id, **kwargs):
"""
list RDS configs for the given workspace
        An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_rds_configs_by_workspace(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:return: list[RDSConfigResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_rds_configs_by_workspace_with_http_info(workspace_id, **kwargs)
else:
(data) = self.list_rds_configs_by_workspace_with_http_info(workspace_id, **kwargs)
return data
def list_rds_configs_by_workspace_with_http_info(self, workspace_id, **kwargs):
"""
list RDS configs for the given workspace
        An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_rds_configs_by_workspace_with_http_info(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:return: list[RDSConfigResponse]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_rds_configs_by_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `list_rds_configs_by_workspace`")
collection_formats = {}
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v3/{workspaceId}/rdsconfigs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[RDSConfigResponse]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def test_rds_connection_in_workspace(self, workspace_id, **kwargs):
"""
test RDS connectivity
An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.test_rds_connection_in_workspace(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:param RdsTestRequest body:
:return: RdsTestResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.test_rds_connection_in_workspace_with_http_info(workspace_id, **kwargs)
else:
(data) = self.test_rds_connection_in_workspace_with_http_info(workspace_id, **kwargs)
return data
def test_rds_connection_in_workspace_with_http_info(self, workspace_id, **kwargs):
"""
test RDS connectivity
An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.test_rds_connection_in_workspace_with_http_info(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:param RdsTestRequest body:
:return: RdsTestResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method test_rds_connection_in_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `test_rds_connection_in_workspace`")
collection_formats = {}
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v3/{workspaceId}/rdsconfigs/testconnect', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RdsTestResult',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
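# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated client): it illustrates the
# synchronous and callback-based call patterns described in the docstrings
# above. `api` is assumed to be an already-constructed instance of this API
# class with a configured `api_client`; the workspace id is illustrative.
def _example_rds_config_calls(api, workspace_id):
    from pprint import pprint

    # Synchronous call: returns the deserialized response body directly.
    configs = api.list_rds_configs_by_workspace(workspace_id)
    pprint(configs)

    # Asynchronous call: passing `callback` returns the request thread and
    # the callback receives the deserialized response when it completes.
    def on_response(response):
        pprint(response)

    thread = api.list_rds_configs_by_workspace(workspace_id,
                                               callback=on_response)
    return thread
# ---------------------------------------------------------------------------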
|
|
from __future__ import print_function
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, from_dtype, errors, typeof
import numba.unittest_support as unittest
from numba.tests.support import TestCase, MemoryLeakMixin
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
no_pyobj_flags.set('nrt')
def reshape_array(a):
return a.reshape(3, 3)
def reshape_array_to_1d(a):
return a.reshape(a.size)
def flatten_array(a):
return a.flatten()
def ravel_array(a):
return a.ravel()
def ravel_array_size(a):
return a.ravel().size
def transpose_array(a):
return a.transpose()
def squeeze_array(a):
return a.squeeze()
def convert_array_str(a):
# astype takes no keyword arguments in numpy 1.6
return a.astype('f4')
def convert_array_dtype(a):
# astype takes no keyword arguments in numpy 1.6
return a.astype(np.float32)
def add_axis1(a):
return np.expand_dims(a, axis=0)
def add_axis2(a):
return a[np.newaxis, :]
def bad_index(arr, arr2d):
x = arr.x,
y = arr.y
# note that `x` is a tuple, which causes a new axis to be created.
arr2d[x, y] = 1.0
def bad_float_index(arr):
# 2D index required for this function because 1D index
# fails typing
return arr[1, 2.0]
class TestArrayManipulation(MemoryLeakMixin, TestCase):
def test_reshape_array(self, flags=enable_pyobj_flags):
a = np.arange(9)
pyfunc = reshape_array
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_reshape_array_npm(self):
self.test_reshape_array(flags=no_pyobj_flags)
def test_reshape_array_to_1d(self, flags=enable_pyobj_flags,
layout='C'):
a = np.arange(9).reshape(3, 3)
if layout == 'F':
a = a.T
pyfunc = reshape_array_to_1d
arraytype1 = typeof(a)
if layout == 'A':
# Force A layout
arraytype1 = arraytype1.copy(layout='A')
self.assertEqual(arraytype1.layout, layout)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
self.assertEqual(got.ndim, 1)
np.testing.assert_equal(expected, got)
def test_reshape_array_to_1d_npm(self):
self.test_reshape_array_to_1d(flags=no_pyobj_flags)
with self.assertRaises(NotImplementedError) as raises:
self.test_reshape_array_to_1d(flags=no_pyobj_flags, layout='F')
self.assertIn("incompatible shape for array", str(raises.exception))
with self.assertTypingError() as raises:
# The following will leak due to lack of post exception cleanup
self.test_reshape_array_to_1d(flags=no_pyobj_flags, layout='A')
self.assertIn("reshape() supports contiguous array only",
str(raises.exception))
# Disable leak check for the last `test_reshape_array_to_1d` call.
self.disable_leak_check()
@unittest.expectedFailure
def test_reshape_array_to_1d_leak_error_npm(self):
"""
Rerun the test in ``test_reshape_array_to_1d_npm`` that will cause
a leak error.
"""
with self.assertRaises(NotImplementedError) as raises:
self.test_reshape_array_to_1d(flags=no_pyobj_flags, layout='F')
self.assertIn("incompatible shape for array", str(raises.exception))
# The leak check is not captured by the expectedFailure.
# We need to disable it because `test_reshape_array_to_1d` will leak
# due to the lack of post exception cleanup
self.disable_leak_check()
# The following checks for memory leak and it will fail.
# This will trigger the expectedFailure
self.assert_no_memory_leak()
def test_flatten_array(self, flags=enable_pyobj_flags):
a = np.arange(9).reshape(3, 3)
pyfunc = flatten_array
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_flatten_array_npm(self):
with self.assertRaises(errors.UntypedAttributeError) as raises:
self.test_flatten_array(flags=no_pyobj_flags)
self.assertIn("flatten", str(raises.exception))
def test_ravel_array(self, flags=enable_pyobj_flags):
a = np.arange(9).reshape(3, 3)
pyfunc = ravel_array
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_ravel_array_size(self, flags=enable_pyobj_flags):
a = np.arange(9).reshape(3, 3)
pyfunc = ravel_array_size
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_ravel_array_npm(self):
with self.assertRaises(errors.UntypedAttributeError) as raises:
self.test_ravel_array(flags=no_pyobj_flags)
self.assertIn("ravel", str(raises.exception))
def test_ravel_array_size_npm(self):
with self.assertRaises(errors.UntypedAttributeError) as raises:
self.test_ravel_array_size(flags=no_pyobj_flags)
self.assertIn("ravel", str(raises.exception))
def test_transpose_array(self, flags=enable_pyobj_flags):
a = np.arange(9).reshape(3, 3)
pyfunc = transpose_array
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_transpose_array_npm(self):
self.test_transpose_array(flags=no_pyobj_flags)
def test_squeeze_array(self, flags=enable_pyobj_flags):
a = np.arange(2 * 1 * 3 * 1 * 4).reshape(2, 1, 3, 1, 4)
pyfunc = squeeze_array
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_squeeze_array_npm(self):
with self.assertRaises(errors.UntypedAttributeError) as raises:
self.test_squeeze_array(flags=no_pyobj_flags)
self.assertIn("squeeze", str(raises.exception))
def test_convert_array_str(self, flags=enable_pyobj_flags):
a = np.arange(9, dtype='i4')
pyfunc = convert_array_str
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
def test_convert_array_str_npm(self):
with self.assertRaises(errors.UntypedAttributeError) as raises:
self.test_convert_array_str(flags=no_pyobj_flags)
self.assertIn("astype", str(raises.exception))
def test_convert_array(self, flags=enable_pyobj_flags):
a = np.arange(9, dtype='i4')
pyfunc = convert_array_dtype
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_convert_array_npm(self):
with self.assertRaises(errors.UntypedAttributeError) as raises:
self.test_convert_array(flags=no_pyobj_flags)
self.assertIn("astype", str(raises.exception))
def test_add_axis1(self, flags=enable_pyobj_flags):
a = np.arange(9).reshape(3, 3)
pyfunc = add_axis1
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_add_axis1_npm(self):
with self.assertRaises(errors.UntypedAttributeError) as raises:
self.test_add_axis1(flags=no_pyobj_flags)
self.assertIn("expand_dims", str(raises.exception))
def test_add_axis2(self, flags=enable_pyobj_flags):
a = np.arange(9).reshape(3, 3)
pyfunc = add_axis2
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_add_axis2_npm(self):
with self.assertTypingError() as raises:
self.test_add_axis2(flags=no_pyobj_flags)
self.assertIn("unsupported array index type none in",
str(raises.exception))
def test_bad_index_npm(self):
with self.assertTypingError() as raises:
arraytype1 = from_dtype(np.dtype([('x', np.int32),
('y', np.int32)]))
arraytype2 = types.Array(types.int32, 2, 'C')
compile_isolated(bad_index, (arraytype1, arraytype2),
flags=no_pyobj_flags)
self.assertIn('unsupported array index type', str(raises.exception))
def test_bad_float_index_npm(self):
with self.assertTypingError() as raises:
compile_isolated(bad_float_index,
(types.Array(types.float64, 2, 'C'),))
self.assertIn('unsupported array index type float64',
str(raises.exception))
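# ---------------------------------------------------------------------------
# Hedged illustration (not part of the test suite): the compile-and-compare
# pattern repeated by the tests above, factored into a helper. `pyfunc` is
# any unary array function defined in this module, e.g. `transpose_array`.
def _compile_and_compare(pyfunc, a, flags=enable_pyobj_flags):
    # Compile the function for the argument's inferred array type.
    cr = compile_isolated(pyfunc, (typeof(a),), flags=flags)
    got = cr.entry_point(a)
    # The compiled result must match the pure-Python result.
    np.testing.assert_equal(pyfunc(a), got)
    return got
# ---------------------------------------------------------------------------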
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Debugger (tfdbg) Stepper Module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import shutil
import tempfile
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import session_ops
# TODO(cais): Use nest.flatten once it handles nest Dicts correctly.
def _flatten_fetches(fetches):
"""Flatten list, tuple of fetches, or a single fetch into a list of fetches.
Args:
fetches: The fetches to flatten: Can be a single Tensor, Op, or a
potentially nested list, tuple or dict of such individual fetches.
Returns:
The fetches flattened to a list.
"""
flattened = []
if isinstance(fetches, (list, tuple)):
for fetch in fetches:
flattened.extend(_flatten_fetches(fetch))
elif isinstance(fetches, dict):
for key in fetches:
flattened.extend(_flatten_fetches(fetches[key]))
else:
flattened.append(fetches)
return flattened
class NodeStepper(object):
"""TensorFlow Debugger (tfdbg) stepper.
The stepper provides the ability to perform "continue to" actions on a graph,
given fetches and feeds. The stepper calculates the transitive closure of the
fetch. cont() (continue to) calls can only be performed on members of the
transitive closure.
On a cont() call, the stepper performs depth-first tracing of the input
tree of the target. When it reaches an input where one of the following is
available, it will supply the available value to the feed_dict of the cont()
call:
(1) Overriding (injected) values from the client.
(2) TensorHandles from previous cont() calls.
(3) Dumped intermediate Tensors from previous cont() calls.
(4) Feeds supplied during the construction of the stepper instance.
During the cont() call, intermediate Tensors are dumped to temporary
directories. The dumped Tensor values will be used in subsequent cont() calls
when they are required as data dependencies.
The temporary directories are automatically cleaned up when the NodeStepper
instance exits as a context manager.
Once the tracing is complete, it will issue a run() call on the
underlying session, using the aforementioned feed_dict prepared by the input
tracing, to achieve the "continue-to" action. The above process takes into
account whether the transitive closure of an input contains Variables that
are updated during previous cont() calls on this stepper instance. If such
updates exist, we say the transitive closure is "dirty" and the stepper
can restore the "clean" state of the Variable and avoid using the
TensorHandle.
Example of basic usage:
a = tf.Variable(1.0, name="a")
b = tf.Variable(2.0, name="b")
c = tf.add(a, b, name="c")
d = tf.multiply(a, c, name="d")
sess = tf.Session()
sess.run(tf.initialize_all_variables())
stepper = NodeStepper(sess, d)
stepper.cont(c) # Caches the handle to Tensor c:0.
stepper.cont(d) # Uses handle to Tensor c:0, avoiding recomputing c.
"""
# Possible types of feed used during cont() calls.
FEED_TYPE_CLIENT = "client"
FEED_TYPE_HANDLE = "handle"
FEED_TYPE_OVERRIDE = "override"
FEED_TYPE_DUMPED_INTERMEDIATE = "dumped_intermediate"
def __init__(self, sess, fetches, feed_dict=None):
"""Constructor for Debugger.
Args:
sess: (Session) the TensorFlow Session to step in.
fetches: Same as the fetches input argument to `Session.run()`.
feed_dict: Same as the feed_dict input argument to `Session.run()`.
"""
self._sess = sess
self._fetches = fetches
flattened_fetches = _flatten_fetches(fetches)
self._fetch_names, self._fetch_list = self._get_fetch_and_name_lists(
flattened_fetches)
# A map from Variable name to initializer op.
self._variable_initializers = {}
# A map from Variable name to initial value, used when overriding or
# restoring Variable values.
self._variable_initial_values = {}
# Initialize the map for output recipients (targets).
self._output_targets = {}
# Sorted transitive closure of the fetched node.
# We also collect the list of the names of the reference-type Tensors,
# because we later need to avoid using intermediate dumps for such Tensors.
(self._sorted_nodes,
self._closure_elements,
self._ref_tensor_names) = self._dfs_visit(self._sess.graph,
self._fetch_list)
self._transitive_closure_set = set(self._sorted_nodes)
# A map from Variable name to the old values (before any cont() calls).
self._cached_variable_values = {}
# A cache map from tensor name to what variables may invalidate the tensor
self._cached_invalidation_path = {}
# Keep track of which variables are in a dirty state.
self._dirty_variables = set()
# Variables updated in the last cont() call.
self._last_updated = None
# Cached tensor handles: a dict with keys as tensor names and values as
# tensor handles.
self._tensor_handles = {}
# Cached intermediate tensor values: a dict mapping tensor names to
# DebugTensorDatum.
self._dumped_intermediate_tensors = {}
self._dump_session_root = tempfile.mkdtemp(prefix="tfdbg_stepper_")
# Feed dict from the client.
self._client_feed_dict = {}
if feed_dict:
for key in feed_dict:
if isinstance(key, ops.Tensor):
self._client_feed_dict[key.name] = feed_dict[key]
else:
self._client_feed_dict[key] = feed_dict[key]
# Overriding tensor values.
self._override_tensors = {}
# What the feed types were used by the last cont() call.
self._last_feed_types = {}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if os.path.isdir(self._dump_session_root):
shutil.rmtree(self._dump_session_root)
def _get_fetch_and_name_lists(self, flattened_fetches):
"""Get the lists of fetches and their names.
Args:
flattened_fetches: A list of fetches or their names. Can mix fetches and
names.
Returns:
(list of str): A list of the names of the fetches.
(list): A list of the fetches.
"""
fetch_names = []
fetch_list = []
for fetch in flattened_fetches:
if isinstance(fetch, six.string_types):
fetch_names.append(fetch)
fetch_list.append(self._sess.graph.as_graph_element(fetch))
else:
fetch_names.append(fetch.name)
fetch_list.append(fetch)
return fetch_names, fetch_list
def _dfs_visit(self, graph, elem_list):
"""Trace back the input of a graph element, using depth-first search.
Uses non-recursive implementation to prevent stack overflow for deep
graphs.
Also performs the following action(s):
1) When encountering a Variable, obtain its initializer op, to
facilitate possible subsequent restoration / overriding of variable
value.
Args:
graph: A TF graph instance.
elem_list: list of graph elements: a Tensor or an Operation.
Returns:
(list of str) A topologically-sorted list of all nodes (not tensors)
in the transitive closure of elem_list. Obviously, the topological sort
is not unique in general. The return value here is just an arbitrary
one of potentially many possible topological sorts.
(list of str) A list of all graph elements (nodes and/or tensors) in the
transitive closure.
(set of str) The names of the reference-type Tensors in the transitive
closure.
"""
# This set should hold only strings, i.e., names of the nodes.
done = set() # Keep track of visited graph elements.
# Input map of the nodes in the transitive closure: maps each node name to
# the set of the names of its input nodes.
node_inputs = dict()
elem_stack = copy.copy(elem_list)
# Graph elements in the transitive closure, including the nodes and tensors.
closure_elements = [elem.name for elem in elem_list]
ref_tensor_names = set()
for element in elem_list:
if isinstance(element, ops.Tensor) and element.dtype._is_ref_dtype: # pylint: disable=protected-access
ref_tensor_names.add(element.name)
while elem_stack:
curr_elem = elem_stack.pop()
curr_node = self._get_node(curr_elem)
done.add(curr_node.name)
non_control_inputs = [inp for inp in curr_node.inputs]
control_inputs = [inp for inp in curr_node.control_inputs]
all_inputs = set(non_control_inputs + control_inputs)
if curr_node.name not in node_inputs:
all_input_nodes = set()
for inp in all_inputs:
all_input_nodes.add(self._get_node(inp).name)
node_inputs[curr_node.name] = all_input_nodes
# Iterate through the (non-control) inputs.
for inp in all_inputs:
# Set up the non-control output map.
# if is_non_control_input:
if inp.name not in self._output_targets:
self._output_targets[inp.name] = set([curr_elem.name])
else:
self._output_targets[inp.name].add(curr_elem.name)
if (isinstance(inp, ops.Tensor) and
inp.op.type in ["Variable", "VariableV2"] and
inp.name not in self._variable_initializers):
# Obtain the initializer op of the variable, in case the Variable's
# value needs to be restored later.
initializer = graph.as_graph_element(inp.op.name + "/Assign")
self._variable_initializers[inp.name] = initializer
self._variable_initial_values[inp.name] = initializer.inputs[1]
inp_node = self._get_node(inp)
if inp_node.name in done:
# Already visited.
continue
elem_stack.append(inp)
closure_elements.append(inp.name)
if isinstance(inp, ops.Tensor) and inp.dtype._is_ref_dtype: # pylint: disable=protected-access
ref_tensor_names.add(inp.name)
# Now that we have traversed the transitive closure and obtained the
# node-input map, we can topologically sort them.
sorted_nodes = []
stack = []
for node in node_inputs:
if not node_inputs[node]:
stack.append(node)
for node in stack:
del node_inputs[node]
while stack:
curr_node = stack.pop()
sorted_nodes.append(curr_node)
# Iterate through the node-input map and remove the child.
pushes = []
for node in node_inputs:
if curr_node in node_inputs[node]:
node_inputs[node].remove(curr_node)
if not node_inputs[node]:
pushes.append(node)
# Delete new pushes from node-input map.
for node in pushes:
del node_inputs[node]
stack.extend(pushes)
return sorted_nodes, closure_elements, ref_tensor_names
def sorted_nodes(self):
"""Get a topologically-sorted list of node names of the stepper.
These are the names of the nodes (i.e., not Tensors) in the transitive
closure of the stepper, in a topologically-sorted order.
Returns:
(list of str): Sorted transitive inputs to the fetch of the stepper
instance. The fetch itself is included in the list.
"""
return self._sorted_nodes
def closure_elements(self):
"""Get a name list of the graph elements of the stepper.
Returns:
(list of str): names of the graph elements (i.e., nodes and tensors) in
the transitive closure of the stepper, in a random order.
"""
return self._closure_elements
def output_slots_in_closure(self, node_name):
"""Get the output tensors in the transitive closure from node.
Args:
node_name: (str) Name of the node in question.
Returns:
(list of int) Output slots of the output tensors of the node that are in
the transitive closure of the stepper.
"""
node = self._sess.graph.as_graph_element(node_name)
tensor_slots = []
for i, _ in enumerate(node.outputs):
tensor_name = node_name + ":%d" % i
if tensor_name in self._closure_elements:
tensor_slots.append(i)
return tensor_slots
def is_feedable(self, name):
"""Determine if a graph element if feedable.
Args:
name: (str) name of the graph element (Tensor or Operation)
Returns:
(bool) whether the graph element is feedable.
"""
if not isinstance(name, six.string_types):
raise TypeError("Expected type str; got type %s" % type(name))
elem = self._sess.graph.as_graph_element(name)
return self._sess.graph.is_feedable(elem)
def override_tensor(self, tensor_name, overriding_val):
"""Override the value of a tensor.
Args:
tensor_name: (str) Name of the tensor to override.
overriding_val: (numpy.ndarray) Overriding tensor value.
Raises:
ValueError: If tensor_name does not correspond to a tensor in the input
tree to the fetched graph element of this stepper instance.
"""
if not isinstance(tensor_name, six.string_types):
raise TypeError("Expected type str; got type %s" % type(tensor_name))
node_name = self._get_node_name(tensor_name)
if node_name not in self._transitive_closure_set:
raise ValueError(
"Cannot override tensor \"%s\" because it does not exist in the "
"input tree to the fetch \"%s\"" %
(tensor_name, repr(self._fetch_names)))
self._override_tensors[tensor_name] = overriding_val
# Invalidate cache by tracing outputs.
self._invalidate_transitively_outgoing_cache(tensor_name)
def remove_override(self, tensor_name):
"""Remove the overriding value on a tensor.
Args:
tensor_name: (str) name of the tensor to remove the overriding value
from.
Raises:
ValueError: If no overriding value exists for tensor_name.
"""
if tensor_name not in self._override_tensors:
raise ValueError("No overriding value exists for tensor \"%s\"." %
tensor_name)
del self._override_tensors[tensor_name]
# Invalidate cache by tracing outputs.
self._invalidate_transitively_outgoing_cache(tensor_name)
def last_feed_types(self):
"""Obtain information about the feed in the last cont() call.
Returns:
(dict) A dict mapping tensor names to feed types.
"""
return self._last_feed_types
def cont(self,
target,
use_tensor_handles=True,
use_dumped_intermediates=True,
use_overrides=True,
invalidate_from_updated_variables=False,
restore_variable_values=False):
"""Continue till the completion of the specified target tensor.
Args:
target: A single fetched Tensor or Op, or a name (str) representing the
Tensor or Op. In the case of a name str, the graph will be searched
to find the corresponding Tensor or Op.
# TODO(cais): Support multiple fetches as in Session.run() interface.
use_tensor_handles: (bool) Whether this cont() run will use cached tensor
handles to avoid recomputation. Default: True.
use_dumped_intermediates: (bool) Whether this cont() call will use dumped
intermediate tensors to avoid recomputation.
use_overrides: (bool) Whether the overriding tensor values supplied by
the client are to be used in this cont() call. Default: True.
invalidate_from_updated_variables: (bool) Whether to invalidate the
tensor handles and intermediate tensor handles affected by the
Variable updates that happen in this cont() call.
restore_variable_values: (bool) Whether the old values of the variables
(before any cont() calls in this object) are to be restored.
Returns:
Value from Session.run() of the target.
Raises:
ValueError: If the target is specified as a string and the string does
not correspond to any tensors in the Session graph.
Or if the target of this cont() is not in the input list of the Stepper
object's target.
Or if target is a Placeholder.
"""
self._last_feed_types = {}
if isinstance(target, six.string_types):
# Fetch target is a string. Assume it is the name of the Tensor or Op and
# will attempt to find it in the Session's graph.
target_name = target
else:
target_name = target.name
graph_element = self._sess.graph.as_graph_element(target_name)
# Any additional tensor handles to obtain in this cont() action.
additional_handle_requests = []
if (isinstance(graph_element, ops.Tensor) and
graph_element.op.type == "Placeholder"):
self._last_feed_types[graph_element.name] = self.FEED_TYPE_CLIENT
return self._client_feed_dict[graph_element.name]
elif (isinstance(graph_element, ops.Operation) and
graph_element.type == "Placeholder"):
tensor_name = graph_element.name + ":0"
self._last_feed_types[tensor_name] = self.FEED_TYPE_CLIENT
return self._client_feed_dict[tensor_name]
if isinstance(graph_element, ops.Operation) and graph_element.outputs:
# Check if this op has any output tensors that also fall into this
# stepper's transitive closure.
node_outputs = [
output.name for output in graph_element.outputs
if output.name in self._closure_elements
]
if node_outputs:
# The target is an op with at least one output within the transitive
# closure. The cont() action will amount to using the 0-th
# output Tensor as the target, as well as obtaining handles to it
# and to the rest of the outputs tensors in the transitive closure
# (if any).
target_name = node_outputs[0]
additional_handle_requests = node_outputs[1:]
# Verify that the target is in the transitive closure of the stepper's
# fetch.
target_node_name = self._get_node_name(target_name)
if target_node_name not in self._transitive_closure_set:
raise ValueError(
"Target \"%s\" is not in the transitive closure for the fetch of the "
"stepper: \"%s\"." % (target_name, repr(self._fetch_names)))
# Check if a cached tensor handle can be used on the fetch directly.
if use_tensor_handles and target_name in self._tensor_handles:
self._last_feed_types[target_name] = self.FEED_TYPE_HANDLE
return self._tensor_handles[target_name].eval()
# Check if a dumped intermediate tensor can be used on the fetch directly.
if (use_dumped_intermediates and
target_name in self._dumped_intermediate_tensors):
self._last_feed_types[target_name] = self.FEED_TYPE_DUMPED_INTERMEDIATE
return self._dumped_intermediate_tensors[target_name].get_tensor()
# Check if an overriding tensor value can be used directly.
if use_overrides and target_name in self._override_tensors:
# Override is available. Return the value right away.
self._last_feed_types[target_name] = self.FEED_TYPE_OVERRIDE
return self._override_tensors[target_name]
# Keep track of which variables are restored in this cont() call.
restored_variables = set()
# Keep track of which variables are "touched" (i.e., possibly updated) in
# this cont() call.
self._last_updated = set()
# =========================================================================
# Use a non-recursive method to trace the inputs from the node and set up
# the feeds.
feeds = {} # The feeds to be used in the Session.run() call.
fetched = self._sess.graph.as_graph_element(target_name)
elem_stack = [fetched]
done = set()
while elem_stack:
curr_elem = elem_stack.pop()
curr_node = self._get_node(curr_elem)
done.add(curr_node.name)
non_control_inputs = [inp for inp in curr_node.inputs]
control_inputs = [inp for inp in curr_node.control_inputs]
all_inputs = set(non_control_inputs + control_inputs)
# Iterate through the (non-control) inputs.
for inp in all_inputs:
# Determine whether the input is feedable. Reference-type tensors,
# e.g., Variables, should not be fed, because they can change.
if isinstance(inp, ops.Tensor):
is_inp_ref = inp.dtype._is_ref_dtype # pylint: disable=protected-access
can_feed = self._sess.graph.is_feedable(inp) and not is_inp_ref
else:
is_inp_ref = False
can_feed = False
if (restore_variable_values and inp.name in self._dirty_variables and
inp.name not in restored_variables and
inp.name not in self._last_updated):
# Do not restore Variables touched or restored previously in this
# cont() call.
initializer_op = self._variable_initializers[inp.name]
initial_value_tensor = self._variable_initial_values[inp.name]
self._sess.run(initializer_op,
feed_dict={
initial_value_tensor:
self._cached_variable_values[inp.name]
})
# Mark the variable as restored.
restored_variables.add(inp.name)
# Determine if this is a reference-type input from a variable, and
# the recipient node is not Identity. In that case, the Variable
# needs to be marked as dirty and its current value recorded, due to
# the fact that the receiving op may mutate the value of the Variable.
if (is_inp_ref and inp.op.type in ["Variable", "VariableV2"] and
curr_node.type != "Identity"):
# Mark the variable as dirty.
self._last_updated.add(inp.name)
# Obtain the old value of the variable and cache it.
if inp.name not in self._cached_variable_values:
old_value = self._sess.run(inp)
self._cached_variable_values[inp.name] = old_value
# N.B.: The order of the logical branches matters. For example,
# _client_feed_dict comes after _tensor_handles, so that tensor
# handles stored in cont() calls can override the original client
# feeds. Also, for example, _override_tensors comes first, so the manual
# overriding, if it exists, can always take effect.
if use_overrides and can_feed and inp.name in self._override_tensors:
# Use client-supplied overriding tensor value.
feeds[inp] = self._override_tensors[inp.name]
self._last_feed_types[inp.name] = self.FEED_TYPE_OVERRIDE
elif (can_feed and inp not in feeds and
use_tensor_handles and inp.name in self._tensor_handles):
# Tensor handle found in cache.
feeds[inp] = self._tensor_handles[inp.name]
self._last_feed_types[inp.name] = self.FEED_TYPE_HANDLE
elif (can_feed and inp not in feeds and
use_dumped_intermediates and
inp.name in self._dumped_intermediate_tensors):
# Dumped intermediate Tensor found.
feeds[inp] = self._dumped_intermediate_tensors[inp.name].get_tensor()
self._last_feed_types[inp.name] = self.FEED_TYPE_DUMPED_INTERMEDIATE
elif inp.name in self._client_feed_dict:
# This input is available in the client feed_dict.
feeds[inp] = self._client_feed_dict[inp.name]
self._last_feed_types[inp.name] = self.FEED_TYPE_CLIENT
else:
# There is no feed available for this input. So keep tracing its
# input(s).
inp_node = self._get_node(inp)
if inp_node.name in done:
# Already visited.
continue
elem_stack.append(inp)
done.add(inp_node.name)
# =========================================================================
if self._last_updated:
self._dirty_variables.update(self._last_updated)
for variable in restored_variables:
self._dirty_variables.remove(variable)
(dump_path,
run_options) = self._prepare_cont_call_dump_path_and_run_options()
if isinstance(fetched, ops.Operation):
# The fetched is an Operation: Will not get tensor handle.
self._sess.run(fetched, feed_dict=feeds, options=run_options)
return_value = None
else:
# This is a Tensor: Will get tensor handle and cache it.
# Will also get the additional requested tensor handles (if any).
tensors_to_get_handles_for = [fetched]
handle_names = [target_name]
tensors_to_get_handles_for.extend([
self._sess.graph.as_graph_element(h)
for h in additional_handle_requests
])
handle_names.extend(additional_handle_requests)
handles = self._sess.run(
[session_ops.get_session_handle(tensor) for tensor in
tensors_to_get_handles_for],
feed_dict=feeds,
options=run_options)
for handle_name, handle in zip(handle_names, handles):
self._tensor_handles[handle_name] = handle
return_value = self._tensor_handles[target_name].eval()
self._load_dumped_intermediate_tensors(dump_path, target_name)
if invalidate_from_updated_variables:
# Invalidate caches at the end.
for last_updated_variable in self._last_updated:
self._invalidate_transitively_outgoing_cache(last_updated_variable)
return return_value
def _prepare_cont_call_dump_path_and_run_options(self):
"""Prepare the dump path and RunOptions for next cont() call.
Returns:
dump_path: (str) Directory path to which the intermediate tensor will be
dumped.
run_options: (config_pb2.RunOptions) The RunOptions containing the tensor
watch options for this graph.
"""
run_options = config_pb2.RunOptions()
dump_path = self._cont_call_dump_path()
for element_name in self._closure_elements:
if ":" in element_name:
debug_utils.add_debug_tensor_watch(
run_options,
debug_data.get_node_name(element_name),
output_slot=debug_data.get_output_slot(element_name),
debug_urls=["file://" + dump_path])
return dump_path, run_options
def _cont_call_dump_path(self):
return os.path.join(self._dump_session_root,
"cont_%d" % int(time.time() * 1e6))
def _load_dumped_intermediate_tensors(self, dump_path, target_name):
dump_dir = debug_data.DebugDumpDir(dump_path, validate=False)
for dump in dump_dir.dumped_tensor_data:
if (dump.tensor_name not in self._ref_tensor_names and
dump.tensor_name not in self._tensor_handles and
dump.tensor_name not in self._override_tensors and
dump.tensor_name != target_name):
self._dumped_intermediate_tensors[dump.tensor_name] = dump
def _get_node_name(self, graph_element_name):
return graph_element_name.split(":")[0]
def _invalidate_transitively_outgoing_cache(self, source_element):
"""Invalidate the cached tensor handles by tracing output.
This method is used to invalidate caches such as cached TensorHandles
and intermediate tensor values when Variable mutation happens or when
client overrides tensor values.
Uses non-recursive implementation to avoid stack overflow on deep networks.
Args:
source_element: The source graph element (e.g., a Variable output slot)
to trace the output from.
"""
if not self._tensor_handles and not self._dumped_intermediate_tensors:
return
# First, use cached invalidation paths to eliminate some cached tensor
# handles and intermediate tensors.
to_delete_handles = []
for handle_name in self._tensor_handles:
if (handle_name in self._cached_invalidation_path and
source_element in self._cached_invalidation_path[handle_name]):
to_delete_handles.append(handle_name)
for handle_name in to_delete_handles:
del self._tensor_handles[handle_name]
to_delete_intermediates = []
for intm_tensor_name in self._dumped_intermediate_tensors:
if (intm_tensor_name in self._cached_invalidation_path and
source_element in self._cached_invalidation_path[intm_tensor_name]):
to_delete_intermediates.append(intm_tensor_name)
for intermediate in to_delete_intermediates:
del self._dumped_intermediate_tensors[intermediate]
if not self._tensor_handles and not self._dumped_intermediate_tensors:
return
stack = [source_element]
done = set()
while stack:
curr_element = stack.pop()
done.add(curr_element)
if (curr_element in self._tensor_handles or
curr_element in self._dumped_intermediate_tensors):
# Cache the invalidation path for potential future use.
if curr_element not in self._cached_invalidation_path:
self._cached_invalidation_path[curr_element] = set([source_element])
else:
self._cached_invalidation_path[curr_element].add(source_element)
if curr_element in self._tensor_handles:
del self._tensor_handles[curr_element]
else:
del self._dumped_intermediate_tensors[curr_element]
targets = self._output_targets.get(curr_element, [])
for target in targets:
if target in done:
continue
else:
stack.append(target)
def finalize(self):
"""Run the final fetch(es).
Restore the dirty variables; ignore the client-supplied overriding tensor
values.
Returns:
The same return value as self.cont() as called on the final fetch.
"""
self.restore_variable_values()
return self._sess.run(self._fetches, feed_dict=self._client_feed_dict)
def restore_variable_values(self):
"""Restore variables to the initial values.
"Initial value" refers to the value when this NodeStepper instance was
first constructed.
"""
for var_name in self._dirty_variables:
self._sess.run(self._variable_initializers[var_name],
feed_dict={
self._variable_initial_values[var_name]:
self._cached_variable_values[var_name]
})
def handle_names(self):
"""Return names of the TensorHandles that the debugger is holding.
Returns:
(list of str) Name of the tensors for which TensorHandle is available.
"""
return [name for name in self._tensor_handles]
def handle_node_names(self):
"""Get list of names of the nodes for which handles are available.
Returns:
(set of str) List of names of the nodes.
"""
return set([self._get_node_name(name) for name in self._tensor_handles])
def intermediate_tensor_names(self):
"""Get list of the names of the Tensors for which dumps are available.
Returns:
(list of str) List of the names of the Tensors for which intermediate
dumps are available.
"""
return self._dumped_intermediate_tensors.keys()
def last_updated(self):
"""Get the names of the variables updated in the last cont() call.
Returns:
A set of the variable names updated in the previous cont() call.
If no cont() call has occurred before, returns None.
"""
return self._last_updated
def dirty_variables(self):
"""Get the set of variables that are currently "dirty".
"dirty" means:
previous cont() calls have updated the value of the Variable,
and the Variable's old value (the value before any cont() calls
happened) was not restored.
Returns:
(set) A set of dirty variables.
"""
return self._dirty_variables
def is_placeholder(self, graph_element_name):
"""Check whether a graph element is a Placeholder, by name.
Args:
graph_element_name: (str) Name of the tensor or op to be tested.
Returns:
(bool) Whether the graph element of the specified name is a Placeholder
op or the output Tensor of a Placeholder op.
Raises:
ValueError: If graph_element_name is not in the transitive closure of the
stepper instance.
"""
node_name = self._get_node_name(graph_element_name)
if node_name not in self.sorted_nodes():
raise ValueError(
"%s is not in the transitive closure of this NodeStepper "
"instance" % graph_element_name)
graph_element = self._sess.graph.as_graph_element(graph_element_name)
if not isinstance(graph_element, ops.Operation):
graph_element = graph_element.op
return graph_element.type == "Placeholder"
def placeholders(self):
"""Get the list of Placeholder Tensors in the transitive closure.
Returns:
(list of str) A list of Placeholder Tensors or ops in the transitive
closure.
"""
placeholders = []
for item in self.sorted_nodes():
if self.is_placeholder(item):
placeholders.append(item)
return placeholders
def get_tensor_value(self, tensor_name):
"""Get the value of a tensor that the stepper has access to.
Args:
tensor_name: (str) Name of the tensor.
Returns:
Value of the tensor, from overriding values or cached tensor handles.
Raises:
ValueError: If the value is not available as an overriding value
or through a TensorHandle.
"""
if self.is_placeholder(tensor_name):
if ":" not in tensor_name:
tensor_name += ":0"
return self._client_feed_dict[tensor_name]
elif tensor_name in self._override_tensors:
return self._override_tensors[tensor_name]
elif tensor_name in self._tensor_handles:
return self._tensor_handles[tensor_name].eval()
elif tensor_name in self._dumped_intermediate_tensors:
return self._dumped_intermediate_tensors[tensor_name].get_tensor()
else:
raise ValueError(
"This stepper instance does not have access to the value of "
"tensor \"%s\"" % tensor_name)
def override_names(self):
"""Return names of the TensorHandles that the debugger is holding.
Returns:
(list of str) Name of the tensor for which overriding tensor values are
available.
"""
return [name for name in self._override_tensors]
def _get_node(self, element):
"""Get the node of a graph element.
Args:
element: A graph element (Op, Tensor or Node)
Returns:
The node associated with element in the graph.
"""
node_name, _ = debug_data.parse_node_or_tensor_name(element.name)
return self._sess.graph.as_graph_element(node_name)
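# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the module): the override / restore
# workflow described in the docstrings above. It assumes a graph-mode
# tf.Session and a fetch whose transitive closure contains a tensor named
# "c:0"; the names are illustrative only.
def _example_stepper_override(sess, fetch):
    with NodeStepper(sess, fetch) as stepper:
        # Continue to an intermediate tensor; its handle is cached.
        intermediate = stepper.cont("c:0")
        # Inject a client-side value. Cached handles and dumped intermediates
        # that transitively depend on "c:0" are invalidated.
        stepper.override_tensor("c:0", intermediate * 2.0)
        overridden = stepper.cont(fetch, use_overrides=True)
        # Drop the override and restore any Variables dirtied by cont().
        stepper.remove_override("c:0")
        stepper.restore_variable_values()
        return overridden
# ---------------------------------------------------------------------------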
|
|
# Authors : Alexandre Gramfort, alexandre.gramfort@inria.fr (2011)
# Denis A. Engemann <denis.engemann@gmail.com>
# License : BSD 3-clause
from functools import partial
import numpy as np
from ..parallel import parallel_func
from ..io.pick import _picks_to_idx
from ..utils import logger, verbose, _time_mask, _check_option
from .multitaper import psd_array_multitaper
def _decomp_aggregate_mask(epoch, func, average, freq_sl):
_, _, spect = func(epoch)
spect = spect[..., freq_sl, :]
# Do the averaging here (per epoch) to save memory
if average == 'mean':
spect = np.nanmean(spect, axis=-1)
elif average == 'median':
spect = np.nanmedian(spect, axis=-1)
return spect
def _spect_func(epoch, func, freq_sl, average):
"""Aux function."""
# Decide if we should split this to save memory or not, since doing
# multiple calls will incur some performance overhead. Eventually we might
# want to write (really, go back to) our own spectrogram implementation
# that, if possible, averages after each transform, but this will incur
# a lot of overhead because of the many Python calls required.
kwargs = dict(func=func, average=average, freq_sl=freq_sl)
if epoch.nbytes > 10e6:
spect = np.apply_along_axis(
_decomp_aggregate_mask, -1, epoch, **kwargs)
else:
spect = _decomp_aggregate_mask(epoch, **kwargs)
return spect
def _check_nfft(n, n_fft, n_per_seg, n_overlap):
"""Ensure n_fft, n_per_seg and n_overlap make sense."""
if n_per_seg is None and n_fft > n:
raise ValueError(('If n_per_seg is None, n_fft is not allowed to be > '
'n_times. If you want zero-padding, you have to set '
'n_per_seg to relevant length. Got n_fft of %d while'
' signal length is %d.') % (n_fft, n))
n_per_seg = n_fft if n_per_seg is None or n_per_seg > n_fft else n_per_seg
n_per_seg = n if n_per_seg > n else n_per_seg
if n_overlap >= n_per_seg:
raise ValueError(('n_overlap cannot be greater than n_per_seg (or '
'n_fft). Got n_overlap of %d while n_per_seg is '
'%d.') % (n_overlap, n_per_seg))
return n_fft, n_per_seg, n_overlap
def _check_psd_data(inst, tmin, tmax, picks, proj, reject_by_annotation=False):
"""Check PSD data / pull arrays from inst."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (BaseEpochs, BaseRaw, Evoked)):
raise ValueError('epochs must be an instance of Epochs, Raw, or '
'Evoked. Got type {}'.format(type(inst)))
time_mask = _time_mask(inst.times, tmin, tmax, sfreq=inst.info['sfreq'])
picks = _picks_to_idx(inst.info, picks, 'data', with_ref_meg=False)
if proj:
# Copy first so it's not modified
inst = inst.copy().apply_proj()
sfreq = inst.info['sfreq']
if isinstance(inst, BaseRaw):
start, stop = np.where(time_mask)[0][[0, -1]]
rba = 'NaN' if reject_by_annotation else None
data = inst.get_data(picks, start, stop + 1, reject_by_annotation=rba)
elif isinstance(inst, BaseEpochs):
data = inst.get_data(picks=picks)[:, :, time_mask]
else: # Evoked
data = inst.data[picks][:, time_mask]
return data, sfreq
@verbose
def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0,
n_per_seg=None, n_jobs=1, average='mean', verbose=None):
"""Compute power spectral density (PSD) using Welch's method.
Parameters
----------
x : array, shape=(..., n_times)
The data to compute PSD from.
sfreq : float
The sampling frequency.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
%(n_jobs)s
average : str | None
How to average the segments. If ``mean`` (default), calculate the
arithmetic mean. If ``median``, calculate the median, corrected for
its bias relative to the mean. If ``None``, returns the unaggregated
segments.
.. versionadded:: 0.19.0
%(verbose)s
Returns
-------
psds : ndarray, shape (..., n_freqs) or (..., n_freqs, n_segments)
The power spectral densities. If ``average='mean'`` or
``average='median'``, the returned array will have the same shape
as the input data plus an additional frequency dimension.
If ``average=None``, the returned array will have the same shape as
the input data plus two additional dimensions corresponding to
frequencies and the unaggregated segments, respectively.
freqs : ndarray, shape (n_freqs,)
The frequencies.
Notes
-----
.. versionadded:: 0.14.0
"""
_check_option('average', average, (None, 'mean', 'median'))
dshape = x.shape[:-1]
n_times = x.shape[-1]
x = x.reshape(-1, n_times)
# Prep the PSD
n_fft, n_per_seg, n_overlap = _check_nfft(n_times, n_fft, n_per_seg,
n_overlap)
win_size = n_fft / float(sfreq)
logger.info("Effective window size : %0.3f (s)" % win_size)
freqs = np.arange(n_fft // 2 + 1, dtype=float) * (sfreq / n_fft)
freq_mask = (freqs >= fmin) & (freqs <= fmax)
if not freq_mask.any():
raise ValueError(
f'No frequencies found between fmin={fmin} and fmax={fmax}')
freq_sl = slice(*(np.where(freq_mask)[0][[0, -1]] + [0, 1]))
del freq_mask
freqs = freqs[freq_sl]
# Parallelize across first N-1 dimensions
x_splits = np.array_split(x, n_jobs)
from scipy.signal import spectrogram
parallel, my_spect_func, n_jobs = parallel_func(_spect_func, n_jobs=n_jobs)
func = partial(spectrogram, noverlap=n_overlap, nperseg=n_per_seg,
nfft=n_fft, fs=sfreq)
f_spect = parallel(my_spect_func(d, func=func, freq_sl=freq_sl,
average=average)
for d in x_splits)
psds = np.concatenate(f_spect, axis=0)
shape = dshape + (len(freqs),)
if average is None:
shape = shape + (-1,)
psds.shape = shape
return psds, freqs
@verbose
def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256,
n_overlap=0, n_per_seg=None, picks=None, proj=False, n_jobs=1,
reject_by_annotation=True, average='mean', verbose=None):
"""Compute the power spectral density (PSD) using Welch's method.
Calculates periodograms for a sliding window over the time dimension, then
averages them together for each channel/epoch.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation.
fmin : float
Min frequency of interest.
fmax : float
Max frequency of interest.
tmin : float | None
Min time of interest.
tmax : float | None
Max time of interest.
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
If n_per_seg is None, n_fft must be <= number of time points
in the data.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
%(picks_good_data_noref)s
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
%(n_jobs)s
reject_by_annotation : bool
Whether to omit bad segments from the data while computing the
PSD. If True, annotated segments with a description that starts
with 'bad' are omitted. Has no effect if ``inst`` is an Epochs or
Evoked object. Defaults to True.
.. versionadded:: 0.15.0
average : str | None
How to average the segments. If ``mean`` (default), calculate the
arithmetic mean. If ``median``, calculate the median, corrected for
its bias relative to the mean. If ``None``, returns the unaggregated
segments.
.. versionadded:: 0.19.0
%(verbose)s
Returns
-------
psds : ndarray, shape (..., n_freqs) or (..., n_freqs, n_segments)
The power spectral densities. If ``average='mean'`` or
``average='median'`` and input is of type Raw or Evoked, then psds will
be of shape (n_channels, n_freqs); if input is of type Epochs, then
psds will be of shape (n_epochs, n_channels, n_freqs).
If ``average=None``, the returned array will have an additional
dimension corresponding to the unaggregated segments.
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_multitaper
psd_array_welch
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj,
reject_by_annotation=reject_by_annotation)
return psd_array_welch(data, sfreq, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_overlap=n_overlap, n_per_seg=n_per_seg,
average=average, n_jobs=n_jobs, verbose=verbose)
@verbose
def psd_multitaper(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, proj=False,
n_jobs=1, verbose=None):
"""Compute the power spectral density (PSD) using multitapers.
Calculates spectral density for orthogonal tapers, then averages them
together for each channel/epoch. See [1] for a description of the tapers
and [2] for the general method.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation.
fmin : float
Min frequency of interest.
fmax : float
Max frequency of interest.
tmin : float | None
Min time of interest.
tmax : float | None
Max time of interest.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
%(picks_good_data_noref)s
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
%(n_jobs)s
%(verbose)s
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_array_multitaper
psd_welch
csd_multitaper
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Slepian, D. "Prolate spheroidal wave functions, Fourier analysis,
and uncertainty V: The discrete case." Bell System Technical
Journal, vol. 57, 1978.
.. [2] Percival D.B. and Walden A.T. "Spectral Analysis for Physical
Applications: Multitaper and Conventional Univariate Techniques."
Cambridge University Press, 1993.
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj)
return psd_array_multitaper(data, sfreq, fmin=fmin, fmax=fmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
n_jobs=n_jobs, verbose=verbose)
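# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the module): computes Welch PSDs for a
# synthetic multichannel signal with the array-level API defined above. The
# signal and window parameters are illustrative only.
def _example_psd_array_welch():
    sfreq = 256.
    rng = np.random.RandomState(0)
    times = np.arange(0, 10, 1. / sfreq)
    # 3 channels of white noise plus a shared 10 Hz sinusoid.
    data = rng.randn(3, times.size) + np.sin(2 * np.pi * 10 * times)
    psds, freqs = psd_array_welch(data, sfreq, fmin=1., fmax=40.,
                                  n_fft=512, n_overlap=256)
    return psds, freqs  # psds has shape (3, len(freqs))
# ---------------------------------------------------------------------------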
|
|
# -*- coding: utf-8 -*-
import datetime
from typing import Optional, List, Any
from owlmixin import OwlMixin, TOption, TList, TDict, OwlEnum
from requests.structures import CaseInsensitiveDict as RequestsCaseInsensitiveDict
from requests_toolbelt.utils import deprecated
from jumeaux.addons.models import Addons
from jumeaux.domain.config.vo import (
PathReplace,
QueryCustomization,
AccessPoint,
Concurrency,
OutputSummary,
Notifier,
)
DictOrList = any # type: ignore
def to_json(value: DictOrList) -> str: # type: ignore
if isinstance(value, dict):
return TDict(value).to_json()
if isinstance(value, list):
return TList(value).to_json()
raise TypeError("Argument must be a dict or a list")
class CaseInsensitiveDict(RequestsCaseInsensitiveDict):
pass
class Status(OwlEnum):
SAME = "same"
DIFFERENT = "different"
FAILURE = "failure"
class HttpMethod(OwlEnum):
GET = "GET"
POST = "POST"
# or {}
class Request(OwlMixin):
name: TOption[str]
method: HttpMethod = HttpMethod.GET # type: ignore # Prevent for enum problem
path: str
qs: TDict[TList[str]] = {}
raw: TOption[str]
form: TOption[dict]
json: TOption[dict]
headers: TDict[str] = {}
url_encoding: str = "utf-8"
class Proxy(OwlMixin):
http: str
https: str
@classmethod
def from_host(cls, host: TOption[str]) -> "Proxy":
return (
Proxy.from_dict({"http": f"http://{host.get()}", "https": f"https://{host.get()}"})
if not host.is_none()
else None
)
class Response(OwlMixin):
body: bytes
encoding: TOption[str]
headers: CaseInsensitiveDict
url: str
status_code: int
elapsed: datetime.timedelta
elapsed_sec: float
type: str
@property
def text(self) -> str:
# Refer https://github.com/requests/requests/blob/e4fc3539b43416f9e9ba6837d73b1b7392d4b242/requests/models.py#L831
return self.body.decode(self.encoding.get_or("utf8"), errors="replace")
@property
def byte(self) -> int:
return len(self.body)
@property
def content_type(self) -> TOption[str]:
return TOption(self.headers.get("content-type"))
@property
def mime_type(self) -> TOption[str]:
return self.content_type.map(lambda x: x.split(";")[0])
@property
def charset(self) -> TOption[str]:
return self.content_type.map(lambda x: x.split(";")[1] if len(x.split(";")) > 1 else None)
@property
def ok(self) -> bool:
return self.status_code == 200
@classmethod
def ___headers(cls, v):
return CaseInsensitiveDict(v)
@classmethod
def _decide_encoding(
cls, res: Any, default_encoding: TOption[str] = TOption(None)
) -> Optional[str]:
content_type = res.headers.get("content-type")
if content_type and "octet-stream" in content_type:
return None
# XXX: See 2.2 in https://tools.ietf.org/html/rfc2616#section-2.2
if res.encoding and not ("text" in content_type and res.encoding == "ISO-8859-1"):
return res.encoding
meta_encodings: List[str] = deprecated.get_encodings_from_content(res.content)
return (
meta_encodings[0] if meta_encodings else default_encoding.get() or res.apparent_encoding
)
@classmethod
def _to_type(cls, res: Any) -> str:
content_type = res.headers.get("content-type")
if not content_type:
return "unknown"
return content_type.split(";")[0].split("/")[1]
@classmethod
def from_requests(cls, res: Any, default_encoding: TOption[str] = TOption(None)) -> "Response":
encoding: Optional[str] = cls._decide_encoding(res, default_encoding)
type: str = cls._to_type(res)
return Response.from_dict(
{
"body": res.content,
"encoding": encoding,
"headers": res.headers,
"url": res.url,
"status_code": res.status_code,
"elapsed": res.elapsed,
"elapsed_sec": round(res.elapsed.seconds + res.elapsed.microseconds / 1000000, 2),
"type": type,
}
)
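# Hedged sketch (illustration only; not part of the original module): a
# minimal stand-in for a ``requests`` response showing how
# ``Response.from_requests`` captures body, encoding, type and timing. All
# attribute values below are assumptions chosen for the example.
def _example_response_from_requests():  # pragma: no cover
    class _FakeRequestsResponse:
        headers = CaseInsensitiveDict({"content-type": "application/json; charset=utf-8"})
        encoding = "utf-8"
        apparent_encoding = "utf-8"
        content = b'{"ok": true}'
        url = "http://localhost/api"
        status_code = 200
        elapsed = datetime.timedelta(milliseconds=120)

    res = Response.from_requests(_FakeRequestsResponse())
    assert res.ok
    assert res.type == "json"
    assert res.byte == len(b'{"ok": true}')
    return res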
# --------
class ChallengeArg(OwlMixin):
seq: int
number_of_request: int
key: str
session: object
req: Request
host_one: str
host_other: str
path_one: TOption[PathReplace]
path_other: TOption[PathReplace]
query_one: TOption[QueryCustomization]
query_other: TOption[QueryCustomization]
proxy_one: TOption[Proxy]
proxy_other: TOption[Proxy]
headers_one: TDict[str]
headers_other: TDict[str]
default_response_encoding_one: TOption[str]
default_response_encoding_other: TOption[str]
res_dir: str
judge_response_header: bool
ignore_response_header_keys: TList[str]
# --------
class StatusCounts(OwlMixin):
same: int = 0
different: int = 0
failure: int = 0
class Time(OwlMixin):
start: str # yyyy/MM/dd hh:mm:ss
end: str # yyyy/MM/dd hh:mm:ss
elapsed_sec: int
class Summary(OwlMixin):
one: AccessPoint
other: AccessPoint
status: StatusCounts
tags: TList[str]
time: Time
concurrency: Concurrency
output: OutputSummary
default_encoding: TOption[str]
class DiffKeys(OwlMixin):
added: TList[str]
changed: TList[str]
removed: TList[str]
def is_empty(self) -> bool:
return len(self.added) == len(self.changed) == len(self.removed) == 0
@classmethod
def empty(cls) -> "DiffKeys":
return DiffKeys.from_dict({"added": [], "changed": [], "removed": []})
class ResponseSummary(OwlMixin):
url: str
type: str
status_code: TOption[int]
byte: TOption[int]
response_sec: TOption[float]
content_type: TOption[str]
mime_type: TOption[str]
encoding: TOption[str]
file: TOption[str]
prop_file: TOption[str]
response_header: TOption[dict]
class Trial(OwlMixin):
"""Affect `final/csv` config specifications,"""
seq: int
name: str
tags: TList[str]
headers: TDict[str]
queries: TDict[TList[str]]
raw: TOption[str]
form: TOption[dict]
json: TOption[dict]
one: ResponseSummary
other: ResponseSummary
method: HttpMethod
path: str
request_time: str
status: Status
# `None` is not same as `{}`. `{}` means no diffs, None means unknown
diffs_by_cognition: TOption[TDict[DiffKeys]]
class Report(OwlMixin):
"""Affect `final/slack` config specifications,"""
version: str
key: str
title: str
description: TOption[str]
notifiers: TOption[TDict[Notifier]]
summary: Summary
trials: TList[Trial]
addons: TOption[Addons]
retry_hash: TOption[str]
# ---
class Log2ReqsAddOnPayload(OwlMixin):
file: str
class Reqs2ReqsAddOnPayload(OwlMixin):
requests: TList[Request]
class DumpAddOnPayload(OwlMixin):
response: Response
body: bytes
encoding: TOption[str]
class Res2ResAddOnPayload(OwlMixin):
response: Response
req: Request
tags: TList[str]
class Res2DictAddOnPayload(OwlMixin):
response: Response
result: TOption[DictOrList]
class DidChallengeAddOnPayload(OwlMixin):
trial: Trial
class DidChallengeAddOnReference(OwlMixin):
res_one: Response
res_other: Response
res_one_props: TOption[DictOrList]
res_other_props: TOption[DictOrList]
class JudgementAddOnPayload(OwlMixin):
# Keyed by the ignore title in config.yml.
# `unknown` holds diffs which didn't match any configuration.
diffs_by_cognition: TOption[TDict[DiffKeys]]
regard_as_same_body: bool
regard_as_same_header: bool
@property
def regard_as_same(self) -> bool:
return self.regard_as_same_body and self.regard_as_same_header
class JudgementAddOnReference(OwlMixin):
name: str
path: str
qs: TDict[TList[str]]
headers: TDict[str]
res_one: Response
res_other: Response
dict_one: TOption[DictOrList]
dict_other: TOption[DictOrList]
class StoreCriterionAddOnPayload(OwlMixin):
stored: bool
class StoreCriterionAddOnReference(OwlMixin):
status: Status
req: Request
res_one: Response
res_other: Response
class FinalAddOnPayload(OwlMixin):
report: Report
output_summary: OutputSummary
@property
def result_path(self) -> str:
return f"{self.output_summary.response_dir}/{self.report.key}"
class FinalAddOnReference(OwlMixin):
notifiers: TOption[TDict[Notifier]]
|
|
import re
import pytest
from django.core.exceptions import ValidationError
from share.models.validators import JSONLDValidator
class TestJSONLDValidator:
CASES = [{
'out': "'@graph' is a required property at /",
'in': {},
}, {
'out': "Additional properties are not allowed ('foo' was unexpected) at /",
'in': {'foo': 'bar', '@graph': []}
}, {
'out': "{} is not of type 'array' at /@graph",
'in': {
'@graph': {}
}
}, {
'out': "1 is not of type 'array' at /@graph",
'in': {
'@graph': 1
}
}, {
'out': "1.0 is not of type 'array' at /@graph",
'in': {
'@graph': 1.0
}
}, {
'out': "None is not of type 'array' at /@graph",
'in': {
'@graph': None
}
}, {
'out': "'foo' is not of type 'array' at /@graph",
'in': {
'@graph': 'foo'
}
}, {
'out': "@graph may not be empty",
'in': {
'@graph': []
}
}, {
'out': "'@id' is a required property at /@graph/0",
'in': {
'@graph': [{'@type': ''}]
}
}, {
'out': "1 is not of type 'object' at /@graph/0",
'in': {
'@graph': [1]
}
}, {
'out': "None is not of type 'object' at /@graph/1",
'in': {
'@graph': [{'@id': '', '@type': ''}, None]
}
}, {
'out': "'@type' is a required property at /@graph/0",
'in': {
'@graph': [{'@id': ''}]
}
}, {
'out': "'Dinosaurs' is not a valid type",
'in': {
'@graph': [{'@id': '', '@type': 'Dinosaurs'}]
}
}, {
'out': re.compile(r"'Tag' is not one of \[('\w+', )+'\w+'\] at /@graph/0"),
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'throughtags',
'tag': {'@id': '_:789', '@type': 'Tag'},
'creative_work': {'@id': '_:456', '@type': 'Tag'},
}]
}
}, {
'out': 'Unresolved references [{"@id": "_:456", "@type": "preprint"}, {"@id": "_:789", "@type": "tag"}]',
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'throughtags',
'tag': {'@id': '_:789', '@type': 'Tag'},
'creative_work': {'@id': '_:456', '@type': 'Preprint'},
}]
}
}, {
'out': "'creative_work' is a required property at /@graph/0",
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'throughtags',
'tag': {'@id': '_:789', '@type': 'Tag'},
}]
}
}, {
'out': "Additional properties are not allowed ('shouldnt' was unexpected) at /@graph/0",
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'throughtags',
'shouldnt': 'behere',
'tag': {'@id': 'id', '@type': 'tag'},
'creative_work': {'@id': 'id', '@type': 'creativework'},
}]
}
}, {
'out': re.compile(r"^Additional properties are not allowed \('(shouldnt|pls)', '(shouldnt|pls)' were unexpected\) at /@graph/0$"),
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'throughtags',
'pls': 'toleeb',
'shouldnt': 'behere',
'tag': {'@id': 'id', '@type': 'tag'},
'creative_work': {'@id': 'id', '@type': 'creativework'},
}]
}
}, {
'out': re.compile("{.+} is not valid under any of the given schemas at /@graph/0/tag$"),
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'throughtags',
'creative_work': {'@id': '_:123', '@type': 'foo'},
'tag': {'@id': '_:123', '@type': 'foo', 'il': 'legal'}
}]
}
}, {
'out': "'extra should be a dict' is not of type 'object' at /@graph/0/extra",
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'Tag',
'name': 'A Tag',
'extra': 'extra should be a dict'
}]
}
}, {
'out': None,
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'Tag',
'name': 'A Tag',
'extra': {
'with some': 'extra data'
}
}]
}
}, {
'out': "1 is not of type 'string' at /@graph/0/name",
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'Tag',
'name': 1
}]
}
}, {
'out': None,
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'CreativeWork',
'title': 'Some title',
'description': 'description',
'tags': [{
'@id': '_:456',
'@type': 'throughtags'
}]
}, {
'@id': '_:456',
'@type': 'throughtags',
'tag': {'@id': '_:789', '@type': 'tag'},
'creative_work': {'@id': '_:123', '@type': 'creativework'},
}, {
'@id': '_:789',
'@type': 'tag',
'name': 'New Tag',
}]
}
}, {
'out': "'throughtugs' is not one of ['THROUGHTAGS', 'ThroughTags', 'throughtags'] at /@graph/0/tags/0/@type",
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'CreativeWork',
'title': 'Some title',
'description': 'description',
'tags': [{
'@id': '_:456',
'@type': 'throughtugs'
}]
}, {
'@id': '_:456',
'@type': 'throughtags',
'tag': {'@id': '_:789', '@type': 'tag'},
'creative_work': {'@id': '_:123', '@type': 'creativework'},
}, {
'@id': '_:789',
'@type': 'tag',
'name': 'New Tag',
}]
}
}, {
'out': "'giraffe' is not a 'uri' at /@graph/0/uri",
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'WorkIdentifier',
'uri': 'giraffe',
'creative_work': {'@id': '_:234', '@type': 'creativework'}
}, {
'@id': '_:234',
'@type': 'creativework',
'title': 'Giraffes are tall'
}]
}
}, {
'out': "'creative_work' is a required property at /@graph/0",
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'workidentifier',
'uri': 'https://share.osf.io/foo',
}]
}
}, {
'out': None,
'in': {
'@graph': [{
'@id': '_:123',
'@type': 'WorkIdentifier',
'uri': 'https://share.osf.io/foo',
'creative_work': {'@id': '_:234', '@type': 'creativework'}
}, {
'@id': '_:234',
'@type': 'creativework',
'title': 'Giraffes are tall'
}]
}
}]
@pytest.mark.parametrize('data, message', [(case['in'], case['out']) for case in CASES])
def test_validator(self, data, message):
try:
JSONLDValidator()(data)
except ValidationError as e:
assert message is not None, 'Raised "{}"'.format(e.args[0])
if isinstance(message, str):
assert message == e.args[0]
else:
assert message.match(e.args[0]) is not None
else:
assert message is None, 'No exception was raised. Expecting {}'.format(message)
# @pytest.mark.parametrize('data, message', [(case['in'], case['out']) for case in CASES])
# def test_benchmark_validator(self, benchmark, data, message):
# benchmark(self.test_validator, data, message)
|
|
import time
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.solvers.solvers import SolverMeta, NonlinearSolver
from sfepy.solvers.nls import conv_test
class StabilizationFunction(Struct):
"""
Definition of stabilization material function for the Oseen solver.
Notes
-----
- tau_red <= 1.0; if tau is None: tau = tau_red * delta
- diameter_mode: 'edge' ... longest edge, 'volume' ... volume-based,
'max' ... maximum of the previous two
"""
def __init__(self, name_map, gamma=None, delta=None, tau=None, tau_red=1.0,
tau_mul=1.0, delta_mul=1.0, gamma_mul=1.0,
diameter_mode='max'):
Struct.__init__(self, name_map=name_map,
gamma=gamma, delta=delta, tau=tau,
tau_red=tau_red, tau_mul=tau_mul, delta_mul=delta_mul,
gamma_mul=gamma_mul, diameter_mode=diameter_mode)
def setup(self, problem):
"""
Setup common problem-dependent data.
"""
variables = problem.get_variables()
ns = self.name_map
# Indices to the state vector.
ii = {}
ii['u'] = variables.get_indx(ns['u'])
ii['us'] = variables.get_indx(ns['u'], stripped=True)
ii['ps'] = variables.get_indx(ns['p'], stripped=True)
self.indices = ii
materials = problem.get_materials()
# The viscosity.
fluid_mat = materials[ns['fluid']]
self.viscosity = fluid_mat.function()[ns['viscosity']]
# The Friedrich's constant.
self.c_friedrichs = problem.domain.get_diameter()
self.sigma = 1e-12 # 1 / dt.
self.b_norm = 1.0
def get_maps(self):
"""
Get the maps of names and indices of variables in state vector.
"""
return self.name_map, self.indices
def __call__(self, ts, coor, mode=None, term=None, problem=None,
b_norm=None, **kwargs):
"""
The actual material function.
"""
if mode != 'qp': return
if not hasattr(self, 'viscosity'):
self.setup(problem)
ns = self.name_map
# Update stored b_norm.
self.b_norm = get_default(b_norm, self.b_norm)
output('|b|_max (mat_fun):', self.b_norm)
gamma = self.viscosity + self.b_norm * self.c_friedrichs
data = {}
if self.gamma is None:
_gamma = self.gamma_mul * gamma
else:
_gamma = nm.asarray(self.gamma_mul * self.gamma, dtype=nm.float64)
_gamma = nm.tile(_gamma, (coor.shape[0], 1, 1))
if self.delta is None:
# Element diameter modes.
dm = {'edge': 0, 'volume': 1, 'max': 2}[self.diameter_mode]
field = problem.fields[ns['velocity']]
region = term.region
vg, _ = field.get_mapping(region, term.integral, 'volume')
cells = region.get_cells()
d2 = problem.domain.get_element_diameters(cells, vg, dm)
self.diameters2 = d2
val1 = min(1.0, 1.0 / self.sigma)
val2 = self.sigma * self.c_friedrichs**2
val3 = ((self.b_norm**2)
* min((self.c_friedrichs**2) / self.viscosity,
1.0 / self.sigma))
n_qp = coor.shape[0] / self.diameters2.shape[0]
diameters2 = nm.repeat(self.diameters2, n_qp)
diameters2.shape = diameters2.shape + (1, 1)
_delta = self.delta_mul * val1 * diameters2 / (_gamma + val2 + val3)
else:
val = nm.asarray(self.delta_mul * self.delta, dtype=nm.float64)
_delta = nm.tile(val, (coor.shape[0], 1, 1))
if self.tau is None:
_tau = self.tau_red * _delta
else:
_tau = nm.asarray(self.tau_mul * self.tau, dtype=nm.float64)
_tau = nm.tile(_tau, (coor.shape[0], 1, 1))
data[ns['gamma']] = _gamma
data[ns['delta']] = _delta
data[ns['tau']] = _tau
return data
def are_close(a, b, rtol=0.2, atol=1e-8):
return False
# return abs(a - b) <= max(atol, rtol * abs(b))
def scale_matrix(mtx, indx, factor):
ptr0 = mtx.indptr[indx.start]
ptr1 = mtx.indptr[indx.stop]
mtx.data[ptr0:ptr1] *= factor
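def _example_scale_matrix():  # pragma: no cover
    """
    Hedged sketch (illustration only, not part of the original solver): for a
    CSR matrix, ``mtx.indptr[i]`` gives the offset of row ``i`` in
    ``mtx.data``, so scaling ``data[indptr[start]:indptr[stop]]`` scales the
    rows selected by ``slice(start, stop)`` in place. The 4x4 identity matrix
    and the factor below are assumptions chosen for the example.
    """
    import scipy.sparse as sps

    mtx = sps.csr_matrix(nm.eye(4))
    scale_matrix(mtx, slice(0, 2), 10.0)
    assert nm.allclose(mtx.toarray().diagonal(), [10.0, 10.0, 1.0, 1.0])
    return mtx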
class Oseen(NonlinearSolver):
"""
The Oseen solver for Navier-Stokes equations.
"""
name = 'nls.oseen'
__metaclass__ = SolverMeta
_parameters = [
('stabil_mat', 'str', None, True,
'The name of stabilization material.'),
('adimensionalize', 'bool', False, False,
'If True, adimensionalize the problem (not implemented!).'),
('check_navier_stokes_rezidual', 'bool', False, False,
'If True, check the Navier-Stokes rezidual after the nonlinear loop.'),
('i_max', 'int', 1, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-10, False,
'The absolute tolerance for the residual, i.e. :math:`||f(x^i)||`.'),
('eps_r', 'float', 1.0, False,
"""The relative tolerance for the residual, i.e. :math:`||f(x^i)|| /
||f(x^0)||`."""),
('macheps', 'float', nm.finfo(nm.float64).eps, False,
'The float considered to be machine "zero".'),
('lin_red', 'float', 1.0, False,
"""The linear system solution error should be smaller than (`eps_a` *
`lin_red`), otherwise a warning is printed."""),
('lin_precision', 'float or None', None, False,
"""If not None, the linear system solution tolerances are set in each
nonlinear iteration relative to the current residual norm by the
`lin_precision` factor. Ignored for direct linear solvers."""),
]
def __init__(self, conf, problem, **kwargs):
NonlinearSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
conf.problem = problem
conf = self.conf
if conf.is_any_log:
self.log = Log([[r'$||r||$'], ['iteration'],
[r'$\gamma$', r'$\max(\delta)$', r'$\max(\tau)$']],
xlabels=['', '', 'all iterations'],
ylabels=[r'$||r||$', 'iteration', 'stabilization'],
yscales=['log', 'linear', 'log'],
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%d'],
['%.8e', '%.8e', '%.8e']])
else:
self.log = None
def __call__(self, vec_x0, conf=None, fun=None, fun_grad=None,
lin_solver=None, status=None, problem=None):
"""
Oseen solver is problem-specific - it requires a Problem instance.
"""
conf = get_default(conf, self.conf)
fun = get_default(fun, self.fun)
fun_grad = get_default(fun_grad, self.fun_grad)
lin_solver = get_default(lin_solver, self.lin_solver)
status = get_default(status, self.status)
problem = get_default(problem, conf.problem,
'`problem` parameter needs to be set!')
time_stats = {}
stabil = problem.get_materials()[conf.stabil_mat]
ns, ii = stabil.function.function.get_maps()
variables = problem.get_variables()
update_var = variables.set_data_from_state
make_full_vec = variables.make_full_vec
output('problem size:')
output(' velocity: %s' % ii['us'])
output(' pressure: %s' % ii['ps'])
vec_x = vec_x0.copy()
vec_x_prev = vec_x0.copy()
vec_dx = None
if self.log is not None:
self.log.plot_vlines(color='r', linewidth=1.0)
err0 = -1.0
it = 0
while 1:
vec_x_prev_f = make_full_vec(vec_x_prev)
update_var(ns['b'], vec_x_prev_f, ns['u'])
vec_b = vec_x_prev_f[ii['u']]
b_norm = nla.norm(vec_b, nm.inf)
output('|b|_max: %.12e' % b_norm)
vec_x_f = make_full_vec(vec_x)
vec_u = vec_x_f[ii['u']]
u_norm = nla.norm(vec_u, nm.inf)
output('|u|_max: %.2e' % u_norm)
stabil.function.set_extra_args(b_norm=b_norm)
stabil.time_update(None, problem.equations, mode='force',
problem=problem)
max_pars = stabil.reduce_on_datas(lambda a, b: max(a, b.max()))
output('stabilization parameters:')
output(' gamma: %.12e' % max_pars[ns['gamma']])
output(' max(delta): %.12e' % max_pars[ns['delta']])
output(' max(tau): %.12e' % max_pars[ns['tau']])
if (not are_close(b_norm, 1.0)) and conf.adimensionalize:
adimensionalize = True
else:
adimensionalize = False
tt = time.clock()
try:
vec_r = fun(vec_x)
except ValueError:
ok = False
else:
ok = True
time_stats['rezidual'] = time.clock() - tt
if ok:
err = nla.norm(vec_r)
if it == 0:
err0 = err
else:
err += nla.norm(vec_dx)
else: # Failure.
output('rezidual computation failed for iter %d!' % it)
raise RuntimeError('giving up...')
if self.log is not None:
self.log(err, it,
max_pars[ns['gamma']], max_pars[ns['delta']],
max_pars[ns['tau']])
condition = conv_test(conf, it, err, err0)
if condition >= 0:
break
if adimensionalize:
output('adimensionalizing')
## mat.viscosity = viscosity / b_norm
## vec_r[indx_us] /= b_norm
tt = time.clock()
try:
mtx_a = fun_grad(vec_x)
except ValueError:
ok = False
else:
ok = True
time_stats['matrix'] = time.clock() - tt
if not ok:
raise RuntimeError('giving up...')
tt = time.clock()
vec_dx = lin_solver(vec_r, x0=vec_x, mtx=mtx_a)
time_stats['solve'] = time.clock() - tt
vec_e = mtx_a * vec_dx - vec_r
lerr = nla.norm(vec_e)
if lerr > (conf.eps_a * conf.lin_red):
output('linear system not solved! (err = %e)' % lerr)
if adimensionalize:
output('restoring pressure...')
## vec_dx[indx_ps] *= b_norm
dx_norm = nla.norm(vec_dx)
output('||dx||: %.2e' % dx_norm)
for kv in time_stats.iteritems():
output('%10s: %7.2f [s]' % kv)
vec_x_prev = vec_x.copy()
vec_x -= vec_dx
it += 1
if conf.check_navier_stokes_rezidual:
t1 = '+ dw_div_grad.%s.%s(%s.viscosity, %s, %s)' \
% (ns['i2'], ns['omega'], ns['fluid'], ns['v'], ns['u'])
# t2 = '+ dw_lin_convect.%s(%s, %s, %s)' % (ns['omega'],
# ns['v'], b_name, ns['u'])
t2 = '+ dw_convect.%s.%s(%s, %s)' % (ns['i2'], ns['omega'],
ns['v'], ns['u'])
t3 = '- dw_stokes.%s.%s(%s, %s)' % (ns['i1'], ns['omega'],
ns['v'], ns['p'])
t4 = 'dw_stokes.%s.%s(%s, %s)' % (ns['i1'], ns['omega'],
ns['u'], ns['q'])
equations = {
'balance' : ' '.join((t1, t2, t3)),
'incompressibility' : t4,
}
problem.set_equations(equations)
try:
vec_rns0 = fun(vec_x0)
vec_rns = fun(vec_x)
except ValueError:
ok = False
else:
ok = True
if not ok:
output('Navier-Stokes rezidual computation failed!')
err_ns = err_ns0 = None
else:
err_ns0 = nla.norm(vec_rns0)
err_ns = nla.norm(vec_rns)
output('Navier-Stokes rezidual0: %.8e' % err_ns0)
output('Navier-Stokes rezidual : %.8e' % err_ns)
output('b - u: %.8e' % nla.norm(vec_b - vec_u))
output(condition)
else:
err_ns = None
if status is not None:
status['time_stats'] = time_stats
status['err0'] = err0
status['err'] = err
status['err_ns'] = err_ns
status['condition'] = condition
if conf.log.plot is not None:
if self.log is not None:
self.log(save_figure=conf.log.plot)
return vec_x
|
|
''' Purpose for this file is to verify functions associated with Manager._groups dictionary.
'''
import unittest
import c3d
import numpy as np
import test.verify as verify
from test.zipload import Zipload
from test.base import Base
class GroupSample():
''' Helper object to verify group entries persist or terminate properly. '''
def __init__(self, manager):
self.manager = manager
self.sample()
@property
def group_items(self):
'''Helper to access group items. '''
return [(k, g) for (k, g) in self.manager.group_items()]
@property
def group_listed(self):
'''Helper to access group numerical key-value pairs. '''
return [(k, g) for (k, g) in self.manager.group_listed()]
@property
def fetch_groups(self):
'''Acquire both group sets. '''
return self.group_items, self.group_listed
@property
def max_key(self):
if len(self.group_items) > 0:
return np.max([k for (k, g) in self.group_listed])
return 0
def sample(self):
'''Call before applying changes. '''
self.s_grp_items, self.s_grp_list = self.fetch_groups
def assert_entry_count(self, delta=0):
'''Assert all values in group still exist.
Arguments
---------
delta: Number of entries added (+) or removed (-) since last sample.
'''
grp_items, grp_list = self.fetch_groups
assert len(self.s_grp_items) + delta == len(grp_items),\
'Unexpected number of group item entries. Expected %i entries, now has %i.' %\
(len(self.s_grp_items) + delta, len(grp_items))
assert len(self.s_grp_list) + delta == len(grp_list),\
'Unexpected number of group list entries. Expected %i entries, now has %i.' %\
(len(self.s_grp_list) + delta, len(grp_list))
assert len(grp_items) == len(grp_list),\
'Mismatch in the number of numerical and name keys. Has %i numerical entries and %i name entries.' %\
(len(grp_items), len(grp_list))
def assert_group_items(self):
'''Assert all named (str, Group) pairs persisted after change.'''
enumerator = range(len(self.s_grp_items))
for i, (n, g), (n2, g2) in zip(enumerator, sorted(self.s_grp_items), sorted(self.group_items)):
assert n == n2, 'Group name key mismatch after changes for entry %i. ' % i +\
"Initially '%s', after change entry was '%s'" % (n, n2)
assert g == g2, 'Group listed order changed for entry %i.' % i
def assert_group_list(self):
'''Assert all numerical (int, Group) pairs persisted after change.'''
enumerator = range(len(self.s_grp_list))
for i, (n, g), (n2, g2) in zip(enumerator, self.s_grp_list, self.group_listed):
assert n == n2, 'Group numeric id mismatch after changes for entry %i. ' % i +\
'Initially %i, after change entry was %i' % (n, n2)
assert g == g2, 'Group listed order changed for entry %i.' % i
def verify_add_group(self, N):
'''Add N groups and verify count at each iteration.'''
self.sample()
max_key = self.max_key
for i in range(1, N):
test_name = 'TEST_ADD_GROUP_%i' % i
self.manager.add_group(max_key + i, test_name, '')
assert self.manager.get(test_name) is not None, 'Added group does not exist.'
self.assert_entry_count(delta=i)
def verify_remove_all_using_numeric(self):
'''Remove all groups using numeric key and verify count at each iteration.'''
self.sample()
keys = [k for (k, g) in self.group_listed]
for i, key in enumerate(keys):
grp = self.manager.get(key)
assert grp is not None, 'Expected group to exist.'
self.manager.remove_group(key)
assert self.manager.get(key) is None, 'Removed group persisted.'
assert self.manager.get(grp.name) is None, 'Removed group persisted.'
self.assert_entry_count(delta=-1 - i)
def verify_remove_all_using_name(self):
'''Remove all groups using name key and verify count at each iteration.'''
self.sample()
keys = [k for (k, g) in self.group_items]
for i, key in enumerate(keys):
grp = self.manager.get(key)
assert grp is not None, 'Expected group to exist.'
self.manager.remove_group(key)
assert self.manager.get(key) is None, 'Removed group persisted.'
assert self.manager.get(grp.name) is None, 'Removed group persisted.'
self.assert_entry_count(delta=-1 - i)
class TestGroupAccessors(Base):
''' Tests functionality associated with editing Group entries in the Manager class.
'''
ZIP = 'sample01.zip'
INTEL_INT = 'Eb015pi.c3d'
INTEL_REAL = 'Eb015pr.c3d'
def test_Manager_group_items(self):
'''Test Manager.group_items'''
reader = c3d.Reader(Zipload._get(self.ZIP, self.INTEL_REAL))
grp_keys = [k for (k, g) in reader.group_items()]
assert len(grp_keys) > 0, 'No group items in file or Manager.group_items failed'
def test_Manager_group_listed(self):
'''Test Manager.group_listed'''
reader = c3d.Reader(Zipload._get(self.ZIP, self.INTEL_REAL))
grp_list = [k for (k, g) in reader.group_listed()]
assert len(grp_list) > 0, 'No group items in file or Manager.group_listed failed'
def test_Manager_add_group(self):
'''Test if renaming groups acts as intended.'''
reader = c3d.Reader(Zipload._get(self.ZIP, self.INTEL_REAL))
ref = GroupSample(reader)
ref.verify_add_group(100)
ref.verify_remove_all_using_numeric()
def test_Manager_removing_group_from_numeric(self):
'''Test if removing groups acts as intended.'''
reader = c3d.Reader(Zipload._get(self.ZIP, self.INTEL_REAL))
ref = GroupSample(reader)
ref.verify_remove_all_using_numeric()
ref.verify_add_group(100)
def test_Manager_removing_group_from_name(self):
'''Test if removing groups acts as intended.'''
reader = c3d.Reader(Zipload._get(self.ZIP, self.INTEL_REAL))
ref = GroupSample(reader)
ref.verify_remove_all_using_name()
ref.verify_add_group(100)
def test_Manager_rename_group(self):
'''Test if renaming groups acts as intended.'''
reader = c3d.Reader(Zipload._get(self.ZIP, self.INTEL_REAL))
ref = GroupSample(reader)
grp_keys = [k for (k, g) in ref.group_items]
new_names = ['TEST_NAME' + str(i) for i in range(len(grp_keys))]
for key, test_name in zip(grp_keys, new_names):
grp = reader.get(key)
reader.rename_group(key, test_name)
grp2 = reader.get(test_name)
assert grp2 is not None, "Rename failed, group with name '%s' does not exist." % test_name
assert grp == grp2, 'Rename failed, group acquired from new name is not identical.'
ref.assert_entry_count()
ref.assert_group_list()
try:
reader.rename_group(new_names[0], new_names[1])
raise RuntimeError('Overwriting an existing group name should raise a ValueError.')
except ValueError:
pass  # Correct
def test_Manager_renumber_group(self):
'''Test if renaming (renumbering) groups acts as intended.'''
reader = c3d.Reader(Zipload._get(self.ZIP, self.INTEL_REAL))
ref = GroupSample(reader)
grp_ids = [k for (k, g) in ref.group_listed]
max_key = ref.max_key
for i, key in enumerate(grp_ids):
test_num = max_key + i + 1
grp = reader.get(key)
reader.rename_group(key, test_num)
grp2 = reader.get(test_num)
assert grp2 is not None, "Rename failed, group with numeric key '%s' does not exist." % test_num
assert grp == grp2, 'Rename failed, group acquired from new name is not identical.'
ref.assert_entry_count()
ref.assert_group_items()
try:
reader.rename_group(max_key + 1, max_key + 2)
raise RuntimeError('Overwriting an existing numeric ID should raise a ValueError.')
except ValueError:
pass  # Correct
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
# Database Module
# --------------------
from __future__ import unicode_literals
import MySQLdb
import webnotes
import conf
import datetime
class Database:
"""
Open a database connection with the given parmeters, if use_default is True, use the
login details from `conf.py`. This is called by the request handler and is accessible using
the `conn` global variable. the `sql` method is also global to run queries
"""
def __init__(self, host=None, user=None, password=None, ac_name=None, use_default = 0):
self.host = host or 'localhost'
self.user = user or conf.db_name
if ac_name:
self.user = self.get_db_login(ac_name) or conf.db_name
if use_default:
self.user = conf.db_name
self.transaction_writes = 0
self.auto_commit_on_many_writes = 0
self.password = password or webnotes.get_db_password(self.user)
self.connect()
if self.user != 'root':
self.use(self.user)
def get_db_login(self, ac_name):
return ac_name
def connect(self):
"""
Connect to a database
"""
self._conn = MySQLdb.connect(user=self.user, host=self.host, passwd=self.password,
use_unicode=True, charset='utf8')
self._conn.converter[246]=float
self._cursor = self._conn.cursor()
def use(self, db_name):
"""
`USE` db_name
"""
self._conn.select_db(db_name)
self.cur_db_name = db_name
def validate_query(self, q):
cmd = q.strip().lower().split()[0]
if cmd in ['alter', 'drop', 'truncate'] and webnotes.user.name != 'Administrator':
webnotes.msgprint('Not allowed to execute query')
raise Exception
def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0,
debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None):
"""
* Execute a `query`, with given `values`
* returns as a dictionary if as_dict = 1
* returns as a list of lists (with cleaned up dates) if as_list = 1
"""
# in transaction validations
self.check_transaction_status(query)
# autocommit
if auto_commit: self.commit()
# execute
try:
if values!=():
if isinstance(values, dict):
values = dict(values)
if debug:
try:
self.explain_query(query, values)
webnotes.errprint(query % values)
except TypeError:
webnotes.errprint([query, values])
if getattr(conf, "logging", False)==2:
webnotes.log("<<<< query")
webnotes.log(query)
webnotes.log("with values:")
webnotes.log(values)
webnotes.log(">>>>")
self._cursor.execute(query, values)
else:
if debug:
self.explain_query(query)
webnotes.errprint(query)
if getattr(conf, "logging", False)==2:
webnotes.log("<<<< query")
webnotes.log(query)
webnotes.log(">>>>")
self._cursor.execute(query)
except Exception, e:
# ignore data definition errors
if ignore_ddl and e.args[0] in (1146,1054,1091):
pass
else:
raise e
if auto_commit: self.commit()
# scrub output if required
if as_dict:
ret = self.fetch_as_dict(formatted, as_utf8)
if update:
for r in ret:
r.update(update)
return ret
elif as_list:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
elif as_utf8:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
else:
return self._cursor.fetchall()
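# Hedged usage sketch (comments only, since running it needs a live MySQL
# connection; the table name and values are assumptions, not real site data):
#
#     rows = db.sql("select name from `tabDocType` where module=%s",
#                   ("Core",), as_dict=1)
#     # -> a list of dicts like [{'name': 'DocType'}, ...] with as_dict=1,
#     #    or a tuple of tuples with the default settings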
def explain_query(self, query, values=None):
try:
webnotes.errprint("--- query explain ---")
if values is None:
self._cursor.execute("explain " + query)
else:
self._cursor.execute("explain " + query, values)
import json
webnotes.errprint(json.dumps(self.fetch_as_dict(), indent=1))
webnotes.errprint("--- query explain end ---")
except:
webnotes.errprint("error in query explain")
def sql_list(self, query, values=(), debug=False):
return [r[0] for r in self.sql(query, values, debug=debug)]
def sql_ddl(self, query, values=()):
self.commit()
self.sql(query)
def check_transaction_status(self, query):
if self.transaction_writes and query and query.strip().split()[0].lower() in ['start', 'alter', 'drop', 'create', "begin"]:
raise Exception, 'This statement can cause implicit commit'
if query and query.strip().lower()=='commit':
self.transaction_writes = 0
if query[:6].lower() in ['update', 'insert']:
self.transaction_writes += 1
if self.transaction_writes > 10000:
if self.auto_commit_on_many_writes:
webnotes.conn.commit()
webnotes.conn.begin()
else:
webnotes.msgprint('A very long query was encountered. If you are trying to import data, please do so using smaller files')
raise Exception, 'Bad Query!!! Too many writes'
def fetch_as_dict(self, formatted=0, as_utf8=0):
result = self._cursor.fetchall()
ret = []
needs_formatting = self.needs_formatting(result, formatted)
for r in result:
row_dict = webnotes._dict({})
for i in range(len(r)):
if needs_formatting:
val = self.convert_to_simple_type(r[i], formatted)
else:
val = r[i]
if as_utf8 and type(val) is unicode:
val = val.encode('utf-8')
row_dict[self._cursor.description[i][0]] = val
ret.append(row_dict)
return ret
def needs_formatting(self, result, formatted):
if result and result[0]:
for v in result[0]:
if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, long)):
return True
if formatted and isinstance(v, (int, float)):
return True
return False
def get_description(self):
return self._cursor.description
def convert_to_simple_type(self, v, formatted=0):
from webnotes.utils import formatdate, fmt_money
if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, long)):
if isinstance(v, datetime.date):
v = unicode(v)
if formatted:
v = formatdate(v)
# time
elif isinstance(v, (datetime.timedelta, datetime.datetime)):
v = unicode(v)
# long
elif isinstance(v, long):
v=int(v)
# convert to strings... (if formatted)
if formatted:
if isinstance(v, float):
v=fmt_money(v)
elif isinstance(v, int):
v = unicode(v)
return v
def convert_to_lists(self, res, formatted=0, as_utf8=0):
nres = []
needs_formatting = self.needs_formatting(res, formatted)
for r in res:
nr = []
for c in r:
if needs_formatting:
val = self.convert_to_simple_type(c, formatted)
else:
val = c
if as_utf8 and type(val) is unicode:
val = val.encode('utf-8')
nr.append(val)
nres.append(nr)
return nres
def convert_to_utf8(self, res, formatted=0):
nres = []
for r in res:
nr = []
for c in r:
if type(c) is unicode:
c = c.encode('utf-8')
nr.append(self.convert_to_simple_type(c, formatted))
nres.append(nr)
return nres
def build_conditions(self, filters):
def _build_condition(key):
"""
filter's key is passed by map function
build conditions like:
* ifnull(`fieldname`, default_value) = %(fieldname)s
* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
"""
_operator = "="
value = filters.get(key)
if isinstance(value, (list, tuple)):
_operator = value[0]
filters[key] = value[1]
if _operator not in ["=", "!=", ">", ">=", "<", "<=", "like"]:
_operator = "="
if "[" in key:
split_key = key.split("[")
return "ifnull(`" + split_key[0] + "`, " + split_key[1][:-1] + ") " \
+ _operator + " %(" + key + ")s"
else:
return "`" + key + "` " + _operator + " %(" + key + ")s"
if isinstance(filters, basestring):
filters = { "name": filters }
conditions = map(_build_condition, filters)
return " and ".join(conditions), filters
def get(self, doctype, filters=None, as_dict=True):
return self.get_value(doctype, filters, "*", as_dict=as_dict)
def get_value(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False, debug=False):
"""Get a single / multiple value from a record.
For Single DocType, let filters be = None"""
ret = self.get_values(doctype, filters, fieldname, ignore, as_dict, debug)
return ret and ((len(ret[0]) > 1 or as_dict) and ret[0] or ret[0][0]) or None
def get_values(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False, debug=False):
if isinstance(filters, list):
return self.get_value_for_many_names(doctype, filters, fieldname, debug=debug)
fields = fieldname
if fieldname!="*":
if isinstance(fieldname, basestring):
fields = [fieldname]
else:
fields = fieldname
if (filters is not None) and (filters!=doctype or doctype=="DocType"):
try:
return self.get_values_from_table(fields, filters, doctype, as_dict, debug)
except Exception, e:
if ignore and e.args[0] in (1146, 1054):
# table or column not found, return None
return None
elif (not ignore) and e.args[0]==1146:
# table not found, look in singles
pass
else:
raise e
return self.get_values_from_single(fields, filters, doctype, as_dict, debug)
def get_values_from_single(self, fields, filters, doctype, as_dict=False, debug=False):
if fields=="*" or isinstance(filters, dict):
r = self.sql("""select field, value from tabSingles where doctype=%s""", doctype)
# check if single doc matches with filters
values = webnotes._dict(r)
if isinstance(filters, dict):
for key, value in filters.items():
if values.get(key) != value:
return []
if as_dict:
return values and [values] or []
if isinstance(fields, list):
return [map(lambda d: values.get(d), fields)]
else:
r = self.sql("""select field, value
from tabSingles where field in (%s) and doctype=%s""" \
% (', '.join(['%s'] * len(fields)), '%s'),
tuple(fields) + (doctype,), as_dict=False, debug=debug)
if as_dict:
return r and [webnotes._dict(r)] or []
else:
return r and [[i[1] for i in r]] or []
def get_values_from_table(self, fields, filters, doctype, as_dict, debug):
fl = []
if isinstance(fields, (list, tuple)):
for f in fields:
if "(" in f: # function
fl.append(f)
else:
fl.append("`" + f + "`")
fl = ", ".join(fields)
else:
fl = fields
conditions, filters = self.build_conditions(filters)
r = self.sql("select %s from `tab%s` where %s" % (fl, doctype,
conditions), filters, as_dict=as_dict, debug=debug)
return r
def get_value_for_many_names(self, doctype, names, field, debug=False):
names = filter(None, names)
if names:
return dict(self.sql("select name, `%s` from `tab%s` where name in (%s)" \
% (field, doctype, ", ".join(["%s"]*len(names))), names, debug=debug))
else:
return {}
def set_value(self, dt, dn, field, val, modified=None, modified_by=None):
from webnotes.utils import now
if dn and dt!=dn:
self.sql("""update `tab%s` set `%s`=%s, modified=%s, modified_by=%s
where name=%s""" % (dt, field, "%s", "%s", "%s", "%s"),
(val, modified or now(), modified_by or webnotes.session["user"], dn))
else:
if self.sql("select value from tabSingles where field=%s and doctype=%s", (field, dt)):
self.sql("""update tabSingles set value=%s where field=%s and doctype=%s""",
(val, field, dt))
else:
self.sql("""insert into tabSingles(doctype, field, value)
values (%s, %s, %s)""", (dt, field, val, ))
if field!="modified":
self.set_value(dt, dn, "modified", modified or now())
def set(self, doc, field, val):
from webnotes.utils import now
doc.modified = now()
doc.modified_by = webnotes.session["user"]
self.set_value(doc.doctype, doc.name, field, val, doc.modified, doc.modified_by)
doc.fields[field] = val
def set_global(self, key, val, user='__global'):
self.set_default(key, val, user)
def get_global(self, key, user='__global'):
return self.get_default(key, user)
def set_default(self, key, val, parent="Control Panel"):
"""set control panel default (tabDefaultVal)"""
import webnotes.defaults
webnotes.defaults.set_default(key, val, parent)
def add_default(self, key, val, parent="Control Panel"):
import webnotes.defaults
webnotes.defaults.add_default(key, val, parent)
def get_default(self, key, parent="Control Panel"):
"""get default value"""
import webnotes.defaults
d = webnotes.defaults.get_defaults(parent).get(key)
return isinstance(d, list) and d[0] or d
def get_defaults_as_list(self, key, parent="Control Panel"):
import webnotes.defaults
d = webnotes.defaults.get_default(key, parent)
return isinstance(d, basestring) and [d] or d
def get_defaults(self, key=None, parent="Control Panel"):
"""get all defaults"""
import webnotes.defaults
if key:
return webnotes.defaults.get_defaults(parent).get(key)
else:
return webnotes.defaults.get_defaults(parent)
def begin(self):
return # not required
def commit(self):
self.sql("commit")
def rollback(self):
self.sql("ROLLBACK")
self.transaction_writes = 0
def field_exists(self, dt, fn):
return self.sql("select name from tabDocField where fieldname=%s and parent=%s", (dt, fn))
def table_exists(self, tablename):
return tablename in [d[0] for d in self.sql("show tables")]
def exists(self, dt, dn=None):
if isinstance(dt, basestring):
if dt==dn:
return True # single always exists (!)
try:
return self.sql('select name from `tab%s` where name=%s' % (dt, '%s'), dn)
except:
return None
elif isinstance(dt, dict) and dt.get('doctype'):
try:
conditions = []
for d in dt:
if d == 'doctype': continue
conditions.append('`%s` = "%s"' % (d, dt[d].replace('"', '\\"')))
return self.sql('select name from `tab%s` where %s' % \
(dt['doctype'], " and ".join(conditions)))
except:
return None
def get_table_columns(self, doctype):
return [r[0] for r in self.sql("DESC `tab%s`" % doctype)]
def close(self):
if self._conn:
self._cursor.close()
self._conn.close()
self._cursor = None
self._conn = None
|
|
#!/usr/bin/env python
# coding=utf-8
################################################################################
import os
import sys
import optparse
import configobj
import traceback
import tempfile
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), 'src')))
def getIncludePaths(path):
for f in os.listdir(path):
cPath = os.path.abspath(os.path.join(path, f))
if os.path.isfile(cPath) and len(f) > 3 and f.endswith('.py'):
sys.path.append(os.path.dirname(cPath))
elif os.path.isdir(cPath):
getIncludePaths(cPath)
collectors = {}
def getCollectors(path):
for f in os.listdir(path):
cPath = os.path.abspath(os.path.join(path, f))
if os.path.isfile(cPath) and len(f) > 3 and f.endswith('.py'):
modname = f[:-3]
if modname.startswith('Test'):
continue
if modname.startswith('test'):
continue
try:
# Import the module
module = __import__(modname, globals(), locals(), ['*'])
# Find the name
for attr in dir(module):
if not attr.endswith('Collector'):
continue
cls = getattr(module, attr)
if cls.__name__ not in collectors:
collectors[cls.__name__] = module
except Exception:
print "Failed to import module: %s. %s" % (
modname, traceback.format_exc())
collectors[modname] = False
elif os.path.isdir(cPath):
getCollectors(cPath)
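# Hedged sketch (illustration only): the same reflection pattern used by
# getCollectors/getHandlers above -- ``__import__`` followed by a ``dir()``
# scan for a class-name suffix -- applied to a stdlib module so it can run
# without any Diamond collectors on the path.
def _example_attribute_scan():  # pragma: no cover
    module = __import__('collections', globals(), locals(), ['*'])
    found = [attr for attr in dir(module) if attr.endswith('Dict')]
    assert 'OrderedDict' in found
    return found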
handlers = {}
def getHandlers(path):
for f in os.listdir(path):
cPath = os.path.abspath(os.path.join(path, f))
if os.path.isfile(cPath) and len(f) > 3 and f.endswith('.py'):
modname = f[:-3]
try:
# Import the module
module = __import__(modname, globals(), locals(), ['*'])
# Find the name
for attr in dir(module):
if (not attr.endswith('Handler')
or attr.startswith('Handler')):
continue
cls = getattr(module, attr)
if cls.__name__ not in handlers:
handlers[cls.__name__] = module
except Exception:
print "Failed to import module: %s. %s" % (
modname, traceback.format_exc())
handlers[modname] = False
elif os.path.isdir(cPath):
getHandlers(cPath)
################################################################################
if __name__ == "__main__":
# Initialize Options
parser = optparse.OptionParser()
parser.add_option("-c", "--configfile",
dest="configfile",
default="/etc/diamond/diamond.conf",
help="Path to the config file")
parser.add_option("-C", "--collector",
dest="collector",
default=None,
help="Configure a single collector")
parser.add_option("-p", "--print",
action="store_true",
dest="dump",
default=False,
help="Just print the defaults")
# Parse Command Line Args
(options, args) = parser.parse_args()
# Initialize Config
if os.path.exists(options.configfile):
config = configobj.ConfigObj(os.path.abspath(options.configfile))
else:
print >> sys.stderr, "ERROR: Config file: %s does not exist." % (
options.configfile)
print >> sys.stderr, ("Please run python config.py -c "
+ "/path/to/diamond.conf")
parser.print_help(sys.stderr)
sys.exit(1)
collector_path = config['server']['collectors_path']
docs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'docs'))
handler_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'src', 'diamond', 'handler'))
getIncludePaths(collector_path)
# Ugly hack for snmp collector overrides
getCollectors(os.path.join(collector_path, 'snmp'))
getCollectors(collector_path)
collectorIndexFile = open(os.path.join(docs_path, "Collectors.md"), 'w')
collectorIndexFile.write("## Collectors\n")
collectorIndexFile.write("\n")
for collector in sorted(collectors.iterkeys()):
# Skip configuring the basic collector object
if collector == "Collector":
continue
if collector.startswith('Test'):
continue
print "Processing %s..." % (collector)
if not hasattr(collectors[collector], collector):
continue
cls = getattr(collectors[collector], collector)
obj = cls(config=config, handlers={})
options = obj.get_default_config_help()
defaultOptions = obj.get_default_config()
docFile = open(os.path.join(docs_path,
"collectors-" + collector + ".md"), 'w')
enabled = ''
collectorIndexFile.write(" - [%s](collectors-%s)%s\n" % (collector,
collector,
enabled))
docFile.write("%s\n" % (collector))
docFile.write("=====\n")
if collectors[collector].__doc__ is None:
print "No __doc__ string!"
docFile.write("%s\n" % (collectors[collector].__doc__))
docFile.write("#### Options - [Generic Options](Configuration)\n")
docFile.write("\n")
docFile.write("<table>")
docFile.write("<tr>")
docFile.write("<th>Setting</th>")
docFile.write("<th>Default</th>")
docFile.write("<th>Description</th>")
docFile.write("<th>Type</th>")
docFile.write("</tr>\n")
for option in sorted(options.keys()):
defaultOption = ''
defaultOptionType = ''
if option in defaultOptions:
defaultOptionType = defaultOptions[option].__class__.__name__
if isinstance(defaultOptions[option], list):
defaultOption = ', '.join(map(str, defaultOptions[option]))
defaultOption += ','
else:
defaultOption = str(defaultOptions[option])
docFile.write("<tr>")
docFile.write("<td>%s</td>" % (option))
docFile.write("<td>%s</td>" % (defaultOption))
docFile.write("<td>%s</td>" % (options[option].replace(
"\n", '<br>\n')))
docFile.write("<td>%s</td>" % (defaultOptionType))
docFile.write("</tr>\n")
docFile.write("</table>\n")
docFile.write("\n")
docFile.write("#### Example Output\n")
docFile.write("\n")
docFile.write("```\n")
docFile.write("__EXAMPLESHERE__\n")
docFile.write("```\n")
docFile.write("\n")
docFile.write("### This file was generated from the python source\n")
docFile.write("### Please edit the source to make changes\n")
docFile.write("\n")
docFile.close()
collectorIndexFile.close()
getIncludePaths(handler_path)
getHandlers(handler_path)
handlerIndexFile = open(os.path.join(docs_path, "Handlers.md"), 'w')
handlerIndexFile.write("## Handlers\n")
handlerIndexFile.write("\n")
for handler in sorted(handlers.iterkeys()):
# Skip configuring the basic handler object
if handler == "Handler":
continue
if handler[0:4] == "Test":
continue
print "Processing %s..." % (handler)
if not hasattr(handlers[handler], handler):
continue
cls = getattr(handlers[handler], handler)
tmpfile = tempfile.mkstemp()
options = None
defaultOptions = None
try:
obj = cls({
'log_file': tmpfile[1],
})
options = obj.get_default_config_help()
defaultOptions = obj.get_default_config()
except Exception, e:
print "Caught Exception %s" % e
os.remove(tmpfile[1])
docFile = open(os.path.join(docs_path,
"handler-" + handler + ".md"), 'w')
handlerIndexFile.write(" - [%s](handler-%s)\n" % (handler, handler))
docFile.write("%s\n" % (handler))
docFile.write("====\n")
docFile.write("%s" % (handlers[handler].__doc__))
docFile.write("#### Options - [Generic Options](Configuration)\n")
docFile.write("\n")
docFile.write("<table>")
docFile.write("<tr>")
docFile.write("<th>Setting</th>")
docFile.write("<th>Default</th>")
docFile.write("<th>Description</th>")
docFile.write("<th>Type</th>")
docFile.write("</tr>\n")
if options:
for option in sorted(options.keys()):
defaultOption = ''
defaultOptionType = ''
if option in defaultOptions:
defaultOptionType = defaultOptions[
option].__class__.__name__
if isinstance(defaultOptions[option], list):
defaultOption = ', '.join(map(str,
defaultOptions[option]))
defaultOption += ','
else:
defaultOption = str(defaultOptions[option])
docFile.write("<tr>")
docFile.write("<td>%s</td>" % (option))
docFile.write("<td>%s</td>" % (defaultOption))
docFile.write("<td>%s</td>" % (options[option].replace(
"\n", '<br>\n')))
docFile.write("<td>%s</td>" % (defaultOptionType))
docFile.write("</tr>\n")
docFile.write("</table>\n")
docFile.write("\n")
docFile.write("### This file was generated from the python source\n")
docFile.write("### Please edit the source to make changes\n")
docFile.write("\n")
docFile.close()
handlerIndexFile.close()
|
|
# Author: Christopher M. Shymansky <CMShymansky@gmail.com>,
# License: ALv2
# Date created: 2016-11-25
import sys
sys.path.append("../code")
import pyplearnr as ppl
import pandas as pd
import itertools
import unittest
class AugmentedTestCase(unittest.TestCase):
"""
unittest.TestCase class with an extra helper method for comparing expected
and actual errors
"""
def assert_with_messsage(self, msg, func, args, kwargs):
try:
func(*args, **kwargs)
# self.fail()  # would make the test fail if no exception is raised
except Exception as inst:
self.assertEqual(inst.message, msg)
def get_cleaned_titanic_data(self):
# Read data into Pandas dataframe
df = pd.read_pickle('../trimmed_titanic_data.pkl')
simulation_df = df.copy()
# Set categorial features as such
categorical_features = ['Survived','Pclass','Sex','Embarked','Title']
for feature in categorical_features:
simulation_df[feature] = simulation_df[feature].astype('category')
# One-hot encode categorical features
simulation_df = pd.get_dummies(simulation_df,drop_first=True)
output_feature = 'Survived_1'
column_names = list(simulation_df.columns)
input_features = [x for x in column_names if x != output_feature]
# Split into features and targets
X = simulation_df[input_features].copy().values
y = simulation_df[output_feature].copy().values
return X, y
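# Hedged sketch (illustration only; the toy frame below is an assumption, not
# project data): demonstrates the ``pd.get_dummies(..., drop_first=True)``
# behaviour relied on in ``get_cleaned_titanic_data`` -- each categorical
# column expands into n-1 indicator columns, dropping the first category.
def _example_one_hot_encoding():  # pragma: no cover
    toy = pd.DataFrame({'Sex': pd.Categorical(['male', 'female', 'male'])})
    encoded = pd.get_dummies(toy, drop_first=True)
    assert list(encoded.columns) == ['Sex_male']
    assert encoded['Sex_male'].tolist() == [1, 0, 1]
    return encoded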
class PipelineBundleTestCase(AugmentedTestCase):
"""
Tests PipelineBundle methods
"""
def test_build_bundle(self):
# Set test pipeline bundle schematic
pipeline_bundle_schematic = [
{'scaler': {
'standard': {},
'normal': {}
}},
{'estimator': {
'knn': {
'n_neighbors': range(1,11),
'weights': ['uniform', 'distance']
},
'svm': {
'C': range(1,12)
}
}}
]
pipelines = ppl.PipelineBundle().build_pipeline_bundle(pipeline_bundle_schematic)
class NestedKFoldCrossValidationTestCase(AugmentedTestCase):
"""
Tests NestedKFoldCrossValidation class
"""
def test_init_outer_loop_fold_count_zero(self):
############### Test initialization inputs ###############
msg = "The outer_loop_fold_count" \
" keyword argument, dictating the number of folds in the outer " \
"loop, must be a positive integer"
kwargs = {
'outer_loop_fold_count': 0
}
self.assert_with_messsage(msg, ppl.NestedKFoldCrossValidation,
[],kwargs)
def test_init_outer_loop_fold_count_negative(self):
############### Test initialization inputs ###############
msg = "The outer_loop_fold_count" \
" keyword argument, dictating the number of folds in the outer " \
"loop, must be a positive integer"
kwargs = {
'outer_loop_fold_count': -5
}
self.assert_with_messsage(msg, ppl.NestedKFoldCrossValidation,
[],kwargs)
def test_init_inner_loop_fold_count_zero(self):
msg = "The inner_loop_fold_count" \
" keyword argument, dictating the number of folds in the inner" \
" loop, must be a positive integer"
kwargs = {
'inner_loop_fold_count': 0
}
self.assert_with_messsage(msg, ppl.NestedKFoldCrossValidation,
[], kwargs)
def test_init_inner_loop_fold_count_negative(self):
msg = "The inner_loop_fold_count" \
" keyword argument, dictating the number of folds in the inner" \
" loop, must be a positive integer"
kwargs = {
'inner_loop_fold_count': -5
}
self.assert_with_messsage(msg, ppl.NestedKFoldCrossValidation,
[], kwargs)
def test_init_outer_loop_split_seed_zero(self):
msg = "The " \
"outer_loop_split_seed keyword argument, dictating how the data "\
"is split into folds for the outer loop, must be an integer."
kwargs = {
'outer_loop_split_seed': 0
}
self.assert_with_messsage(msg, ppl.NestedKFoldCrossValidation,
[], kwargs)
def test_init_outer_loop_split_seed_negative(self):
msg = "The " \
"outer_loop_split_seed keyword argument, dictating how the data "\
"is split into folds for the outer loop, must be an integer."
kwargs = {
'outer_loop_split_seed': -5
}
self.assert_with_messsage(msg, ppl.NestedKFoldCrossValidation,
[], kwargs)
def test_init_inner_loop_split_seed_zero(self):
msg = "The " \
"inner_loop_split_seed keyword argument, dictating how the data "\
"is split into folds for the inner loop, must be an integer."
kwargs = {
'inner_loop_split_seed': 0
}
self.assert_with_messsage(msg, ppl.NestedKFoldCrossValidation,
[], kwargs)
def test_init_inner_loop_split_seed_negative(self):
msg = "The " \
"inner_loop_split_seed keyword argument, dictating how the data "\
"is split into folds for the inner loop, must be an integer."
kwargs = {
'inner_loop_split_seed': -5
}
self.assert_with_messsage(msg, ppl.NestedKFoldCrossValidation,
[], kwargs)
def test_get_outer_split_indices(self):
# Get data fit for testing
X, y = self.get_cleaned_titanic_data()
# Obtain test/train split indices for outer and inner folds
kfcv = ppl.NestedKFoldCrossValidation()
kfcv.get_outer_split_indices(X, y=y, stratified=False)
# Test that the resulting indices combine to form the total set of
# indices
outer_test_inds_target = set(range(X.shape[0]))
all_outer_test_inds = set()
for outer_fold_ind, outer_fold in kfcv.outer_folds.iteritems():
current_outer_test_fold_inds = outer_fold.test_fold_inds
current_outer_train_fold_inds = outer_fold.train_fold_inds
all_outer_test_inds |= set(current_outer_test_fold_inds)
inner_test_inds_target = set(range(X[current_outer_train_fold_inds].shape[0]))
all_inner_test_inds = set()
for inner_fold_ind, inner_fold in outer_fold.inner_folds.iteritems():
all_inner_test_inds |= set(inner_fold.test_fold_inds)
self.assertTrue(not all_inner_test_inds-inner_test_inds_target)
self.assertTrue(not all_outer_test_inds-outer_test_inds_target)
def test_fit(self):
# Get data fit for testing
X, y = self.get_cleaned_titanic_data()
# Obtain test/train split indices for outer and inner folds
kfcv = ppl.NestedKFoldCrossValidation()
estimators = ['logistic_regression','svm']
# feature_interaction_options = [True,False]
feature_selection_options = [None,'select_k_best']
scaling_options = [None,'standard','normal','min_max','binary']
transformations = [None,'pca']
pipeline_steps = [feature_selection_options,scaling_options,
transformations,estimators]
pipeline_options = list(itertools.product(*pipeline_steps))
kfcv.fit(X, y, pipelines=[], stratified=True)
"""
best_pipeline = {
"trained_all_pipeline": None,
"mean_validation_score": None,
"validation_score_std": None
}
| * | | |
best_outer_fold_1_pipeline = {
"outer_fold_id": None
"best_pipeline_ind": None,
"trained_all_best_pipeline": None,
"validation_score": None,
"scoring_type": None
}
pipeline_1_outer_fold_1 = {
"id": None,
"mean_test_score": None,
"test_score_std": None,
"mean_train_score": None,
"train_score_std": None,
"scoring_type": None
}
pipeline_2_outer_fold_1
....
pipeline_d_outer_fold_1
| * | | |
pipeline_1_outer_fold_1_inner_fold_1 = {
'id': None,
'outer_fold_id': None,
'inner_fold_id': None,
'pipeline': None,
'test_score': None,
'train_score': None,
'scoring_type': None,
}
pipeline_2_outer_fold_1_inner_fold_1
....
pipeline_d_outer_fold_1_inner_fold_1
| | * | |
pipeline_1_outer_fold_1_inner_fold_2
pipeline_2_outer_fold_1_inner_fold_2
....
pipeline_d_outer_fold_1_inner_fold_2
| | | * |
pipeline_1_outer_fold_1_inner_fold_3
pipeline_2_outer_fold_1_inner_fold_3
....
pipeline_d_outer_fold_1_inner_fold_3
| | * | |
............
| | | * |
............
"""
"""
Alternate setup:
'scoring_metric': None,
best_pipeline = {
"trained_all_pipeline": None,
"mean_validation_score": None,
"validation_score_std": None
},
'outer_folds' = {
'id': None,
'test_inds': None,
'train_inds': None,
'best_pipeline': {
'best_pipeline_validation_score': None,
},
'pipelines': {
'id': {
'id': None
'mean_test_score': None,
'test_score_std': None,
'pipeline': None
}
}
'inner_folds': {
'id': None,
'test_fold_inds': None,
'train_fold_inds': None,
'pipelines': {
'id': {
'id': outer_inner_pipeline
'test_score': None,
'train_score': None,
'pipeline': None
}
}
},
...
{},
}
"""
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""
Contains Definitions of setup_vgg_1, setup_vgg_1_xzt, setup_vgg_1_xzt_max, setup_vgg_1_xzt_stride
autoencoder_stage: Type of training/network
0: autoencoder
1: encoder+ from autoencoder w/ frozen layers
2: encoder+ from scratch, completely unfrozen
If autoencoder_stage==1, only the first part of the autoencoder (the encoder part) is generated
and these layers are frozen.
The weights of the original model can then be imported by using load_weights('xxx.h5', by_name=True).
modelpath_and_name is used to load the encoder part for supervised training,
and is only needed if autoencoder_stage==1.
"""
from keras.models import Model
from keras.layers import Activation, Input, Dense, Flatten, Conv3D, MaxPooling3D, UpSampling3D,BatchNormalization, ZeroPadding3D, Conv3DTranspose, AveragePooling3D
from keras import backend as K
from util.custom_layers import MaxUnpooling3D
def setup_vgg_1(autoencoder_stage, modelpath_and_name=None):
#enhanced version of vgg_0, with zero_center compatibility and batch normalization
#tag: vgg_1
#autoencoder_stage: Type of training/network
# 0: autoencoder
# 1: encoder+ from autoencoder w/ frozen layers
# 2: encoder+ from scratch, completely unfrozen
#If autoencoder_stage==1 only the first part of the autoencoder (encoder part) will be generated
#These layers are frozen then
#The weights of the original model can be imported then by using load_weights('xxx.h5', by_name=True)
#modelpath_and_name is used to load the encoder part for supervised training,
#and only needed if autoencoder_stage==1
if autoencoder_stage == 1:
#Freeze encoder layers
train=False
else:
train=True
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
inputs = Input(shape=(11,13,18,1))
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(inputs)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=32, kernel_size=(2,2,3), padding='valid', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#10x12x16 x 32
x = AveragePooling3D((2, 2, 2), padding='valid')(x)
#5x6x8 x 32
x = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=64, kernel_size=(2,3,3), padding='valid', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#4x4x6 x 64
encoded = AveragePooling3D((2, 2, 2), padding='valid')(x)
#2x2x3 x 64
if autoencoder_stage == 0:
#The Decoder part:
#2x2x3 x 64
x = UpSampling3D((2, 2, 2))(encoded)
#4x4x6 x 64
x = Conv3DTranspose(filters=64, kernel_size=(2,3,3), padding='valid', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
#5x6x8 x 64
x = Conv3DTranspose(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = Conv3DTranspose(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = UpSampling3D((2, 2, 2))(x)
#10x12x16 x 64
x = Conv3DTranspose(filters=32, kernel_size=(2,2,3), padding='valid', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
#11x13x18 x 32
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
decoded = Conv3D(filters=1, kernel_size=(1,1,1), padding='same', activation='linear', kernel_initializer='he_normal')(x)
#Output 11x13x18 x 1
autoencoder = Model(inputs, decoded)
return autoencoder
else:
#Replacement for the decoder part for supervised training:
if autoencoder_stage == 1:
#Load weights of encoder part from existing autoencoder
encoder= Model(inputs=inputs, outputs=encoded)
encoder.load_weights(modelpath_and_name, by_name=True)
x = Flatten()(encoded)
x = Dense(256, activation='relu', kernel_initializer='he_normal')(x)
x = Dense(16, activation='relu', kernel_initializer='he_normal')(x)
outputs = Dense(2, activation='softmax', kernel_initializer='he_normal')(x)
model = Model(inputs=inputs, outputs=outputs)
return model
def setup_vgg_1_xzt(autoencoder_stage, modelpath_and_name=None):
#enhanced version of vgg_0, with zero_center compatibility and batch normalization
#format 11x18x50
#713k params
#autoencoder_stage: Type of training/network
# 0: autoencoder
# 1: encoder+ from autoencoder w/ frozen layers
# 2: encoder+ from scratch, completely unfrozen
#If autoencoder_stage==1 only the first part of the autoencoder (encoder part) will be generated
#These layers are frozen then
#The weights of the original model can be imported then by using load_weights('xxx.h5', by_name=True)
#modelpath_and_name is used to load the encoder part for supervised training,
#and only needed if make_autoencoder==False
if autoencoder_stage == 1:
#Freeze encoder layers
train=False
else:
train=True
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
inputs = Input(shape=(11,18,50,1))
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(inputs)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#11x18x50
x = AveragePooling3D((1, 1, 2), padding='valid')(x)
#11x18x25
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=32, kernel_size=(2,3,2), padding='valid', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#10x16x24
x = AveragePooling3D((2, 2, 2), padding='valid')(x)
#5x8x12
x = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=64, kernel_size=(2,3,3), padding='valid', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#4x6x10
encoded = AveragePooling3D((2, 2, 2), padding='valid')(x)
#2x3x5
if autoencoder_stage == 0:
#The Decoder part:
#2x3x5 x 64
x = UpSampling3D((2, 2, 2))(encoded)
#4x6x10
x = Conv3DTranspose(filters=64, kernel_size=(2,3,3), padding='valid', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
#5x8x12
x = Conv3DTranspose(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = Conv3DTranspose(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = UpSampling3D((2, 2, 2))(x)
#10x16x24
x = Conv3DTranspose(filters=32, kernel_size=(2,3,2), padding='valid', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
#11x18x25
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = UpSampling3D((1, 1, 2))(x)
#11x18x50
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
decoded = Conv3D(filters=1, kernel_size=(1,1,1), padding='same', activation='linear', kernel_initializer='he_normal')(x)
        #Output 11x18x50 x 1
autoencoder = Model(inputs, decoded)
return autoencoder
else:
#Replacement for the decoder part for supervised training:
if autoencoder_stage == 1:
#Load weights of encoder part from existing autoencoder
encoder= Model(inputs=inputs, outputs=encoded)
encoder.load_weights(modelpath_and_name, by_name=True)
x = Flatten()(encoded)
x = Dense(256, activation='relu', kernel_initializer='he_normal')(x)
x = Dense(16, activation='relu', kernel_initializer='he_normal')(x)
outputs = Dense(2, activation='softmax', kernel_initializer='he_normal')(x)
model = Model(inputs=inputs, outputs=outputs)
return model
def setup_vgg_1_xzt_max(autoencoder_stage, modelpath_and_name=None):
#like vgg_1_xzt but with Max/Unmaxpooling
#format 11x18x50 (=9900)
#autoencoder_stage: Type of training/network
# 0: autoencoder
# 1: encoder+ from autoencoder w/ frozen layers
# 2: encoder+ from scratch, completely unfrozen
#If autoencoder_stage==1 only the first part of the autoencoder (encoder part) will be generated
#These layers are frozen then
#The weights of the original model can be imported then by using load_weights('xxx.h5', by_name=True)
#modelpath_and_name is used to load the encoder part for supervised training,
#and only needed if make_autoencoder==False
if autoencoder_stage == 1:
#Freeze encoder layers
train=False
else:
train=True
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
print("Loading model vgg_1_xzt_max")
inputs = Input(shape=(11,18,50,1))
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(inputs)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#11x18x50
x = MaxPooling3D((1, 1, 2), padding='valid')(x)
#11x18x25
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=32, kernel_size=(2,3,2), padding='valid', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#10x16x24
x = MaxPooling3D((2, 2, 2), padding='valid')(x)
#5x8x12
x = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=64, kernel_size=(2,3,3), padding='valid', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#4x6x10
encoded = MaxPooling3D((2, 2, 2), padding='valid')(x)
#2x3x5 x 64 (=1920 = 19.4 % org size)
if autoencoder_stage == 0:
#The Decoder part:
print("Loading Decoder")
#2x3x5 x 64
x = MaxUnpooling3D(encoded)
#4x6x10
x = Conv3DTranspose(filters=64, kernel_size=(2,3,3), padding='valid', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
#5x8x12
x = Conv3DTranspose(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = Conv3DTranspose(filters=64, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = MaxUnpooling3D(x)
#10x16x24
x = Conv3DTranspose(filters=32, kernel_size=(2,3,2), padding='valid', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
#11x18x25
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = MaxUnpooling3D(x,(1,1,2))
#11x18x50
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
decoded = Conv3D(filters=1, kernel_size=(1,1,1), padding='same', activation='linear', kernel_initializer='he_normal')(x)
        #Output 11x18x50 x 1
autoencoder = Model(inputs, decoded)
return autoencoder
else:
#Replacement for the decoder part for supervised training:
print("Loading dense")
if autoencoder_stage == 1:
#Load weights of encoder part from existing autoencoder
print("Loading weights of existing autoencoder", modelpath_and_name)
encoder= Model(inputs=inputs, outputs=encoded)
encoder.load_weights(modelpath_and_name, by_name=True)
x = Flatten()(encoded)
x = Dense(256, activation='relu', kernel_initializer='he_normal')(x)
x = Dense(16, activation='relu', kernel_initializer='he_normal')(x)
outputs = Dense(2, activation='softmax', kernel_initializer='he_normal')(x)
model = Model(inputs=inputs, outputs=outputs)
return model
def setup_vgg_1_xzt_stride(autoencoder_stage, modelpath_and_name=None):
#like vgg1xzt, but with stride>1 instead of pooling
#format 11x18x50
#750k params
#autoencoder_stage: Type of training/network
# 0: autoencoder
# 1: encoder+ from autoencoder w/ frozen layers
# 2: encoder+ from scratch, completely unfrozen
#If autoencoder_stage==1 only the first part of the autoencoder (encoder part) will be generated
#These layers are frozen then
#The weights of the original model can be imported then by using load_weights('xxx.h5', by_name=True)
#modelpath_and_name is used to load the encoder part for supervised training,
#and only needed if make_autoencoder==False
if autoencoder_stage == 1:
#Freeze encoder layers
train=False
else:
train=True
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
inputs = Input(shape=(11,18,50,1))
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(inputs)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#11x18x50
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='same', strides=(1,1,2), kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#11x18x25
x = ZeroPadding3D(((0,1),(0,0),(0,1)))(x)
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='valid', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#10x16x24
x = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', strides=(2,2,2), kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#5x8x12
x = ZeroPadding3D(((0,1),(0,0),(0,0)))(x)
#6x8x12
x = Conv3D(filters=64, kernel_size=(3,3,3), padding='valid', kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
x = Activation('relu', trainable=train)(x)
#4x6x10
x = Conv3D(filters=64, kernel_size=(3,3,3), padding='same', strides=(2,2,2), kernel_initializer='he_normal', use_bias=False, trainable=train)(x)
x = BatchNormalization(axis=channel_axis, trainable=train)(x)
encoded = Activation('relu', trainable=train)(x)
#2x3x5
if autoencoder_stage == 0:
#The Decoder part:
#2x3x5 x 64
#Originally, this layer connected to BN and not to Activation
x = Conv3DTranspose(filters=64, kernel_size=(3,3,3), padding='same', strides=(2,2,2), kernel_initializer='he_normal', use_bias=False)(encoded)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
#4x6x10
x = ZeroPadding3D(((1,2),(2,2),(2,2)))(x)
#7x10x14
x = Conv3D(filters=64, kernel_size=(3,3,3), padding='valid', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
#5x8x12
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', strides=(2,2,2), kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
#10x16x24
x = ZeroPadding3D(((1,2),(2,2),(1,2)))(x)
#13x20x27
x = Conv3D(filters=32, kernel_size=(3,3,3), padding='valid', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
#11x18x25
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', strides=(1,1,2), kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
#11x18x50
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = Conv3DTranspose(filters=32, kernel_size=(3,3,3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
decoded = Conv3D(filters=1, kernel_size=(1,1,1), padding='same', activation='linear', kernel_initializer='he_normal')(x)
        #Output 11x18x50 x 1
autoencoder = Model(inputs, decoded)
return autoencoder
else:
#Replacement for the decoder part for supervised training:
if autoencoder_stage == 1:
#Load weights of encoder part from existing autoencoder
encoder= Model(inputs=inputs, outputs=encoded)
encoder.load_weights(modelpath_and_name, by_name=True)
x = Flatten()(encoded)
x = Dense(256, activation='relu', kernel_initializer='he_normal')(x)
x = Dense(16, activation='relu', kernel_initializer='he_normal')(x)
outputs = Dense(2, activation='softmax', kernel_initializer='he_normal')(x)
model = Model(inputs=inputs, outputs=outputs)
return model
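# Illustrative usage sketch (not from the original file): how the
# autoencoder_stage values documented above are typically chained together.
# Stage 0 builds the full autoencoder for unsupervised pre-training; stage 1
# rebuilds the frozen encoder plus a dense head and loads the saved encoder
# weights by layer name. The weight file name "vgg_1_xzt_autoencoder.h5" and
# the compile settings are assumptions for illustration only.
def _example_vgg_1_xzt_workflow():
    # Stage 0: full autoencoder, trained on unlabelled events.
    autoencoder = setup_vgg_1_xzt(autoencoder_stage=0)
    autoencoder.compile(optimizer="adam", loss="mse")
    # ...train, then e.g. autoencoder.save_weights("vgg_1_xzt_autoencoder.h5")
    # Stage 1: frozen encoder + dense head, encoder weights imported by name.
    classifier = setup_vgg_1_xzt(autoencoder_stage=1,
                                 modelpath_and_name="vgg_1_xzt_autoencoder.h5")
    classifier.compile(optimizer="adam", loss="categorical_crossentropy")
    return autoencoder, classifier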
|
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility script that can extract and edit resources in a Windows binary.
For detailed help, see the script's usage by invoking it with --help."""
import ctypes
import ctypes.wintypes
import logging
import optparse
import os
import shutil
import sys
import tempfile
import win32api
import win32con
_LOGGER = logging.getLogger(__name__)
# The win32api-supplied UpdateResource wrapper unfortunately does not allow
# one to remove resources due to overzealous parameter verification.
# For that case we're forced to go straight to the native API implementation.
UpdateResource = ctypes.windll.kernel32.UpdateResourceW
UpdateResource.argtypes = [
ctypes.wintypes.HANDLE, # HANDLE hUpdate
ctypes.c_wchar_p, # LPCTSTR lpType
ctypes.c_wchar_p, # LPCTSTR lpName
ctypes.c_short, # WORD wLanguage
ctypes.c_void_p, # LPVOID lpData
ctypes.c_ulong, # DWORD cbData
]
UpdateResource.restype = ctypes.c_short
def _ResIdToString(res_id):
# Convert integral res types/ids to a string.
if isinstance(res_id, int):
return "#%d" % res_id
return res_id
class ResourceEditor(object):
"""A utility class to make it easy to extract and manipulate resources in a
Windows binary."""
def __init__(self, input_file, output_file):
"""Create a new editor.
Args:
input_file: path to the input file.
output_file: (optional) path to the output file.
"""
self._input_file = input_file
self._output_file = output_file
self._modified = False
self._module = None
self._temp_dir = None
self._temp_file = None
self._update_handle = None
def __del__(self):
if self._module:
win32api.FreeLibrary(self._module)
self._module = None
if self._update_handle:
_LOGGER.info('Canceling edits to "%s".', self.input_file)
win32api.EndUpdateResource(self._update_handle, False)
self._update_handle = None
if self._temp_dir:
_LOGGER.info('Removing temporary directory "%s".', self._temp_dir)
shutil.rmtree(self._temp_dir)
self._temp_dir = None
def _GetModule(self):
if not self._module:
# Specify a full path to LoadLibraryEx to prevent
# it from searching the path.
input_file = os.path.abspath(self.input_file)
_LOGGER.info('Loading input_file from "%s"', input_file)
self._module = win32api.LoadLibraryEx(
input_file, None, win32con.LOAD_LIBRARY_AS_DATAFILE)
return self._module
def _GetTempDir(self):
if not self._temp_dir:
self._temp_dir = tempfile.mkdtemp()
_LOGGER.info('Created temporary directory "%s".', self._temp_dir)
return self._temp_dir
def _GetUpdateHandle(self):
if not self._update_handle:
# Make a copy of the input file in the temp dir.
self._temp_file = os.path.join(self.temp_dir,
os.path.basename(self._input_file))
shutil.copyfile(self._input_file, self._temp_file)
# Open a resource update handle on the copy.
_LOGGER.info('Opening temp file "%s".', self._temp_file)
self._update_handle = win32api.BeginUpdateResource(self._temp_file, False)
return self._update_handle
modified = property(lambda self: self._modified)
input_file = property(lambda self: self._input_file)
module = property(_GetModule)
temp_dir = property(_GetTempDir)
update_handle = property(_GetUpdateHandle)
def ExtractAllToDir(self, extract_to):
"""Extracts all resources from our input file to a directory hierarchy
in the directory named extract_to.
The generated directory hierarchy is three-level, and looks like:
      resource-type/
        lang-id/
          resource-name.
Args:
extract_to: path to the folder to output to. This folder will be erased
and recreated if it already exists.
"""
_LOGGER.info('Extracting all resources from "%s" to directory "%s".',
self.input_file, extract_to)
if os.path.exists(extract_to):
_LOGGER.info('Destination directory "%s" exists, deleting', extract_to)
shutil.rmtree(extract_to)
# Make sure the destination dir exists.
os.makedirs(extract_to)
# Now enumerate the resource types.
for res_type in win32api.EnumResourceTypes(self.module):
res_type_str = _ResIdToString(res_type)
# And the resource names.
for res_name in win32api.EnumResourceNames(self.module, res_type):
res_name_str = _ResIdToString(res_name)
# Then the languages.
for res_lang in win32api.EnumResourceLanguages(self.module,
res_type, res_name):
res_lang_str = _ResIdToString(res_lang)
dest_dir = os.path.join(extract_to, res_type_str, res_lang_str)
dest_file = os.path.join(dest_dir, res_name_str)
_LOGGER.info('Extracting resource "%s", lang "%d" name "%s" '
'to file "%s".',
res_type_str, res_lang, res_name_str, dest_file)
# Extract each resource to a file in the output dir.
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
self.ExtractResource(res_type, res_lang, res_name, dest_file)
def ExtractResource(self, res_type, res_lang, res_name, dest_file):
"""Extracts a given resource, specified by type, language id and name,
to a given file.
Args:
res_type: the type of the resource, e.g. "B7".
res_lang: the language id of the resource e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
dest_file: path to the file where the resource data will be written.
"""
_LOGGER.info('Extracting resource "%s", lang "%d" name "%s" '
'to file "%s".', res_type, res_lang, res_name, dest_file)
data = win32api.LoadResource(self.module, res_type, res_name, res_lang)
with open(dest_file, 'wb') as f:
f.write(data)
def RemoveResource(self, res_type, res_lang, res_name):
"""Removes a given resource, specified by type, language id and name.
Args:
res_type: the type of the resource, e.g. "B7".
res_lang: the language id of the resource, e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
"""
_LOGGER.info('Removing resource "%s:%s".', res_type, res_name)
# We have to go native to perform a removal.
ret = UpdateResource(self.update_handle,
res_type,
res_name,
res_lang,
None,
0)
# Raise an error on failure.
if ret == 0:
error = win32api.GetLastError()
print ("error", error)
raise RuntimeError(error)
self._modified = True
def UpdateResource(self, res_type, res_lang, res_name, file_path):
"""Inserts or updates a given resource with the contents of a file.
This is a legacy version of UpdateResourceData, where the data arg is read
    from a file, rather than passed directly.
"""
_LOGGER.info('Writing resource from file %s', file_path)
with open(file_path, 'rb') as f:
self.UpdateResourceData(res_type, res_lang, res_name, f.read())
def UpdateResourceData(self, res_type, res_lang, res_name, data):
"""Inserts or updates a given resource with the given data.
Args:
res_type: the type of the resource, e.g. "B7".
res_lang: the language id of the resource, e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
data: the new resource data.
"""
_LOGGER.info('Writing resource "%s:%s"', res_type, res_name)
win32api.UpdateResource(self.update_handle,
res_type,
res_name,
data,
res_lang)
self._modified = True
def Commit(self):
"""Commit any successful resource edits this editor has performed.
This has the effect of writing the output file.
"""
if self._update_handle:
update_handle = self._update_handle
self._update_handle = None
win32api.EndUpdateResource(update_handle, False)
_LOGGER.info('Writing edited file to "%s".', self._output_file)
shutil.copyfile(self._temp_file, self._output_file)
else:
_LOGGER.info('No edits made. Copying input to "%s".', self._output_file)
shutil.copyfile(self._input_file, self._output_file)
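# Illustrative sketch (not part of the original script): driving
# ResourceEditor directly from Python instead of through the command line
# options below. The file names and the resource type/name/langid values are
# hypothetical and simply mirror the CLI examples given in _USAGE.
def _ExampleProgrammaticEdit():
  editor = ResourceEditor('mini_installer.exe', 'mini_installer_packed.exe')
  # Pull one resource out to disk.
  editor.ExtractResource('B7', 1033, 'CHROME.PACKED.7Z', 'chrome.7z')
  # Replace one resource, remove another, then write the output file.
  editor.UpdateResource('B7', 1033, 'SETUP.EXE.packed.7z', 'setup.packed.7z')
  editor.RemoveResource('BL', 1033, 'SETUP.EXE')
  if editor.modified:
    editor.Commit()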
_USAGE = """\
usage: %prog [options] input_file
A utility script to extract and edit the resources in a Windows executable.
EXAMPLE USAGE:
# Extract from mini_installer.exe, the resource type "B7", langid 1033 and
# name "CHROME.PACKED.7Z" to a file named chrome.7z.
# Note that 1033 corresponds to English (United States).
%prog mini_installer.exe --extract B7 1033 CHROME.PACKED.7Z chrome.7z
# Update mini_installer.exe by removing the resource type "BL", langid 1033 and
# name "SETUP.EXE". Add the resource type "B7", langid 1033 and name
# "SETUP.EXE.packed.7z" from the file setup.packed.7z.
# Write the edited file to mini_installer_packed.exe.
%prog mini_installer.exe \\
--remove BL 1033 SETUP.EXE \\
--update B7 1033 SETUP.EXE.packed.7z setup.packed.7z \\
--output_file mini_installer_packed.exe
"""
def _ParseArgs():
parser = optparse.OptionParser(_USAGE)
parser.add_option('--verbose', action='store_true',
help='Enable verbose logging.')
parser.add_option('--extract_all',
help='Path to a folder which will be created, in which all resources '
'from the input_file will be stored, each in a file named '
'"res_type/lang_id/res_name".')
parser.add_option('--extract', action='append', default=[], nargs=4,
help='Extract the resource with the given type, language id and name '
'to the given file.',
metavar='type langid name file_path')
parser.add_option('--remove', action='append', default=[], nargs=3,
help='Remove the resource with the given type, langid and name.',
metavar='type langid name')
parser.add_option('--update', action='append', default=[], nargs=4,
help='Insert or update the resource with the given type, langid and '
'name with the contents of the file given.',
metavar='type langid name file_path')
parser.add_option('--output_file',
help='On success, OUTPUT_FILE will be written with a copy of the '
'input file with the edits specified by any remove or update '
'options.')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('You have to specify an input file to work on.')
modify = options.remove or options.update
if modify and not options.output_file:
parser.error('You have to specify an output file with edit options.')
return options, args
def _ConvertInts(*args):
"""Return args with any all-digit strings converted to ints."""
results = []
for arg in args:
if isinstance(arg, basestring) and arg.isdigit():
results.append(int(arg))
else:
results.append(arg)
return results
def main(options, args):
"""Main program for the script."""
if options.verbose:
logging.basicConfig(level=logging.INFO)
# Create the editor for our input file.
editor = ResourceEditor(args[0], options.output_file)
if options.extract_all:
editor.ExtractAllToDir(options.extract_all)
for res_type, res_lang, res_name, dest_file in options.extract:
res_type, res_lang, res_name = _ConvertInts(res_type, res_lang, res_name)
editor.ExtractResource(res_type, res_lang, res_name, dest_file)
for res_type, res_lang, res_name in options.remove:
res_type, res_lang, res_name = _ConvertInts(res_type, res_lang, res_name)
editor.RemoveResource(res_type, res_lang, res_name)
for res_type, res_lang, res_name, src_file in options.update:
res_type, res_lang, res_name = _ConvertInts(res_type, res_lang, res_name)
editor.UpdateResource(res_type, res_lang, res_name, src_file)
if editor.modified:
editor.Commit()
if __name__ == '__main__':
sys.exit(main(*_ParseArgs()))
|
|
import genepy
import numpy as np
import os
from Bio import SeqRecord
# NEXT TIME :
# Two errors to deal with :
# - Err.: one or more missing sequences in block 2
# --- solutions - Guindon ?
# --- solutions - read .phy generated by ClustalO, and rewrite it using BioPython
# - Duplicate names in PHYLIP files due to truncation. Way around ?
# .remove() - remove some sequences from the array
# Base sequence list class
class seqarray :
"""GenePy Sequence Array object.
For documentation, see http://github.io/QCaudron/genepy
"""
def __init__(self, source) :
"""Constructor.
Argument : a filename or a list of strings that represent sequences.
- mysequences = genepy.seqarray("from_genbank.gb") -- loads the sequences
in from_genbank.gb as BioPython Bio.Seq objects.
- mysequences = genepy.seqarray(seq_list), where seq_list is a list of
strings ( such as ["ACTG", "AGTA", "TTGC"] ) converts these to BioPython
Bio.Seq objects using the generic_dna alphabet ( for now ).
"""
# If we're reading in a sequence set from a file
if type(source) is str :
if os.path.isfile(source) :
self.seq = genepy.readalignment(source)
self.filename = source
else :
print "%s not found, aborting." % source
# If we're fed a list
elif type(source) is list :
self.seq = [SeqRecord.SeqRecord(s) for s in source]
self.filename = "genepy.fasta"
else :
raise TypeError("Expected a filename or a list of strings.")
# Generate static members
self.update()
def __str__(self) :
"""Long string representation of a genepy.seqarray object."""
out = self.__repr__()
out += ("-- C+G content : %.03f\n" % (self.statistics["C"].mean() + self.statistics["G"].mean()))
out += ("-- From file : %s" % self.filename.split("/")[-1])
return out
def __repr__(self) :
"""Short string representation of a genepy.seqarray object."""
summary = "GenePy sequence array (genepy.seqarray) :\n"
summary += "-- Sequences : %d\n" % self.len
summary += "-- Mean length : %.01f (min %d, max %d)\n" % \
(np.array(self.seq_len).mean(), np.min(self.seq_len), np.max(self.seq_len))
return summary
def __iter__(self) :
"""Iterator function."""
self.it = 0
return self
def next(self) :
"""Next object in iteration."""
if self.it == self.len :
raise StopIteration
else :
self.it += 1
return self.seq[self.it - 1]
def update(self) :
"""Updates the member variables of a genepy.seqarray object.
This function is called whenever sequences are aligned or trimmed.
Any changes made directly to genepy.seqarray variables ( such as to the
sequence list, genepy.seqarray.seq ), will not be reflected in other
member variables ( such as genepy.seqarray.len ) until this function
is called. In general, as long as the user calls genepy.seqarray methods
only, and no changes are otherwise made to the object, this method does
not need to be used.
"""
# Number of sequences
self.len = len(self.seq)
# Sequence lengths
self.seq_len = np.array([len(s.seq) for s in self.seq])
# Alignment numerical array
l = self.seq_len.max() if type(self.seq_len) == np.ndarray else self.seq_len
self.array = genepy.alignmentarray(self.seq, length = l)
# Statistics
self.statistics = genepy.calcstats(self.seq)
# Show sequences
def show(self) :
"""Display the sequences visually as a matplotlib.pyplot.imshow()
Colours :
-- A : dark green
-- C : dark red
-- G : orange
-- T : light green
-- unknown / empty : black
Cytosine and guanine are represented by "warm" colours; adenine and
thymine are shown in "cold" colours.
"""
genepy.showalignment(self.array)
# Align sequences
def align(self, force = True, it = False, full = False, full_iter = False, auto = True, threads = False) :
"""Align the array of sequences using ClustalO.
-- force : True / False; overwrite filename, if it exists
-- it : False, integers > 0; iterate the guide tree
-- full : True / False; use full distance matrix for guide-tree calculation
-- full_iter : True / False; use full distance matrix during iteration only
-- auto : True / False; automatically select options for speed and accuracy
-- threads : False, integers > 0; limit the number of threads; False uses all
"""
# System call to ClustalO
genepy.align(self.filename, force, threads, full, full_iter, it, auto)
# Read aligned sequence array
self.seq = genepy.readalignment(os.path.splitext(self.filename)[0] + "_aligned_genepy.phy")
# Update static members
self.update()
def phylotree(self, nucleotide_frequency = "empirical", bootstrap = -4, search_algorithm = "BEST") :
"""Construct a phylogenetic tree using PhyML.
-- nucleotide_frequency : "empirical" or "max_likelihood"
-- bootstrap : -4 for SH-like branch supports only; -2 for Chi^2;
-1 for approximate likelihood ratio; 0 for no bootstrapping,
integers > 0 for the number of bootstraps to perform, will try to use MPI
-- search_algorithm : "NNI" for nearest-neighbour interchange; "SPR" for subtree
pruning and regrafting; "BEST" for best of both
"""
if not os.path.isfile(os.path.splitext(self.filename)[0] + "_aligned_genepy.phy") :
print "GenePy can't find an aligned sequence file for %s.\nTry calling .align()." % \
self.filename.split("/")[-1]
return
genepy.phylotree(self.filename, nucleotide_frequency, bootstrap, search_algorithm)
def stats(self) :
"""Display sequence array statistics."""
# Display statistics
genepy.stats(self.statistics)
def trimalignment(self, array = None, left = None, right = None) :
"""Trim the sequence array by a given number of nucleotides from left and right.
left, right : like performing mysequences.seq = mysequences.seq[left:right]
"""
self.seq = genepy.trimalignment(self.seq, array, left, right)
self.update()
def dropempties(self, fraction = 0.5) :
"""Remove any sequence containing less than a fraction of known nucleotides.
fraction : between 0 and 1.
Useful after trimming to a given region of the genome."""
self.seq = genepy.dropempties(self.seq, fraction)
self.update()
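# Illustrative usage sketch (not part of the original module): the typical
# seqarray workflow described in the docstrings above -- load, align, inspect,
# trim and filter. "my_sequences.fasta" is a hypothetical input file and the
# trimming bounds are arbitrary; every call simply chains the methods defined
# above.
def _example_seqarray_workflow() :
    sequences = seqarray("my_sequences.fasta")    # or seqarray(["ACTG", "AGTA"])
    sequences.align(threads = 4)                  # system call to ClustalO
    sequences.stats()                             # display per-nucleotide statistics
    sequences.trimalignment(left = 100, right = -100)
    sequences.dropempties(fraction = 0.5)
    return sequences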
|
|
"""The stomp.py command line client (used for testing or simple STOMP command scripting).
"""
import base64
from cmd import Cmd
from optparse import OptionParser
import os
import sys
import time
from stomp.adapter.multicast import MulticastConnection
import stomp.colors
from stomp.connect import StompConnection10, StompConnection11, StompConnection12
from stomp.listener import ConnectionListener, StatsListener
sys.path.append('.')
import stomp
##
# Command-line version string
#
stomppy_version = 'Stomp.py Version %s.%s.%s' % stomp.__version__
try:
import uuid
except ImportError:
from backward import uuid
class SubscriptionInfo(object):
"""
Used to store info about a subscription.
"""
def __init__(self, id, ack):
self.id = id
self.ack = ack
class StompCLI(Cmd, ConnectionListener):
"""
A command line interface to the stomp.py client. See :py:class:`stomp.connect.StompConnection11`
for more information on establishing a connection to a stomp server.
"""
def __init__(self, host='localhost', port=61613, user='', passcode='', ver='1.1', prompt='> ', verbose=True, use_ssl=False, stdin=sys.stdin, stdout=sys.stdout):
Cmd.__init__(self, 'Tab', stdin, stdout)
ConnectionListener.__init__(self)
self.prompt = prompt
self.verbose = verbose
self.user = user
self.passcode = passcode
self.__quit = False
if ver == '1.0':
self.conn = StompConnection10([(host, port)], wait_on_receipt=True)
elif ver == '1.1':
self.conn = StompConnection11([(host, port)], wait_on_receipt=True)
elif ver == '1.2':
self.conn = StompConnection12([(host, port)], wait_on_receipt=True)
elif ver == 'multicast':
self.conn = MulticastConnection()
else:
raise RuntimeError('Unknown version')
if use_ssl:
self.conn.set_ssl([(host, port)])
self.conn.set_listener('', self)
self.conn.start()
self.conn.connect(self.user, self.passcode, wait=True)
self.transaction_id = None
self.version = ver
try:
self.nversion = float(ver)
except ValueError:
self.nversion = 1.0
self.__subscriptions = {}
self.__subscription_id = 1
def __print_async(self, frame_type, headers, body):
"""
Utility function to print a message and setup the command prompt
for the next input
"""
if self.__quit:
return
self.__sysout("\r \r", end='')
if self.verbose:
self.__sysout(frame_type)
for k, v in headers.items():
self.__sysout('%s: %s' % (k, v))
if self.prompt != '':
self.__sysout('')
self.__sysout(body)
self.__sysout(self.prompt, end='')
self.stdout.flush()
def __sysout(self, msg, end="\n"):
self.stdout.write(str(msg) + end)
def __error(self, msg, end="\n"):
self.stdout.write(stomp.colors.BOLD + stomp.colors.RED + str(msg) + stomp.colors.NO_COLOR + end)
def on_connecting(self, host_and_port):
"""
See :py:meth:`ConnectionListener.on_connecting`
"""
def on_disconnected(self):
"""
see :py:meth:`ConnectionListener.on_disconnected`
"""
if not self.__quit:
self.__error("lost connection")
def on_message(self, headers, body):
"""
See :py:meth:`ConnectionListener.on_message`
Special case: if the header 'filename' is present, the content is written out
as a file
"""
if 'filename' in headers:
content = base64.b64decode(body.encode())
if os.path.exists(headers['filename']):
fname = '%s.%s' % (headers['filename'], int(time.time()))
else:
fname = headers['filename']
with open(fname, 'wb') as f:
f.write(content)
self.__print_async("MESSAGE", headers, "Saved file: %s" % fname)
else:
self.__print_async("MESSAGE", headers, body)
def on_error(self, headers, body):
"""
See :py:meth:`ConnectionListener.on_error`
"""
self.__print_async("ERROR", headers, body)
def on_receipt(self, headers, body):
"""
See :py:meth:`ConnectionListener.on_receipt`
"""
self.__print_async("RECEIPT", headers, body)
def on_connected(self, headers, body):
"""
See :py:meth:`ConnectionListener.on_connected`
"""
self.__print_async("CONNECTED", headers, body)
def help_help(self):
self.__sysout('Quick help on commands')
def default(self, line):
self.__error('Unknown command: %s' % line.split()[0])
def emptyline(self):
pass
def help(self, usage, description, required=[], optional=[]):
required.insert(0, '')
rparams = "\n\t".join(required)
optional.insert(0, '')
oparams = "\n\t".join(optional)
m = {
'hl': stomp.colors.BOLD + stomp.colors.GREEN,
'nc': stomp.colors.NO_COLOR,
'usage': usage,
'description': description,
'required': rparams.rstrip(),
'optional': oparams.rstrip()
}
if rparams.rstrip() != '':
rparams = '''%(hl)sRequired Parameters:%(nc)s%(required)s\n\n''' % m
m['required'] = rparams
if oparams.rstrip() != '':
oparams = '''%(hl)sOptional Parameters:%(nc)s%(optional)s\n\n''' % m
m['optional'] = oparams
self.__sysout('''%(hl)sUsage:%(nc)s
\t%(usage)s
%(required)s%(optional)s%(hl)sDescription:%(nc)s
\t%(description)s
''' % m)
def do_quit(self, args):
self.__quit = True
self.__sysout('Shutting down, please wait')
return True
do_exit = do_quit
do_EOF = do_quit
def help_quit(self):
self.help('exit', 'Exit the stomp client')
help_exit = help_quit
def help_EOF(self):
self.help('exit', 'Exit the stomp client (using CTRL-D)')
def do_subscribe(self, args):
args = args.split()
if len(args) < 1:
self.__error('Expecting: subscribe <destination> [ack]')
return
name = args[0]
if name in self.__subscriptions:
self.__error('Already subscribed to %s' % name)
return
ack_mode = 'auto'
if len(args) >= 2:
ack_mode = args[1]
sid = self.__subscription_id
self.__subscription_id += 1
self.__sysout('Subscribing to "%s" with acknowledge set to "%s", id set to "%s"' % (name, ack_mode, sid))
self.conn.subscribe(destination=name, ack=ack_mode, id=sid)
self.__subscriptions[name] = SubscriptionInfo(sid, ack_mode)
def help_subscribe(self):
self.help('subscribe <destination> [ack]',
'''Register to listen to a given destination. Like send, the subscribe command requires a destination
\theader indicating which destination to subscribe to. The ack parameter is optional, and defaults to
\tauto.''', ['destination - the name to subscribe to'], ['ack - how to handle acknowledgements for a message; either automatically (auto) or manually (client)'])
def do_unsubscribe(self, args):
args = args.split()
if len(args) < 1:
self.__error('Expecting: unsubscribe <destination>')
return
if args[0] not in self.__subscriptions:
self.__sysout('Subscription %s not found' % args[0])
return
self.__sysout('Unsubscribing from "%s"' % args[0])
self.conn.unsubscribe(destination=args[0], id=self.__subscriptions[args[0]].id)
del self.__subscriptions[args[0]]
def help_unsubscribe(self):
        self.help('unsubscribe <destination>', 'Remove an existing subscription - so that the client no longer receives messages from that destination.',
                  ['destination - the name to unsubscribe from'])
def do_send(self, args):
args = args.split()
if len(args) < 2:
self.__error('Expecting: send <destination> <message>')
elif not self.transaction_id:
self.conn.send(args[0], ' '.join(args[1:]))
else:
self.conn.send(args[0], ' '.join(args[1:]), transaction=self.transaction_id)
def complete_send(self, text, line, begidx, endidx):
mline = line.split(' ')[1]
offs = len(mline) - len(text)
return [s[offs:] for s in self.__subscriptions if s.startswith(mline)]
complete_unsubscribe = complete_send
complete_sendrec = complete_send
complete_sendreply = complete_send
complete_sendfile = complete_send
def help_send(self):
self.help('send <destination> <message>', 'Sends a message to a destination in the messaging system.',
['destination - where to send the message', 'message - the content to send'])
def do_sendrec(self, args):
args = args.split()
receipt_id = str(uuid.uuid4())
if len(args) < 2:
self.__error('Expecting: sendrec <destination> <message>')
elif not self.transaction_id:
self.conn.send(args[0], ' '.join(args[1:]), receipt=receipt_id)
else:
self.conn.send(args[0], ' '.join(args[1:]), transaction=self.transaction_id, receipt=receipt_id)
def help_sendrec(self):
self.help('sendrec <destination> <message>', 'Sends a message to a destination in the messaging system and blocks for receipt of the message.',
['destination - where to send the message', 'message - the content to send'])
def do_sendreply(self, args):
args = args.split()
if len(args) < 3:
self.__error('Expecting: sendreply <destination> <correlation-id> <message>')
else:
self.conn.send(args[0], "%s\n" % ' '.join(args[2:]), headers={'correlation-id': args[1]})
def help_sendreply(self):
self.help('sendreply <destination> <correlation-id> <message>', 'Sends a reply message to a destination in the messaging system.',
['destination - where to send the message', 'correlation-id - the correlating identifier to send with the response', 'message - the content to send'])
def do_sendfile(self, args):
args = args.split()
if len(args) < 2:
self.__error('Expecting: sendfile <destination> <filename>')
elif not os.path.exists(args[1]):
self.__error('File %s does not exist' % args[1])
else:
with open(args[1], mode='rb') as f:
s = f.read()
msg = base64.b64encode(s).decode()
if not self.transaction_id:
self.conn.send(args[0], msg, filename=args[1])
else:
self.conn.send(args[0], msg, filename=args[1], transaction=self.transaction_id)
def help_sendfile(self):
self.help('sendfile <destination> <filename>', 'Sends a file to a destination in the messaging system.',
['destination - where to send the message', 'filename - the file to send'])
def do_version(self, args):
self.__sysout('%s%s [Protocol version %s]%s' % (stomp.colors.BOLD, stomppy_version, self.conn.version, stomp.colors.NO_COLOR))
do_ver = do_version
def help_version(self):
self.help('version', 'Display the version of the client')
help_ver = help_version
def check_ack_nack(self, cmd, args):
if self.nversion >= 1.2 and len(args) < 1:
self.__error("Expecting: %s <ack-id>" % cmd)
return None
elif self.nversion == 1.1 and len(args) < 2:
self.__error("Expecting: %s <message-id> <subscription-id>" % cmd)
return None
elif len(args) < 1:
self.__error("Expecting: %s <message-id>" % cmd)
return None
if len(args) == 1:
return (args[0], None)
else:
return (args[0], args[1])
def do_ack(self, args):
args = args.split()
hdrs = self.check_ack_nack('ack', args)
if hdrs is None:
return
(message_id, subscription_id) = hdrs
if not self.transaction_id:
self.conn.ack(message_id, subscription_id)
else:
self.conn.ack(message_id, subscription_id, transaction=self.transaction_id)
def help_ack(self):
self.help('ack <message-id> [subscription-id]', '''The command 'ack' is used to acknowledge consumption of a message from a subscription using client
\tacknowledgment. When a client has issued a 'subscribe' with the ack flag set to client, any messages
\treceived from that destination will not be considered to have been consumed (by the server) until
\tthe message has been acknowledged.''', ['message-id - the id of the message being acknowledged'], ['subscription-id - the id of the subscription (only required for STOMP 1.1)'])
def do_nack(self, args):
args = args.split()
hdrs = self.check_ack_nack('nack', args)
if hdrs is None:
return
        (message_id, subscription_id) = hdrs
        if not self.transaction_id:
            self.conn.nack(message_id, subscription_id)
        else:
            self.conn.nack(message_id, subscription_id, transaction=self.transaction_id)
def help_nack(self):
self.help('nack <message-id> [subscription]', '''The command 'nack' is used to acknowledge the failure of a message from a subscription using client
\tacknowledgment. When a client has issued a 'subscribe' with the ack flag set to client, any messages
\treceived from that destination will not be considered to have been consumed (by the server) until
\tthe message has been acknowledged (ack or nack).''', ['message-id - the id of the message being acknowledged'])
def do_abort(self, args):
if not self.transaction_id:
self.__error("Not currently in a transaction")
else:
self.conn.abort(transaction=self.transaction_id)
self.__sysout('Aborted transaction: %s' % self.transaction_id)
self.transaction_id = None
do_rollback = do_abort
def help_abort(self):
self.help('abort', 'Roll back a transaction in progress.')
help_rollback = help_abort
def do_begin(self, args):
if self.transaction_id:
self.__error("Currently in a transaction (%s)" % self.transaction_id)
else:
self.transaction_id = self.conn.begin()
self.__sysout('Transaction id: %s' % self.transaction_id)
def help_begin(self):
self.help('begin', '''Start a transaction. Transactions in this case apply to sending and acknowledging -
\tany messages sent or acknowledged during a transaction will be handled atomically based on the
\ttransaction.''')
def do_commit(self, args):
if not self.transaction_id:
self.__error("Not currently in a transaction")
else:
self.__sysout('Committing %s' % self.transaction_id)
self.conn.commit(transaction=self.transaction_id)
self.transaction_id = None
def help_commit(self):
self.help('commit', 'Commit a transaction in progress.')
def do_stats(self, args):
args = args.split()
if len(args) < 1:
stats = self.conn.get_listener('stats')
if stats:
self.__sysout(stats)
else:
self.__error('No stats available')
elif args[0] == 'on':
self.conn.set_listener('stats', StatsListener())
elif args[0] == 'off':
self.conn.remove_listener('stats')
else:
self.__error('Expecting: stats [on|off]')
def help_stats(self):
self.help('stats [on|off]', '''Record statistics on messages sent, received, errors, etc. If no argument (on|off) is specified,
\tdump the current statistics.''')
def do_run(self, args):
args = args.split()
if len(args) == 0:
self.__error("Expecting: run <filename>")
elif not os.path.exists(args[0]):
self.__error("File %s was not found" % args[0])
else:
with open(args[0]) as f:
lines = f.read().split('\n')
for line in lines:
self.onecmd(line)
def help_run(self):
self.help('run <filename>', 'Execute commands in a specified file')
def do_nothing_loop():
while 1:
time.sleep(1)
def optional_arg(arg_default):
def func(option, opt_str, value, parser):
if parser.rargs and not parser.rargs[0].startswith('-'):
val = parser.rargs[0]
parser.rargs.pop(0)
else:
val = arg_default
setattr(parser.values, option.dest, val)
return func
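# Illustrative sketch (not part of the original client): StompCLI can also be
# driven programmatically, issuing the same do_* commands that cmdloop() and
# do_run() dispatch. Constructing it opens a connection, so this assumes a
# broker is listening on localhost:61613; the credentials and destination are
# hypothetical.
def _example_scripted_session():
    cli = StompCLI(host='localhost', port=61613, user='admin', passcode='password',
                   ver='1.1', prompt='', verbose=False)
    cli.do_subscribe('/queue/test')
    cli.do_send('/queue/test hello from a scripted session')
    cli.do_quit(None)
    cli.conn.disconnect()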
def main():
parser = OptionParser(version=stomppy_version)
parser.add_option('-H', '--host', type='string', dest='host', default='localhost',
help='Hostname or IP to connect to. Defaults to localhost if not specified.')
parser.add_option('-P', '--port', type=int, dest='port', default=61613,
help='Port providing stomp protocol connections. Defaults to 61613 if not specified.')
parser.add_option('-U', '--user', type='string', dest='user', default=None,
help='Username for the connection')
parser.add_option('-W', '--password', type='string', dest='password', default=None,
help='Password for the connection')
parser.add_option('-F', '--file', type='string', dest='filename',
help='File containing commands to be executed, instead of prompting from the command prompt.')
parser.add_option('-S', '--stomp', type='string', dest='stomp', default='1.1',
help='Set the STOMP protocol version.')
parser.add_option('-L', '--listen', type='string', dest='listen', default=None,
help='Listen for messages on a queue/destination')
parser.add_option("-V", "--verbose", dest="verbose", default='on',
help='Verbose logging "on" or "off" (if on, full headers from stomp server responses are printed)')
parser.add_option('--ssl', action='callback', callback=optional_arg(True), dest='ssl',
help='Enable SSL connection')
parser.set_defaults()
(options, _) = parser.parse_args()
if options.verbose == 'on':
verbose = True
else:
verbose = False
if options.ssl is None:
options.ssl = False
if options.listen:
prompt = ''
else:
prompt = '> '
st = StompCLI(options.host, options.port, options.user, options.password, options.stomp, prompt, verbose, options.ssl)
if options.listen:
st.do_subscribe(options.listen)
try:
while 1:
time.sleep(10)
except:
print("\n")
elif options.filename:
st.do_run(options.filename)
else:
# disable CTRL-C, since can't guarantee correct handling of disconnect
import signal
def signal_handler(signal, frame):
pass
signal.signal(signal.SIGINT, signal_handler)
try:
try:
st.cmdloop()
except KeyboardInterrupt:
                st.do_quit(None)
finally:
st.conn.disconnect()
#
# command line access
#
if __name__ == '__main__':
try:
main()
except:
pass
|
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Starting point for writing scripts to integrate TFLM with external IDEs.
This script can be used to output a tree containing only the sources and headers
needed to use TFLM for a specific configuration (e.g. target and
optimized_kernel_implementation). This should serve as a starting
point to integrate TFLM with external IDEs.
The goal is for this script to be an interface that is maintained by the TFLM
team and any additional scripting needed for integration with a particular IDE
should be written external to the TFLM repository and built to work on top of
the output tree generated with this script.
We will add more documentation for a desired end-to-end integration workflow as
we get further along in our prototyping. See this github issue for more details:
https://github.com/tensorflow/tensorflow/issues/47413
"""
import argparse
import fileinput
import os
import re
import shutil
import subprocess
def _get_dirs(file_list):
dirs = set()
for filepath in file_list:
dirs.add(os.path.dirname(filepath))
return dirs
def _get_file_list(key, makefile_options):
params_list = [
"make", "-f", "tensorflow/lite/micro/tools/make/Makefile", key
] + makefile_options.split()
process = subprocess.Popen(params_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise RuntimeError("%s failed with \n\n %s" %
(" ".join(params_list), stderr.decode()))
return [bytepath.decode() for bytepath in stdout.split()]
def _third_party_src_and_dest_files(prefix_dir, makefile_options):
src_files = []
src_files.extend(_get_file_list("list_third_party_sources",
makefile_options))
src_files.extend(_get_file_list("list_third_party_headers",
makefile_options))
  # The list_third_party_* rules give paths relative to the root of the git repo.
# However, in the output tree, we would like for the third_party code to be a
# tree under prefix_dir/third_party, with the path to the tflm_download
# directory removed. The path manipulation logic that follows removes the
# downloads directory prefix, and adds the third_party prefix to create a
# list of destination directories for each of the third party files.
tflm_download_path = "tensorflow/lite/micro/tools/make/downloads"
dest_files = [
os.path.join(prefix_dir, "third_party",
os.path.relpath(f, tflm_download_path)) for f in src_files
]
return src_files, dest_files
def _tflm_src_and_dest_files(prefix_dir, makefile_options):
src_files = []
src_files.extend(_get_file_list("list_library_sources", makefile_options))
src_files.extend(_get_file_list("list_library_headers", makefile_options))
dest_files = [os.path.join(prefix_dir, src) for src in src_files]
return src_files, dest_files
def _get_src_and_dest_files(prefix_dir, makefile_options):
tflm_src_files, tflm_dest_files = _tflm_src_and_dest_files(
prefix_dir, makefile_options)
third_party_srcs, third_party_dests = _third_party_src_and_dest_files(
prefix_dir, makefile_options)
all_src_files = tflm_src_files + third_party_srcs
all_dest_files = tflm_dest_files + third_party_dests
return all_src_files, all_dest_files
def _copy(src_files, dest_files):
for dirname in _get_dirs(dest_files):
os.makedirs(dirname, exist_ok=True)
for src, dst in zip(src_files, dest_files):
shutil.copy(src, dst)
def _get_tflm_generator_path():
return _get_file_list("list_generator_dir", "")[0]
# For examples, we are explicitly making a decision to not have any source
# specialization based on the TARGET and OPTIMIZED_KERNEL_DIR. The thinking
# here is that any target-specific sources should not be part of the TFLM
# tree. Rather, this function will return an examples directory structure for
# x86 and it will be the responsibility of the target-specific examples
# repository to provide all the additional sources (and remove the unnecessary
# sources) for the examples to run on that specific target.
def _create_examples_tree(prefix_dir, examples_list):
files = []
for e in examples_list:
files.extend(_get_file_list("list_%s_example_sources" % (e), ""))
files.extend(_get_file_list("list_%s_example_headers" % (e), ""))
  # The get_file_list gives paths relative to the root of the git repo (where the
# examples are in tensorflow/lite/micro/examples). However, in the output
# tree, we would like for the examples to be under prefix_dir/examples.
tflm_examples_path = "tensorflow/lite/micro/examples"
tflm_downloads_path = "tensorflow/lite/micro/tools/make/downloads"
tflm_generator_path = _get_tflm_generator_path()
# Some non-example source and headers will be in the {files} list. They need
# special handling or they will end up outside the {prefix_dir} tree.
dest_file_list = []
for f in files:
if tflm_generator_path in f:
# file is generated during the build.
relative_path = os.path.relpath(f, tflm_generator_path)
full_filename = os.path.join(prefix_dir, relative_path)
# Allow generated example sources to be placed with their example.
f = relative_path
if tflm_examples_path in f:
# file is in examples tree
relative_path = os.path.relpath(f, tflm_examples_path)
full_filename = os.path.join(prefix_dir, "examples", relative_path)
elif tflm_downloads_path in f:
# is third-party file
relative_path = os.path.relpath(f, tflm_downloads_path)
full_filename = os.path.join(prefix_dir, "third_party", relative_path)
else:
# not third-party and not examples, don't modify file name
# ex. tensorflow/lite/experimental/microfrontend
full_filename = os.path.join(prefix_dir, f)
dest_file_list.append(full_filename)
for dest_file, filepath in zip(dest_file_list, files):
dest_dir = os.path.dirname(dest_file)
os.makedirs(dest_dir, exist_ok=True)
shutil.copy(filepath, dest_dir)
# Since we are changing the directory structure for the examples, we will also
# need to modify the paths in the code.
for filepath in dest_file_list:
with fileinput.FileInput(filepath, inplace=True) as f:
for line in f:
include_match = re.match(
r'.*#include.*"' + tflm_examples_path + r'/([^/]+)/.*"', line)
if include_match:
# We need a trailing forward slash because what we care about is
# replacing the include paths.
text_to_replace = os.path.join(tflm_examples_path,
include_match.group(1)) + "/"
line = line.replace(text_to_replace, "")
# end="" prevents an extra newline from getting added as part of the
# in-place find and replace.
print(line, end="")
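# A minimal sketch (not used by the generator) of the include-path rewrite
# performed by the loop above; the include line is a hypothetical example.
def _demo_example_include_rewrite():
  """Strip the per-example directory prefix from an #include line."""
  import os
  import re
  tflm_examples_path = "tensorflow/lite/micro/examples"
  line = '#include "tensorflow/lite/micro/examples/hello_world/model.h"\n'
  match = re.match(r'.*#include.*"' + tflm_examples_path + r'/([^/]+)/.*"', line)
  if match:
    prefix = os.path.join(tflm_examples_path, match.group(1)) + "/"
    line = line.replace(prefix, "")
  # line is now '#include "model.h"\n'
  return line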
def _rename_cc_to_cpp(output_dir):
for path, _, files in os.walk(output_dir):
for name in files:
if name.endswith(".cc"):
base_name_with_path = os.path.join(path, os.path.splitext(name)[0])
os.rename(base_name_with_path + ".cc", base_name_with_path + ".cpp")
def main():
parser = argparse.ArgumentParser(
description="Starting script for TFLM project generation")
parser.add_argument("output_dir",
help="Output directory for generated TFLM tree")
parser.add_argument("--no_copy",
action="store_true",
help="Do not copy files to output directory")
parser.add_argument("--print_src_files",
action="store_true",
help="Print the src files (i.e. files in the TFLM tree)")
parser.add_argument(
"--print_dest_files",
action="store_true",
help="Print the dest files (i.e. files in the output tree)")
parser.add_argument("--makefile_options",
default="",
help="Additional TFLM Makefile options. For example: "
"--makefile_options=\"TARGET=<target> "
"OPTIMIZED_KERNEL_DIR=<optimized_kernel_dir> "
"TARGET_ARCH=corex-m4\"")
parser.add_argument("--examples",
"-e",
action="append",
help="Examples to add to the output tree. For example: "
"-e hello_world -e micro_speech")
parser.add_argument(
"--rename_cc_to_cpp",
action="store_true",
help="Rename all .cc files to .cpp in the destination files location.")
args = parser.parse_args()
makefile_options = args.makefile_options
# TODO(b/143904317): Explicitly call make third_party_downloads. This will
# no longer be needed once all the downloads are switched over to bash
# scripts.
params_list = [
"make", "-f", "tensorflow/lite/micro/tools/make/Makefile",
"third_party_downloads"
] + makefile_options.split()
process = subprocess.Popen(params_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
if process.returncode != 0:
raise RuntimeError("%s failed with \n\n %s" %
(" ".join(params_list), stderr.decode()))
src_files, dest_files = _get_src_and_dest_files(args.output_dir,
makefile_options)
if args.print_src_files:
print(" ".join(src_files))
if args.print_dest_files:
print(" ".join(dest_files))
if args.no_copy is False:
_copy(src_files, dest_files)
if args.examples is not None:
_create_examples_tree(args.output_dir, args.examples)
if args.rename_cc_to_cpp:
_rename_cc_to_cpp(args.output_dir)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python3
# Standard library imports
from subprocess import Popen, PIPE
from sys import argv
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
from imp import load_source
from os.path import dirname, isfile, join, realpath
from fcntl import flock, LOCK_EX, LOCK_UN, LOCK_NB
from OmsConfigHostHelpers import write_omsconfig_host_telemetry, write_omsconfig_host_switch_event, write_omsconfig_host_log, stop_old_host_instances
from time import sleep
import subprocess
import codecs
pathToCurrentScript = realpath(__file__)
pathToCommonScriptsFolder = dirname(pathToCurrentScript)
helperLibPath = join(pathToCommonScriptsFolder, 'helperlib.py')
helperlib = load_source('helperlib', helperLibPath)
try:
# Used by Python 2.7+
from argparse import ArgumentParser
useArgParse = True
except:
# Used by Python 2.4-2.6
from optparse import OptionParser
useArgParse = False
def main(argv):
"""StartDscConfiguration"""
# Define method arguments and description
description = 'Starts the specified DSC configuration.'
parameters = {
'configurationmof' : {
'shortForm' : 'c',
'helpText' : 'The path to the configuration mof to start.',
'required' : True,
'action' : 'store'
},
'force' : {
'shortForm' : 'f',
'helpText' : 'Specifies that any current pending configuration should be forcibly removed before starting the new configuration.',
'required' : False,
'action' : 'store_true'
}
}
# Parse -configurationmof on its own for backwards compatibility
configmofArgument = None
if '-configurationmof' in argv:
configmofIndex = argv.index('-configurationmof')
try:
configmofArgument = argv[configmofIndex + 1]
except:
print('StartDscConfiguration.py: error: Please provide a valid path argument for -configurationmof')
exit(1)
# Set the configuration mof parameter to no longer be required so it doesn't error in the argument parser
parameters['configurationmof']['required'] = False
# Remove -configurationmof and its argument from the list so it doesn't error in the argument parser
argv.pop(configmofIndex)
argv.pop(configmofIndex)
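# Illustration (hypothetical input): an argv of
#   ['-configurationmof', '/tmp/current.mof', '-f']
# leaves configmofArgument == '/tmp/current.mof' and argv == ['-f'], so the
# generic parser below only sees the remaining options.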
# Parse arguments
if (useArgParse):
# Used by Python 2.7+
parser = ArgumentParser(description = description)
for parameter in parameters.keys():
parameterInfo = parameters[parameter]
parser.add_argument('-' + parameterInfo['shortForm'], '--' + parameter, required = parameterInfo['required'], help = parameterInfo['helpText'], action = parameterInfo['action'])
parsedArguments = parser.parse_args(argv)
else:
# Used by Python 2.4-2.6
parser = OptionParser(description = description)
for parameter in parameters.keys():
parameterInfo = parameters[parameter]
parser.add_option('-' + parameterInfo['shortForm'], '--' + parameter, help = parameterInfo['helpText'], action = parameterInfo['action'])
(parsedArguments, extraArguments) = parser.parse_args(argv)
for parameter in parameters.keys():
if parameters[parameter]['required']:
if not getattr(parsedArguments, parameter):
print('StartDscConfiguration.py: error: argument -' + parameters[parameter]['shortForm'] + '/--' + parameter + ' is required.')
exit(1)
# Check that we don't have two configuration mofs defined
if configmofArgument and parsedArguments.configurationmof:
print('StartDscConfiguration.py: error: Two configuration mof arguments were found. Please provide only one.')
exit(1)
if configmofArgument:
parsedArguments.configurationmof = configmofArgument
# Read the configuration mof
try:
configurationFile = codecs.open(parsedArguments.configurationmof, 'r')
except:
configurationFile = codecs.open(parsedArguments.configurationmof, 'r', encoding = 'utf-16')
try:
configurationFileContent = configurationFile.read()
finally:
if (configurationFile):
configurationFile.close()
# Convert the file content to strings of integers representing unicode
configurationData = []
for char in configurationFileContent:
configurationData.append(str(ord(char)))
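# Illustration: a mof whose content begins with "inst" is sent as the tokens
# '105', '110', '115', '116', i.e. one decimal Unicode code point per character.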
# # OMI CLI location
omiBinDir = "<CONFIG_BINDIR>"
omiCliPath = omiBinDir + "/omicli"
dsc_host_base_path = helperlib.DSC_HOST_BASE_PATH
dsc_host_path = join(dsc_host_base_path, 'bin/dsc_host')
dsc_host_output_path = join(dsc_host_base_path, 'output')
dsc_host_lock_path = join(dsc_host_base_path, 'dsc_host_lock')
dsc_host_switch_path = join(dsc_host_base_path, 'dsc_host_ready')
if ("omsconfig" in helperlib.DSC_SCRIPT_PATH):
write_omsconfig_host_switch_event(pathToCurrentScript, isfile(dsc_host_switch_path))
if ("omsconfig" in helperlib.DSC_SCRIPT_PATH) and (isfile(dsc_host_switch_path)):
use_omsconfig_host = True
else:
use_omsconfig_host = False
# Assemble parameters to pass to OMI CLI
host_parameters = []
if use_omsconfig_host:
host_parameters.append(dsc_host_path)
host_parameters.append(dsc_host_output_path)
host_parameters.append("SendConfigurationApply")
# dsc_host's SendConfigurationApply operation takes the configuration mof path
host_parameters.append(parsedArguments.configurationmof)
# Insert force if specified
if parsedArguments.force:
host_parameters.append("force")
else:
host_parameters.append(omiCliPath)
host_parameters.append("iv")
host_parameters.append("<DSC_NAMESPACE>")
host_parameters.append("{")
host_parameters.append("MSFT_DSCLocalConfigurationManager")
host_parameters.append("}")
host_parameters.append("SendConfigurationApply")
host_parameters.append("{")
host_parameters.append("ConfigurationData")
host_parameters.append("[")
# Insert configurationmof data here
for token in configurationData:
host_parameters.append(token)
host_parameters.append("]")
# Insert force if specified
if parsedArguments.force:
host_parameters.append("force")
host_parameters.append("true")
host_parameters.append("}")
stdout = ''
stderr = ''
if use_omsconfig_host:
dschostlock_filehandle = None
try:
stop_old_host_instances(dsc_host_lock_path)
# Open the dsc host lock file. This also creates a file if it does not exist
dschostlock_filehandle = open(dsc_host_lock_path, 'w')
print("Opened the dsc host lock file at the path '" + dsc_host_lock_path + "'")
dschostlock_acquired = False
# Acquire dsc host file lock
for retry in range(10):
try:
flock(dschostlock_filehandle, LOCK_EX | LOCK_NB)
dschostlock_acquired = True
break
except IOError:
write_omsconfig_host_log('dsc_host lock file not acquired. retry (#' + str(retry) + ') after 60 seconds...', pathToCurrentScript)
sleep(60)
if dschostlock_acquired:
p = subprocess.Popen(host_parameters, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode() if isinstance(stdout, bytes) else stdout
print(stdout)
else:
print("dsc host lock already acuired by a different process")
finally:
if (dschostlock_filehandle):
# Release dsc host file lock
flock(dschostlock_filehandle, LOCK_UN)
# Close dsc host lock file handle
dschostlock_filehandle.close()
else:
p = subprocess.Popen(host_parameters, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode() if isinstance(stdout, bytes) else stdout
stderr = stderr.decode() if isinstance(stderr, bytes) else stderr
print(stdout)
print(stderr)
main(argv[1:])
|
|
# Copyright (C) 2014 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
from qtpy import QtCore, QtWidgets
import yaml
import reprounzip_qt.reprounzip_interface as reprounzip
from reprounzip_qt.gui.common import ROOT, ResizableStack, handle_error, \
error_msg, parse_ports
from reprounzip_qt.usage import record_usage
class RunOptions(QtWidgets.QWidget):
x11 = None
def __init__(self):
super(RunOptions, self).__init__()
self.setLayout(QtWidgets.QGridLayout())
def add_row(self, label, widget):
layout = self.layout()
row = layout.rowCount()
layout.addWidget(QtWidgets.QLabel(label), row, 0)
layout.addWidget(widget, row, 1)
def add_row_layout(self, label, rowlayout):
layout = self.layout()
row = layout.rowCount()
layout.addWidget(QtWidgets.QLabel(label), row, 0)
layout.addLayout(rowlayout, row, 1)
def add_x11(self):
self.x11 = QtWidgets.QCheckBox("enabled", checked=False)
self.add_row("X11 display:", self.x11)
def options(self):
options = {'args': []}
if self.x11 is not None and self.x11.isChecked():
options['args'].append('--enable-x11')
return options
class DirectoryOptions(RunOptions):
def __init__(self):
super(DirectoryOptions, self).__init__()
self.add_x11()
class ChrootOptions(RunOptions):
def __init__(self):
super(ChrootOptions, self).__init__()
self.add_x11()
class DockerOptions(RunOptions):
def __init__(self):
super(DockerOptions, self).__init__()
self.x11 = QtWidgets.QCheckBox("enabled", checked=False)
self.tunneled_x11 = QtWidgets.QCheckBox("use tunnel", checked=False)
row = QtWidgets.QHBoxLayout()
row.addWidget(self.x11)
row.addWidget(self.tunneled_x11)
row.addStretch(1)
self.add_row_layout("X11 display:", row)
self.detach = QtWidgets.QCheckBox("start background container and "
"leave it running",
checked=False)
self.add_row("Detach:", self.detach)
self.raw_options = QtWidgets.QLineEdit('')
self.add_row("Raw Docker options:", self.raw_options)
self.ports = QtWidgets.QLineEdit(
'',
toolTip="Space-separated host:guest port mappings")
self.add_row("Expose ports:", self.ports)
def options(self):
options = super(DockerOptions, self).options()
if self.tunneled_x11.isChecked():
options['args'].append('--tunneled-x11')
record_usage(docker_tunneled_x11=True)
if self.detach.isChecked():
options['args'].append('--detach')
record_usage(docker_detach=True)
nb_raw = 0
for opt in self.raw_options.text().split():
opt = opt.strip()
if opt:
nb_raw += 1
options['args'].append('--docker-option=%s' % opt)
if nb_raw:
record_usage(docker_raw_options=nb_raw)
ports = parse_ports(self.ports.text(), self)
if ports is None:
return None
for host, container, proto in ports:
options['args'].extend(
['--docker-option=-p',
'--docker-option=%s:%s/%s' % (host, container, proto)])
record_usage(docker_run_port_fwd=bool(ports))
return options
class VagrantOptions(RunOptions):
def __init__(self):
super(VagrantOptions, self).__init__()
self.add_x11()
self.ports = QtWidgets.QLineEdit(
'',
toolTip="Space-separated host:guest port mappings")
self.add_row("Expose ports:", self.ports)
def options(self):
options = super(VagrantOptions, self).options()
ports = parse_ports(self.ports.text(), self)
if ports is None:
return None
for host, container, proto in ports:
options['args'].append('--expose-port=%s:%s/%s' %
(host, container, proto))
record_usage(vagrant_run_port_fwd=bool(ports))
return options
class FilesManager(QtWidgets.QDialog):
def __init__(self, directory, unpacker=None, root=None, **kwargs):
super(FilesManager, self).__init__(**kwargs)
self.directory = directory
self.unpacker = unpacker
self.root = root
layout = QtWidgets.QHBoxLayout()
self.files_widget = QtWidgets.QListWidget(
selectionMode=QtWidgets.QListWidget.SingleSelection)
self.files_widget.itemSelectionChanged.connect(self._file_changed)
layout.addWidget(self.files_widget)
right_layout = QtWidgets.QGridLayout()
right_layout.addWidget(QtWidgets.QLabel("name:"), 0, 0)
self.f_name = QtWidgets.QLineEdit('', readOnly=True)
right_layout.addWidget(self.f_name, 0, 1)
right_layout.addWidget(QtWidgets.QLabel("Path:"), 1, 0)
self.f_path = QtWidgets.QLineEdit('', readOnly=True)
right_layout.addWidget(self.f_path, 1, 1)
right_layout.addWidget(QtWidgets.QLabel("Current:"), 2, 0)
self.f_status = QtWidgets.QLineEdit('', readOnly=True)
right_layout.addWidget(self.f_status, 2, 1)
self.b_upload = QtWidgets.QPushButton("Upload a replacement",
enabled=False)
self.b_upload.clicked.connect(self._upload)
right_layout.addWidget(self.b_upload, 3, 0, 1, 2)
self.b_download = QtWidgets.QPushButton("Download to disk",
enabled=False)
self.b_download.clicked.connect(self._download)
right_layout.addWidget(self.b_download, 4, 0, 1, 2)
self.b_reset = QtWidgets.QPushButton("Reset file", enabled=False)
self.b_reset.clicked.connect(self._reset)
right_layout.addWidget(self.b_reset, 5, 0, 1, 2)
right_layout.setRowStretch(6, 1)
layout.addLayout(right_layout)
self.setLayout(layout)
self.files_status = reprounzip.FilesStatus(directory)
for file_status in self.files_status:
text = "[%s%s] %s" % (("I" if file_status.is_input else ''),
("O" if file_status.is_output else ''),
file_status.name)
self.files_widget.addItem(text)
record_usage(iofiles=self.files_widget.count())
def _file_changed(self):
selected = [i.row() for i in self.files_widget.selectedIndexes()]
if not selected:
self.f_name.setText('')
self.f_path.setText('')
self.f_status.setText('')
self.b_upload.setEnabled(False)
self.b_download.setEnabled(False)
self.b_reset.setEnabled(False)
else:
file_status = self.files_status[selected[0]]
self.b_upload.setEnabled(True)
self.b_download.setEnabled(True)
self.b_reset.setEnabled(False)
self.f_name.setText(file_status.name)
self.f_path.setText(str(file_status))
self.f_status.setEnabled(False)
if file_status.assigned is None:
self.f_status.setText("(original)")
self.b_reset.setEnabled(False)
elif file_status.assigned is False:
self.f_status.setText("(not created)")
elif file_status.assigned is True:
self.f_status.setText("(generated)")
else:
caption = file_status.assigned
if isinstance(caption, bytes):
caption = caption.decode('utf-8', 'replace')
self.f_status.setText(caption)
self.f_status.setEnabled(True)
def _upload(self):
selected = self.files_widget.selectedIndexes()[0].row()
file_status = self.files_status[selected]
picked, _ = QtWidgets.QFileDialog.getOpenFileName(
self, "Pick file to upload",
QtCore.QDir.currentPath())
if picked:
record_usage(file_upload=True)
handle_error(self, reprounzip.upload(
self.directory, file_status.name, picked,
unpacker=self.unpacker, root=self.root))
self._file_changed()
def _download(self):
selected = self.files_widget.selectedIndexes()[0].row()
file_status = self.files_status[selected]
picked, _ = QtWidgets.QFileDialog.getSaveFileName(
self, "Pick destination",
QtCore.QDir.currentPath() + '/' + file_status.name)
if picked:
record_usage(file_download=True)
handle_error(self, reprounzip.download(
self.directory, file_status.name, picked,
unpacker=self.unpacker, root=self.root))
self._file_changed()
def _reset(self):
selected = self.files_widget.selectedIndexes()[0].row()
file_status = self.files_status[selected]
record_usage(file_reset=True)
handle_error(self, reprounzip.upload(
self.directory, file_status.name, None,
unpacker=self.unpacker, root=self.root))
self._file_changed()
class RunTab(QtWidgets.QWidget):
"""The main window, that allows you to run/change an unpacked experiment.
"""
UNPACKERS = [
('directory', DirectoryOptions),
('chroot', ChrootOptions),
('docker', DockerOptions),
('vagrant', VagrantOptions),
]
directory = None
unpacker = None
def __init__(self, unpacked_directory='', **kwargs):
super(RunTab, self).__init__(**kwargs)
layout = QtWidgets.QGridLayout()
layout.addWidget(QtWidgets.QLabel("Experiment directory:"), 0, 0)
self.directory_widget = QtWidgets.QLineEdit(unpacked_directory)
self.directory_widget.editingFinished.connect(self._directory_changed)
layout.addWidget(self.directory_widget, 0, 1)
browse = QtWidgets.QPushButton("Browse")
browse.clicked.connect(self._browse)
layout.addWidget(browse, 0, 2)
layout.addWidget(QtWidgets.QLabel("Unpacker:"), 1, 0,
QtCore.Qt.AlignTop)
self.unpacker_widget = QtWidgets.QLabel("-")
layout.addWidget(self.unpacker_widget, 1, 1, 1, 2)
layout.addWidget(QtWidgets.QLabel("Input/output files:"), 2, 0,
QtCore.Qt.AlignTop)
self.files_button = QtWidgets.QPushButton("Manage files",
enabled=False)
self.files_button.clicked.connect(self._open_files_manager)
layout.addWidget(self.files_button, 2, 1, 1, 2)
layout.addWidget(QtWidgets.QLabel("Runs:"), 3, 0,
QtCore.Qt.AlignTop)
self.runs_widget = QtWidgets.QListWidget(
selectionMode=QtWidgets.QListWidget.MultiSelection)
layout.addWidget(self.runs_widget, 3, 1, 3, 1)
select_all = QtWidgets.QPushButton("Select All")
select_all.clicked.connect(self.runs_widget.selectAll)
layout.addWidget(select_all, 3, 2)
deselect_all = QtWidgets.QPushButton("Deselect All")
deselect_all.clicked.connect(self.runs_widget.clearSelection)
layout.addWidget(deselect_all, 4, 2)
layout.addWidget(QtWidgets.QLabel("Elevate privileges:"), 6, 0)
self.root = QtWidgets.QComboBox(editable=False)
self.root.addItems(ROOT.TEXT)
layout.addWidget(self.root, 6, 1, 1, 2)
layout.addWidget(QtWidgets.QLabel("Jupyter integration:"),
7, 0)
self.run_jupyter_notebook = QtWidgets.QCheckBox("Run notebook server",
checked=False,
enabled=False)
layout.addWidget(self.run_jupyter_notebook, 7, 1, 1, 2)
group = QtWidgets.QGroupBox(title="Unpacker options")
group_layout = QtWidgets.QVBoxLayout()
self.unpacker_options = ResizableStack()
scroll = QtWidgets.QScrollArea(widgetResizable=True)
scroll.setWidget(self.unpacker_options)
group_layout.addWidget(scroll)
group.setLayout(group_layout)
layout.addWidget(group, 8, 0, 1, 3)
layout.setRowStretch(8, 1)
for i, (name, WidgetClass) in enumerate(self.UNPACKERS):
widget = WidgetClass()
self.unpacker_options.addWidget(widget)
self.unpacker_options.addWidget(
QtWidgets.QLabel("Select a directory to display options..."))
self.unpacker_options.setCurrentIndex(len(self.UNPACKERS))
buttons = QtWidgets.QHBoxLayout()
buttons.addStretch(1)
self.run_widget = QtWidgets.QPushButton("Run experiment")
self.run_widget.clicked.connect(self._run)
buttons.addWidget(self.run_widget)
self.destroy_widget = QtWidgets.QPushButton("Destroy unpacked "
"experiment")
self.destroy_widget.clicked.connect(self._destroy)
buttons.addWidget(self.destroy_widget)
layout.addLayout(buttons, 9, 0, 1, 3)
self.setLayout(layout)
self._directory_changed()
def _browse(self):
picked = QtWidgets.QFileDialog.getExistingDirectory(
self, "Pick directory",
QtCore.QDir.currentPath())
if picked:
record_usage(browse_unpacked=True)
self.directory_widget.setText(picked)
self._directory_changed()
def _directory_changed(self, new_dir=None, force=False):
if not force and self.directory_widget.text() == self.directory:
return
self.directory = self.directory_widget.text()
unpacker = reprounzip.check_directory(self.directory)
self.run_jupyter_notebook.setChecked(False)
self.run_jupyter_notebook.setEnabled(False)
self.runs_widget.clear()
if unpacker is not None:
with open(self.directory + '/config.yml') as fp:
self.config = yaml.safe_load(fp)
self.run_widget.setEnabled(True)
self.destroy_widget.setEnabled(True)
self.files_button.setEnabled(True)
self.unpacker = unpacker
self.unpacker_widget.setText(unpacker)
for run in self.config['runs']:
self.runs_widget.addItem(' '.join(reprounzip.shell_escape(arg)
for arg in run['argv']))
self.runs_widget.selectAll()
self.unpacker_options.setCurrentIndex(
dict((n, i) for i, (n, w) in enumerate(self.UNPACKERS))
.get(unpacker, 4))
if (unpacker == 'docker' and
reprounzip.find_command('reprozip-jupyter') is not None and
reprounzip.is_jupyter(self.directory)):
self.run_jupyter_notebook.setEnabled(True)
self.run_jupyter_notebook.setChecked(True)
else:
self.run_widget.setEnabled(False)
self.destroy_widget.setEnabled(False)
self.files_button.setEnabled(False)
self.unpacker = None
self.unpacker_widget.setText("-")
self.unpacker_options.setCurrentIndex(len(self.UNPACKERS))
def _run(self):
options = self.unpacker_options.currentWidget().options()
if options is None:
return
runs = sorted(i.row() for i in self.runs_widget.selectedIndexes())
if not runs:
error_msg(self, "No run selected", 'warning')
return
record_usage(run='%d/%d' % (len(runs), self.runs_widget.count()))
handle_error(self, reprounzip.run(
self.directory, runs=runs,
unpacker=self.unpacker,
root=ROOT.INDEX_TO_OPTION[self.root.currentIndex()],
jupyter=self.run_jupyter_notebook.isChecked(),
**options))
def _destroy(self):
handle_error(self, reprounzip.destroy(
self.directory, unpacker=self.unpacker,
root=ROOT.INDEX_TO_OPTION[self.root.currentIndex()]))
self._directory_changed(force=True)
def _open_files_manager(self):
manager = FilesManager(
parent=self,
directory=self.directory_widget.text(),
unpacker=self.unpacker,
root=ROOT.INDEX_TO_OPTION[self.root.currentIndex()])
manager.exec_()
def set_directory(self, directory, root=None):
self.root.setCurrentIndex(ROOT.OPTION_TO_INDEX[root])
self.directory_widget.setText(directory)
self._directory_changed(force=True)
def should_exit(self):
if self.unpacker:
r = QtWidgets.QMessageBox.question(
self, "Close Confirmation",
"The experiment is still unpacked with '%s'. Are you sure you "
"want to exit without removing it?" % self.unpacker,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
if r == QtWidgets.QMessageBox.Yes:
record_usage(leave_unpacked=True)
return True
else:
return False
else:
return True
def replaceable(self):
return not self.unpacker
|
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: shlex.py
"""A lexical analyzer class for simple shell-like syntaxes."""
import os.path
import sys
from collections import deque
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = [
'shlex', 'split']
class shlex:
"""A lexical analyzer class for simple shell-like syntaxes."""
def __init__(self, instream=None, infile=None, posix=False):
if isinstance(instream, basestring):
instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
self.wordchars = 'abcdfeghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
if self.posix:
self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')  # Latin-1 accented letters, as in the stdlib shlex
self.whitespace = ' \t\r\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
self.filestack = deque()
self.source = None
if self.debug:
print 'shlex: reading from %s, line %d' % (
self.instream, self.lineno)
return
def push_token(self, tok):
"""Push a token onto the stack popped by the get_token method"""
if self.debug >= 1:
print 'shlex: pushing token ' + repr(tok)
self.pushback.appendleft(tok)
def push_source(self, newstream, newfile=None):
"""Push an input source onto the lexer's input source stack."""
if isinstance(newstream, basestring):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print 'shlex: pushing to file %s' % (self.infile,)
else:
print 'shlex: pushing to stream %s' % (self.instream,)
return
def pop_source(self):
"""Pop the input source stack."""
self.instream.close()
self.infile, self.instream, self.lineno = self.filestack.popleft()
if self.debug:
print 'shlex: popping to %s, line %d' % (
self.instream, self.lineno)
self.state = ' '
def get_token(self):
"""Get a token from the input stream (or from stack if it's nonempty)"""
if self.pushback:
tok = self.pushback.popleft()
if self.debug >= 1:
print 'shlex: popping token ' + repr(tok)
return tok
else:
raw = self.read_token()
if self.source is not None:
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
newfile, newstream = spec
self.push_source(newstream, newfile)
raw = self.get_token()
while raw == self.eof:
if not self.filestack:
return self.eof
self.pop_source()
raw = self.get_token()
if self.debug >= 1:
if raw != self.eof:
print 'shlex: token=' + repr(raw)
else:
print 'shlex: token=EOF'
return raw
def read_token(self):
quoted = False
escapedstate = ' '
while True:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print 'shlex: in state', repr(self.state),
print 'I see character:', repr(nextchar)
if self.state is None:
self.token = ''
break
elif self.state == ' ':
if not nextchar:
self.state = None
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print 'shlex: I see whitespace in whitespace state'
if self.token or self.posix and quoted:
break
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.state = 'a'
else:
self.token = nextchar
if self.token or self.posix and quoted:
break
else:
continue
elif self.state in self.quotes:
quoted = True
if not nextchar:
if self.debug >= 2:
print 'shlex: I see EOF in quotes state'
raise ValueError, 'No closing quotation'
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.state = ' '
break
else:
self.state = 'a'
elif self.posix and nextchar in self.escape and self.state in self.escapedquotes:
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
elif self.state in self.escape:
if not nextchar:
if self.debug >= 2:
print 'shlex: I see EOF in escape state'
raise ValueError, 'No escaped character'
if escapedstate in self.quotes and nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print 'shlex: I see whitespace in word state'
self.state = ' '
if self.token or self.posix and quoted:
break
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
if self.posix:
self.state = ' '
if self.token or self.posix and quoted:
break
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars or nextchar in self.quotes or self.whitespace_split:
self.token = self.token + nextchar
else:
self.pushback.appendleft(nextchar)
if self.debug >= 2:
print 'shlex: I see punctuation in word state'
self.state = ' '
if self.token:
break
else:
continue
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
if self.debug > 1:
if result:
print 'shlex: raw token=' + repr(result)
else:
print 'shlex: raw token=EOF'
return result
def sourcehook(self, newfile):
"""Hook called on a filename to be sourced."""
if newfile[0] == '"':
newfile = newfile[1:-1]
if isinstance(self.infile, basestring) and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, 'r'))
def error_leader(self, infile=None, lineno=None):
"""Emit a C-compiler-like, Emacs-friendly error-message leader."""
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return '"%s", line %d: ' % (infile, lineno)
def __iter__(self):
return self
def next(self):
token = self.get_token()
if token == self.eof:
raise StopIteration
return token
def split(s, comments=False, posix=True):
lex = shlex(s, posix=posix)
lex.whitespace_split = True
if not comments:
lex.commenters = ''
return list(lex)
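# Illustration (not part of the original module): with posix=True and
# whitespace_split=True, quoted substrings stay together, e.g.
#   split('ls -l "My Documents"')  ->  ['ls', '-l', 'My Documents']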
if __name__ == '__main__':
if len(sys.argv) == 1:
lexer = shlex()
else:
file = sys.argv[1]
lexer = shlex(open(file), file)
while 1:
tt = lexer.get_token()
if tt:
print 'Token: ' + repr(tt)
else:
break
|
|
'''
This module provides database interfaces to postgres SQL
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
'''
import sys, os, io, time, re, datetime
from math import isnan, isinf
from decimal import Decimal
from arelle.ModelValue import dateTime
import socket
TRACESQLFILE = None
#TRACESQLFILE = r"z:\temp\sqltraceWin.log" # uncomment to trace SQL on connection (very big file!!!)
#TRACESQLFILE = "/Users/hermf/temp/sqltraceUnx.log" # uncomment to trace SQL on connection (very big file!!!)
def noop(*args, **kwargs): return
class NoopException(Exception):
pass
try:
import pg8000
hasPostgres = True
pgConnect = pg8000.connect
pgOperationalError = pg8000.OperationalError
pgProgrammingError = pg8000.ProgrammingError
pgInterfaceError = pg8000.InterfaceError
except ImportError:
hasPostgres = False
pgConnect = noop
pgOperationalError = pgProgrammingError = pgInterfaceError = NoopException
try:
import pymysql # MIT License but not installed at GAE
hasMySql = True
mysqlConnect = pymysql.connect
mysqlProgrammingError = pymysql.ProgrammingError
mysqlInterfaceError = pymysql.InterfaceError
mysqlInternalError = pymysql.InternalError
except ImportError:
try :
import MySQLdb # LGPL License and used on GAE, Python 2.7 only
hasMySql = True
mysqlConnect = MySQLdb.connect
mysqlProgrammingError = MySQLdb.ProgrammingError
mysqlInterfaceError = MySQLdb.InterfaceError
mysqlInternalError = MySQLdb.InternalError
except ImportError:
hasMySql = False
mysqlConnect = noop
mysqlProgrammingError = mysqlInterfaceError = mysqlInternalError = NoopException
try:
# requires NLS_LANG to be UTF-8
os.environ["NLS_LANG"] = ".UTF8"
os.environ['ORA_NCHAR_LITERAL_REPLACE'] = 'TRUE'
import cx_Oracle
hasOracle = True
oracleConnect = cx_Oracle.connect
oracleDatabaseError = cx_Oracle.DatabaseError
oracleInterfaceError = cx_Oracle.InterfaceError
oracleNCLOB = cx_Oracle.NCLOB
except ImportError:
# also requires "Oracle Instant Client"
hasOracle = False
oracleConnect = noop
oracleDatabaseError = oracleInterfaceError = NoopException
oracleNCLOB = None
try:
import pyodbc
hasMSSql = True
mssqlConnect = pyodbc.connect
mssqlOperationalError = pyodbc.OperationalError
mssqlProgrammingError = pyodbc.ProgrammingError
mssqlInterfaceError = pyodbc.InterfaceError
mssqlInternalError = pyodbc.InternalError
mssqlDataError = pyodbc.DataError
mssqlIntegrityError = pyodbc.IntegrityError
except ImportError:
hasMSSql = False
mssqlConnect = noop
mssqlOperationalError = mssqlProgrammingError = mssqlInterfaceError = mssqlInternalError = \
mssqlDataError = mssqlIntegrityError = NoopException
try:
import sqlite3
hasSQLite = True
sqliteConnect = sqlite3.connect
sqliteParseDecltypes = sqlite3.PARSE_DECLTYPES
sqliteOperationalError = sqlite3.OperationalError
sqliteProgrammingError = sqlite3.ProgrammingError
sqliteInterfaceError = sqlite3.InterfaceError
sqliteInternalError = sqlite3.InternalError
sqliteDataError = sqlite3.DataError
sqliteIntegrityError = sqlite3.IntegrityError
except ImportError:
hasSQLite = False
sqliteConnect = noop
sqliteParseDecltypes = None
sqliteOperationalError = sqliteProgrammingError = sqliteInterfaceError = sqliteInternalError = \
sqliteDataError = sqliteIntegrityError = NoopException
def isSqlConnection(host, port, timeout=10, product=None):
# determine if postgres port
t = 2
while t < timeout:
try:
if product == "postgres" and hasPostgres:
pgConnect(user='', host=host, port=int(port or 5432), socket_timeout=t)
elif product == "mysql" and hasMySql:
mysqlConnect(user='', host=host, port=int(port or 3306), connect_timeout=t)
elif product == "orcl" and hasOracle:
orclConnect = oracleConnect('{}/{}@{}:{}'
.format("", "", host,
":{}".format(port) if port else ""))
elif product == "mssql" and hasMSSql:
mssqlConnect(user='', host=host, socket_timeout=t)
elif product == "sqlite" and hasSQLite:
sqliteConnect("", t) # needs a database specified for this test
except (pgProgrammingError, mysqlProgrammingError, oracleDatabaseError, sqliteProgrammingError):
return True # success, this is really a postgres socket, wants user name
except (pgInterfaceError, mysqlInterfaceError, oracleInterfaceError,
mssqlOperationalError, mssqlInterfaceError, sqliteOperationalError, sqliteInterfaceError):
return False # something is there but not postgres
except socket.timeout:
t = t + 2 # relax - try again with longer timeout
return False
class XPDBException(Exception):
def __init__(self, code, message, **kwargs ):
self.code = code
self.message = message
self.kwargs = kwargs
self.args = ( self.__repr__(), )
def __repr__(self):
return _('[{0}] exception: {1}').format(self.code, self.message % self.kwargs)
class SqlDbConnection():
def __init__(self, modelXbrl, user, password, host, port, database, timeout, product):
self.modelXbrl = modelXbrl
self.disclosureSystem = modelXbrl.modelManager.disclosureSystem
if product == "postgres":
if not hasPostgres:
raise XPDBException("xpgDB:MissingPostgresInterface",
_("Postgres interface is not installed"))
self.conn = pgConnect(user=user, password=password, host=host,
port=int(port or 5432),
database=database,
socket_timeout=timeout or 60)
self.product = product
elif product == "mysql":
if not hasMySql:
raise XPDBException("xpgDB:MissingMySQLInterface",
_("MySQL interface is not installed"))
self.conn = mysqlConnect(user=user, passwd=password, host=host,
port=int(port or 3306),
db=database, # pymysql takes database or db but MySQLdb only takes db
connect_timeout=timeout or 60,
charset='utf8')
self.product = product
elif product == "orcl":
if not hasOracle:
raise XPDBException("xpgDB:MissingOracleInterface",
_("Oracle interface is not installed"))
self.conn = oracleConnect('{}/{}@{}{}'
.format(user, password, host,
":{}".format(port) if port else ""))
# self.conn.paramstyle = 'named'
self.product = product
elif product == "mssql":
if not hasMSSql:
raise XPDBException("xpgDB:MissingMSSQLInterface",
_("MSSQL server interface is not installed"))
self.conn = mssqlConnect('DRIVER={{SQL Server Native Client 11.0}};SERVER={2};DATABASE={3};UID={0};PWD={1};CHARSET=UTF8'
.format(user,
password,
host, # e.g., localhost\\SQLEXPRESS
database))
self.product = product
elif product == "sqlite":
if not hasSQLite:
raise XPDBException("xpgDB:MissingSQLiteInterface",
_("SQLite interface is not installed"))
self.conn = sqliteConnect(database, (timeout or 60), detect_types=sqliteParseDecltypes)
self.product = product
self.syncSequences = False # for object_id coordination of autoincrement values
else:
self.product = None
self.tableColTypes = {}
self.tableColDeclaration = {}
self.accessionId = "(None)"
self.tempInputTableName = "input{}".format(os.getpid())
def close(self, rollback=False):
try:
self.closeCursor()
if rollback:
self.rollback()
self.conn.close()
self.__dict__.clear() # dereference everything
except Exception as ex:
self.__dict__.clear() # dereference everything
if sys.version[0] >= '3':
raise ex.with_traceback(ex.__traceback__)
else:
raise ex
@property
def isClosed(self):
return not bool(self.__dict__) # closed when dict is empty
def showStatus(self, msg, clearAfter=None):
self.modelXbrl.modelManager.showStatus(msg, clearAfter)
def pyBoolFromDbBool(self, str):
return str in ("TRUE", "t", True) # may be DB string or Python boolean (preconverted)
def pyNoneFromDbNULL(self, str):
return None
def dbNum(self, num):
if isinstance(num, (int,float)):
if isinf(num) or isnan(num):
return None # not legal in SQL
return num
return None
def dbStr(self, s):
if self.product == "orcl":
return "'" + str(s).replace("'","''") + "'"
elif self.product == "mysql":
return "N" + self.conn.escape(str(s))
else:
return "'" + str(s).replace("'","''").replace('%', '%%') + "'"
def dbTableName(self, tableName):
if self.product == "orcl":
return '"' + tableName + '"'
else:
return tableName
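# Illustration (hypothetical value) of the default dbStr() branch above:
#   self.dbStr("O'Reilly & 100%")  ->  "'O''Reilly & 100%%'"
# Single quotes are doubled for SQL literals; '%' is doubled because the
# generated SQL text is later run through %-style string formatting.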
@property
def cursor(self):
try:
return self._cursor
except AttributeError:
self._cursor = self.conn.cursor()
return self._cursor
def closeCursor(self):
try:
self._cursor.close()
del self._cursor
except (AttributeError,
pgOperationalError,
mysqlProgrammingError,
oracleDatabaseError):
if hasattr(self, '_cursor'):
del self._cursor
def commit(self):
self.conn.commit()
def rollback(self):
try:
self.conn.rollback()
except (pg8000.ConnectionClosedError):
pass
def dropTemporaryTable(self):
if self.product == "orcl":
self.execute("""
BEGIN
EXECUTE IMMEDIATE 'drop table {}';
EXCEPTION WHEN OTHERS THEN NULL;
END;
""".format(self.tempInputTableName),
close=True, commit=False, fetch=False, action="dropping temporary table")
elif self.product == "mssql":
self.execute("""
DROP TEMPORARY TABLE IF EXISTS {};
""".format(self.tempInputTableName),
close=True, commit=False, fetch=False, action="dropping temporary table")
def lockTables(self, tableNames, isSessionTransaction=False):
''' lock for an entire transaction has isSessionTransaction=True, locks until commit
some databases require locks per operation (such as MySQL), when isSessionTransaction=False
'''
if self.product in ("postgres", "orcl") and isSessionTransaction:
result = self.execute('LOCK {} IN SHARE ROW EXCLUSIVE MODE'.format(', '.join(tableNames)),
close=False, commit=False, fetch=False, action="locking table")
elif self.product in ("mysql",):
result = self.execute('LOCK TABLES {}'
.format(', '.join(['{} WRITE'.format(t) for t in tableNames])),
close=False, commit=False, fetch=False, action="locking table")
elif self.product in ("sqlite",) and isSessionTransaction:
result = self.execute('BEGIN TRANSACTION',
close=False, commit=False, fetch=False, action="locking table")
# note, there is no lock for MS SQL (as far as I could find)
def unlockAllTables(self):
if self.product in ("mysql",):
result = self.execute('UNLOCK TABLES',
close=False, commit=False, fetch=False, action="locking table")
elif self.product in ("sqlite",):
result = self.execute('COMMIT TRANSACTION',
close=False, commit=False, fetch=False, action="locking table")
def execute(self, sql, commit=False, close=True, fetch=True, params=None, action="execute"):
cursor = self.cursor
try:
if isinstance(params, dict):
cursor.execute(sql, **params)
elif isinstance(params, (tuple,list)):
cursor.execute(sql, params)
else:
cursor.execute(sql)
except (pgProgrammingError,
mysqlProgrammingError, mysqlInternalError,
oracleDatabaseError,
mssqlOperationalError, mssqlInterfaceError, mssqlDataError,
mssqlProgrammingError, mssqlIntegrityError,
sqliteOperationalError, sqliteInterfaceError, sqliteDataError,
socket.timeout,
ValueError) as ex: # something wrong with SQL
if TRACESQLFILE:
with io.open(TRACESQLFILE, "a", encoding='utf-8') as fh:
fh.write("\n\n>>> EXCEPTION {} error {}\n sql {}\n"
.format(action, str(ex), sql))
raise
if fetch:
result = cursor.fetchall()
else:
#if cursor.rowcount > 0:
# cursor.fetchall() # must get anyway
result = None
if commit:
self.conn.commit()
if close:
self.closeCursor()
return result
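# Illustration (hypothetical call) of the params handling above: a dict is
# expanded as keyword arguments (e.g. named binds), a tuple/list is passed
# positionally, and plain SQL is executed as-is:
#   self.execute("SELECT filing_id FROM filing WHERE accession_number = :a",
#                params={"a": accessionNumber}, fetch=True)
# The table, column, and accessionNumber names here are placeholders, not part
# of this module.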
def create(self, ddlFile):
# drop tables
startedAt = time.time()
self.showStatus("Dropping prior tables")
for table in self.tablesInDB():
result = self.execute('DROP TABLE %s' % self.dbTableName(table),
close=False, commit=False, fetch=False, action="dropping table")
self.showStatus("Dropping prior sequences")
for sequence in self.sequencesInDB():
result = self.execute('DROP SEQUENCE %s' % sequence,
close=False, commit=False, fetch=False, action="dropping sequence")
self.modelXbrl.profileStat(_("XbrlPublicDB: drop prior tables"), time.time() - startedAt)
startedAt = time.time()
with io.open(os.path.dirname(__file__) + os.sep + ddlFile,
'rt', encoding='utf-8') as fh:
sql = fh.read().replace('%', '%%')
# separate dollar-quoted bodies and statement lines
sqlstatements = []
def findstatements(start, end, laststatement):
for line in sql[start:end].split('\n'):
stmt, comment1, comment2 = line.partition("--")
laststatement += stmt + '\n'
if ';' in stmt:
sqlstatements.append(laststatement)
laststatement = ''
return laststatement
stmt = ''
i = 0
patternDollarEsc = re.compile(r"([$]\w*[$])", re.DOTALL + re.MULTILINE)
while i < len(sql): # preserve $$ function body escaping
match = patternDollarEsc.search(sql, i)
if not match:
stmt = findstatements(i, len(sql), stmt)
sqlstatements.append(stmt)
break
# found match
dollarescape = match.group()
j = match.end()
stmt = findstatements(i, j, stmt) # accumulate statements before match
i = sql.find(dollarescape, j)
if i > j: # found end of match
if self.product == "mysql":
# mysql doesn't want DELIMITER over the interface
stmt = sql[j:i]
i += len(dollarescape)
else:
# postgres and others want the delimiter in the sql sent
i += len(dollarescape)
stmt += sql[j:i]
sqlstatements.append(stmt)
# problem with driver and $$ statements, skip them (for now)
stmt = ''
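# Illustration (hypothetical DDL): a dollar-quoted function body such as
#   CREATE FUNCTION f() RETURNS int AS $$ BEGIN ...; RETURN 1; END; $$ LANGUAGE plpgsql;
# is collected above as a single statement; the semicolons inside the
# $$ ... $$ span do not split it, because the quoted body is copied verbatim.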
action = "executing ddl in {}".format(os.path.basename(ddlFile))
for i, sql in enumerate(sqlstatements):
if any(cmd in sql
for cmd in ('CREATE TABLE', 'CREATE SEQUENCE', 'INSERT INTO', 'CREATE TYPE',
'CREATE FUNCTION',
'SET',
'CREATE INDEX', 'CREATE UNIQUE INDEX' # 'ALTER TABLE ONLY'
)):
statusMsg, sep, rest = sql.strip().partition('\n')
self.showStatus(statusMsg[0:50])
result = self.execute(sql, close=False, commit=False, fetch=False, action=action)
"""
if TRACESQLFILE:
with io.open(TRACESQLFILE, "a", encoding='utf-8') as fh:
fh.write("\n\n>>> ddl {0}: \n{1} \n\n>>> result: \n{2}\n"
.format(i, sql, result))
fh.write(sql)
"""
self.showStatus("")
self.conn.commit()
self.modelXbrl.profileStat(_("XbrlPublicDB: create tables"), time.time() - startedAt)
self.closeCursor()
def databasesInDB(self):
return self.execute({"postgres":"SELECT datname FROM pg_database;",
"mysql": "SHOW databases;",
"orcl": "SELECT DISTINCT OWNER FROM ALL_OBJECTS"
}[self.product],
action="listing tables in database")
def dropAllTablesInDB(self):
# drop all tables (clean out database)
if self.product == "postgres":
self.execute("drop schema public cascade")
self.execute("create schema public;", commit=True, action="recreating schema")
elif self.product in ("mysql", "mssql", "orcl"):
for tableName in self.tablesInDB():
self.execute("DROP TABLE {}".format( self.dbTableName(tableName) ),
action="dropping tables")
def tablesInDB(self):
return set(tableRow[0]
for tableRow in
self.execute({"postgres":"SELECT tablename FROM pg_tables WHERE schemaname = 'public';",
"mysql": "SHOW tables;",
"mssql": "SELECT * FROM sys.TABLES;",
"orcl": "SELECT table_name FROM user_tables",
"sqlite": "SELECT name FROM sqlite_master WHERE type='table';"
}[self.product]))
def sequencesInDB(self):
return set(sequenceRow[0]
for sequenceRow in
self.execute({"postgres":"SELECT c.relname FROM pg_class c WHERE c.relkind = 'S';",
"mysql": "SHOW triggers;",
"mssql": "SELECT name FROM sys.triggers;",
"orcl": "SHOW trigger_name FROM user_triggers"
}[self.product]))
def columnTypeFunctions(self, table):
if table not in self.tableColTypes:
if self.product == "orcl":
colTypesResult = self.execute("SELECT column_name, data_type, data_precision, char_col_decl_length "
"FROM user_tab_columns "
"WHERE table_name = '{0}'"
.format( table )) # table name is not " " quoted here
colTypes = []
for name, fulltype, dataPrecision, charColDeclLength in colTypesResult:
name = name.lower()
fulltype = fulltype.lower()
if fulltype in ("varchar", "varchar2"):
colDecl = "{}({})".format(fulltype, charColDeclLength)
elif fulltype == "number" and dataPrecision:
colDecl = "{}({})".format(fulltype, dataPrecision)
else:
colDecl = fulltype
colTypes.append( (name, fulltype, colDecl) )
# print ("col types for {} = {} ".format(table, colTypes))
elif self.product == "mssql":
colTypesResult = self.execute("SELECT column_name, data_type, character_maximum_length "
"FROM information_schema.columns "
"WHERE table_name = '{0}'"
.format( table )) # table name is not " " quoted here
colTypes = []
for name, fulltype, characterMaxLength in colTypesResult:
name = name.lower()
if fulltype in ("char", "varchar", "nvarchar"):
if characterMaxLength == -1:
characterMaxLength = "max"
colDecl = "{}({})".format(fulltype, characterMaxLength)
else:
colDecl = fulltype
colTypes.append( (name, fulltype, colDecl) )
# print ("col types for {} = {} ".format(table, colTypes))
elif self.product == "sqlite":
colTypesResult = self.execute("PRAGMA table_info('{0}')"
.format( table )) # table name is not " " quoted here
colTypes = []
for cid, name, type, notnull, dflt_value, pk in colTypesResult:
name = name.lower()
type = type.lower()
colTypes.append( (name, type, type) )
# print ("col types for {} = {} ".format(table, colTypes))
else:
colTypes = self.execute("SELECT c.column_name, c.data_type, {0} "
"FROM information_schema.columns c "
"WHERE c.table_name = '{1}' "
"ORDER BY c.ordinal_position;"
.format('c.column_type' if self.product == 'mysql' else 'c.data_type',
self.dbTableName(table)))
self.tableColTypes[table] = dict((name,
# (type cast, conversion function)
('::' + typename if typename in # takes first word of full type
{"integer", "smallint", "int", "bigint",
"real", "numeric",
"int2", "int4", "int8", "float4", "float8",
"boolean", "date", "timestamp"}
else "::double precision" if fulltype.startswith("double precision")
else '',
int if typename in ("integer", "smallint", "int", "bigint", "number") else
float if typename in ("double precision", "real", "numeric") else
self.pyBoolFromDbBool if typename in ("bit", "boolean") else
dateTime if typename in ("date","timestamp") else # ModelValue.datetime !!! not python class
str))
for name, fulltype, colDecl in colTypes
for typename in (fulltype.partition(' ')[0],))
if self.product in ('mysql', 'mssql', 'orcl', 'sqlite'):
self.tableColDeclaration[table] = dict((name, colDecl)
for name, fulltype, colDecl in colTypes)
return self.tableColTypes[table]
def getTable(self, table, idCol, newCols=None, matchCols=None, data=None, commit=False,
comparisonOperator='=', checkIfExisting=False, insertIfNotMatched=True,
returnMatches=True, returnExistenceStatus=False):
# generate SQL
# note: comparison by = will never match NULL fields
# use 'IS NOT DISTINCT FROM' to match nulls, but this is not indexed and verrrrry slooooow
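# Illustration of the NULL caveat above: in SQL, "NULL = NULL" evaluates to
# unknown (not true), so a row whose match column is NULL never matches with
# '='; "NULL IS NOT DISTINCT FROM NULL" is true, which is why that operator
# would be needed (at a cost) to treat NULLs as equal.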
if not data or not newCols or not matchCols:
# nothing can be done, just return
return () # place breakpoint here to debug
isOracle = self.product == "orcl"
isMSSql = self.product == "mssql"
isPostgres = self.product == "postgres"
isSQLite = self.product == "sqlite"
newCols = [newCol.lower() for newCol in newCols]
matchCols = [matchCol.lower() for matchCol in matchCols]
returningCols = []
if idCol: # idCol is the first returned column if present
returningCols.append(idCol.lower())
for matchCol in matchCols:
if matchCol not in returningCols: # allow idCol to be specified or default assigned
returningCols.append(matchCol)
colTypeFunctions = self.columnTypeFunctions(table)
colDeclarations = self.tableColDeclaration.get(table)
try:
colTypeCast = tuple(colTypeFunctions[colName][0] for colName in newCols)
colTypeFunction = [colTypeFunctions[colName][1] for colName in returningCols]
if returnExistenceStatus:
colTypeFunction.append(self.pyBoolFromDbBool) # existence is a boolean
except KeyError as err:
raise XPDBException("xpgDB:MissingColumnDefinition",
_("Table %(table)s column definition missing: %(missingColumnName)s"),
table=table, missingColumnName=str(err))
rowValues = []
rowLongValues = [] # contains None if no parameters, else {} parameter dict
if isOracle:
longColValues = {}
else:
longColValues = []
for row in data:
colValues = []
for col in row:
if isinstance(col, bool):
if isOracle or isMSSql or isSQLite:
colValues.append('1' if col else '0')
else:
colValues.append('TRUE' if col else 'FALSE')
elif isinstance(col, int):
colValues.append(str(col))
elif isinstance(col, float):
if _ISFINITE(col):
colValues.append(str(col))
else: # no NaN, INF, in SQL implementations (Postgres has it but not IEEE implementation)
colValues.append('NULL')
elif isinstance(col, Decimal):
if col.is_finite():
colValues.append(str(col))
else: # no NaN, INF, in SQL implementations (Postgres has it but not IEEE implementation)
colValues.append('NULL')
elif isinstance(col, (datetime.date, datetime.datetime)) and self.product == "orcl":
colValues.append("DATE '{:04}-{:02}-{:02}'".format(col.year, col.month, col.day))
elif isinstance(col, datetime.datetime) and (isMSSql or isSQLite):
colValues.append("'{:04}-{:02}-{:02} {:02}:{:02}:{:02}'".format(col.year, col.month, col.day, col.hour, col.minute, col.second))
elif isinstance(col, datetime.date) and (isMSSql or isSQLite):
colValues.append("'{:04}-{:02}-{:02}'".format(col.year, col.month, col.day))
elif col is None:
colValues.append('NULL')
elif isinstance(col, _STR_BASE) and len(col) >= 4000 and (isOracle or isMSSql):
if isOracle:
colName = "col{}".format(len(colValues))
longColValues[colName] = col
colValues.append(":" + colName)
else:
longColValues.append(col)
colValues.append("?")
else:
colValues.append(self.dbStr(col))
if not rowValues and isPostgres: # first row
for i, cast in enumerate(colTypeCast):
if cast:
colValues[i] = colValues[i] + cast
rowColValues = ", ".join(colValues)
rowValues.append("(" + rowColValues + ")" if not isOracle else rowColValues)
if longColValues:
rowLongValues.append(longColValues)
if isOracle:
longColValues = {} # must be new instance of dict
else:
longColValues = []
else:
rowLongValues.append(None)
values = ", \n".join(rowValues)
_table = self.dbTableName(table)
_inputTableName = self.tempInputTableName
if self.product == "postgres":
# insert new rows, return id and cols of new and existing rows
# use IS NOT DISTINCT FROM instead of = to compare NULL usefully
sql = [(('''
WITH row_values (%(newCols)s) AS (
VALUES %(values)s
)''' + (''', insertions AS (
INSERT INTO %(table)s (%(newCols)s)
SELECT %(newCols)s
FROM row_values v''' + ('''
WHERE NOT EXISTS (SELECT 1
FROM %(table)s x
WHERE %(match)s)''' if checkIfExisting else '') + '''
RETURNING %(returningCols)s
) ''' if insertIfNotMatched else '') + '''
(''' + (('''
SELECT %(x_returningCols)s %(statusIfExisting)s
FROM %(table)s x JOIN row_values v ON (%(match)s) ''' if checkIfExisting else '') + ('''
) UNION ( ''' if (checkIfExisting and insertIfNotMatched) else '') + ('''
SELECT %(returningCols)s %(statusIfInserted)s
FROM insertions''' if insertIfNotMatched else '')) + '''
);''') % {"table": _table,
"idCol": idCol,
"newCols": ', '.join(newCols),
"returningCols": ', '.join(returningCols),
"x_returningCols": ', '.join('x.{0}'.format(c) for c in returningCols),
"match": ' AND '.join('x.{0} {1} v.{0}'.format(col, comparisonOperator)
for col in matchCols),
"values": values,
"statusIfInserted": ", FALSE" if returnExistenceStatus else "",
"statusIfExisting": ", TRUE" if returnExistenceStatus else ""
}, None, True)]
elif self.product == "mysql":
sql = [("CREATE TEMPORARY TABLE %(inputTable)s ( %(inputCols)s );" %
{"inputTable": _inputTableName,
"inputCols": ', '.join('{0} {1}'.format(newCol, colDeclarations[newCol])
for newCol in newCols)}, None, False),
("INSERT INTO %(inputTable)s ( %(newCols)s ) VALUES %(values)s;" %
{"inputTable": _inputTableName,
"newCols": ', '.join(newCols),
"values": values}, None, False)]
if insertIfNotMatched:
if checkIfExisting:
_where = ('WHERE NOT EXISTS (SELECT 1 FROM %(table)s x WHERE %(match)s)' %
{"table": _table,
"match": ' AND '.join('x.{0} {1} i.{0}'.format(col, comparisonOperator)
for col in matchCols)})
_whereLock = (", %(table)s AS x READ" % {"table": _table})
else:
_where = "";
_whereLock = ""
sql.append( ("LOCK TABLES %(table)s WRITE %(whereLock)s" %
{"table": _table,
"whereLock": _whereLock}, None, False) )
sql.append( ("INSERT INTO %(table)s ( %(newCols)s ) SELECT %(newCols)s FROM %(inputTable)s i %(where)s;" %
{"inputTable": _inputTableName,
"table": _table,
"newCols": ', '.join(newCols),
"where": _where}, None, False) )
elif returnMatches or returnExistenceStatus:
sql.append( ("LOCK TABLES %(table)s READ" %
{"table": _table}, None, False) )
# don't know how to get status if existing
if returnMatches or returnExistenceStatus:
sql.append( ("SELECT %(returningCols)s %(statusIfExisting)s from %(inputTable)s JOIN %(table)s ON ( %(match)s );" %
{"inputTable": _inputTableName,
"table": _table,
"newCols": ', '.join(newCols),
"match": ' AND '.join('{0}.{2} = {1}.{2}'.format(_table,_inputTableName,col)
for col in matchCols),
"statusIfExisting": ", FALSE" if returnExistenceStatus else "",
"returningCols": ', '.join('{0}.{1}'.format(_table,col)
for col in returningCols)}, None, True) )
sql.append( ("DROP TEMPORARY TABLE %(inputTable)s;" %
{"inputTable": _inputTableName}, None, False) )
elif self.product == "mssql":
sql = [("CREATE TABLE #%(inputTable)s ( %(inputCols)s );" %
{"inputTable": _inputTableName,
"inputCols": ', '.join('{0} {1}'.format(newCol, colDeclarations[newCol])
for newCol in newCols)}, None, False)]
# break values insertion into 1000's each
def insertMSSqlRows(i, j, params):
sql.append(("INSERT INTO #%(inputTable)s ( %(newCols)s ) VALUES %(values)s;" %
{"inputTable": _inputTableName,
"newCols": ', '.join(newCols),
"values": ", ".join(rowValues[i:j])}, params, False))
iMax = len(rowValues)
i = 0
while (i < iMax):
for j in range(i, min(i+1000, iMax)):
if rowLongValues[j] is not None:
if j > i:
insertMSSqlRows(i, j, None)
insertMSSqlRows(j, j+1, rowLongValues[j])
i = j + 1
break
if i < j+1 and i < iMax:
insertMSSqlRows(i, j+1, None)
i = j+1
if insertIfNotMatched:
sql.append(("MERGE INTO %(table)s USING #%(inputTable)s ON (%(match)s) "
"WHEN NOT MATCHED THEN INSERT (%(newCols)s) VALUES (%(values)s);" %
{"inputTable": _inputTableName,
"table": _table,
"newCols": ', '.join(newCols),
"match": ' AND '.join('{0}.{2} = #{1}.{2}'.format(_table,_inputTableName,col)
for col in matchCols),
"values": ', '.join("#{0}.{1}".format(_inputTableName,newCol)
for newCol in newCols)}, None, False))
if returnMatches or returnExistenceStatus:
sql.append(# don't know how to get status if existing
("SELECT %(returningCols)s %(statusIfExisting)s from #%(inputTable)s JOIN %(table)s ON ( %(match)s );" %
{"inputTable": _inputTableName,
"table": _table,
"newCols": ', '.join(newCols),
"match": ' AND '.join('{0}.{2} = #{1}.{2}'.format(_table,_inputTableName,col)
for col in matchCols),
"statusIfExisting": ", 0" if returnExistenceStatus else "",
"returningCols": ', '.join('{0}.{1}'.format(_table,col)
for col in returningCols)}, None, True))
sql.append(("DROP TABLE #%(inputTable)s;" %
{"inputTable": _inputTableName}, None, False))
elif self.product == "orcl":
sql = [("CREATE GLOBAL TEMPORARY TABLE %(inputTable)s ( %(inputCols)s )" %
{"inputTable": _inputTableName,
"inputCols": ', '.join('{0} {1}'.format(newCol, colDeclarations[newCol])
for newCol in newCols)}, None, False)]
# break values insertion into 1000's each
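            # same batching scheme as the MSSQL path, but Oracle lacks a multi-row
            # VALUES syntax, so each batch is built as SELECT ... FROM dual UNION ALL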
def insertOrclRows(i, j, params):
sql.append(("INSERT INTO %(inputTable)s ( %(newCols)s ) %(values)s" %
{"inputTable": _inputTableName,
"newCols": ', '.join(newCols),
"values": "\nUNION ALL".join(" SELECT {} FROM dual ".format(r)
for r in rowValues[i:j])}, params, False))
iMax = len(rowValues)
i = 0
while (i < iMax):
for j in range(i, min(i+1000, iMax)):
if rowLongValues[j] is not None:
if j > i:
insertOrclRows(i, j, None)
insertOrclRows(j, j+1, rowLongValues[j])
i = j + 1
break
if i < j+1 and i < iMax:
insertOrclRows(i, j+1, None)
i = j+1
if insertIfNotMatched:
sql.append(("MERGE INTO %(table)s USING %(inputTable)s ON (%(match)s) "
"WHEN NOT MATCHED THEN INSERT (%(newCols)s) VALUES (%(values)s)" %
{"inputTable": _inputTableName,
"table": _table,
"newCols": ', '.join(newCols),
"match": ' AND '.join('{0}.{2} = {1}.{2}'.format(_table,_inputTableName,col)
for col in matchCols),
"values": ', '.join("{0}.{1}".format(_inputTableName,newCol)
for newCol in newCols)}, None, False))
if returnMatches or returnExistenceStatus:
sql.append(# don't know how to get status if existing
("SELECT %(returningCols)s %(statusIfExisting)s from %(inputTable)s JOIN %(table)s ON ( %(match)s )" %
{"inputTable": _inputTableName,
"table": _table,
"newCols": ', '.join(newCols),
"match": ' AND '.join('{0}.{2} = {1}.{2}'.format(_table,_inputTableName,col)
for col in matchCols),
"statusIfExisting": ", 0" if returnExistenceStatus else "",
"returningCols": ', '.join('{0}.{1}'.format(_table,col)
for col in returningCols)}, None, True))
sql.append(("DROP TABLE %(inputTable)s" %
{"inputTable": _inputTableName}, None, False))
elif self.product == "sqlite":
sql = [("CREATE TEMP TABLE %(inputTable)s ( %(inputCols)s );" %
{"inputTable": _inputTableName,
"inputCols": ', '.join('{0} {1}'.format(newCol, colDeclarations[newCol])
for newCol in newCols)}, None, False)]
# break values insertion into 1000's each
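            # smaller batches of 500 rows here, presumably to stay under SQLite's
            # default 500-term compound-SELECT limit; long values again go one row
            # at a time with bound parameters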
def insertSQLiteRows(i, j, params):
sql.append(("INSERT INTO %(inputTable)s ( %(newCols)s ) VALUES %(values)s;" %
{"inputTable": _inputTableName,
"newCols": ', '.join(newCols),
"values": ", ".join(rowValues[i:j])}, params, False))
iMax = len(rowValues)
i = 0
while (i < iMax):
for j in range(i, min(i+500, iMax)):
if rowLongValues[j] is not None:
if j > i:
insertSQLiteRows(i, j, None)
insertSQLiteRows(j, j+1, rowLongValues[j])
i = j + 1
break
if i < j+1 and i < iMax:
insertSQLiteRows(i, j+1, None)
i = j+1
if insertIfNotMatched:
if checkIfExisting:
_where = ('WHERE NOT EXISTS (SELECT 1 FROM %(table)s x WHERE %(match)s)' %
{"table": _table,
"match": ' AND '.join('x.{0} {1} i.{0}'.format(col, comparisonOperator)
for col in matchCols)})
else:
_where = "";
sql.append( ("INSERT INTO %(table)s ( %(newCols)s ) SELECT %(newCols)s FROM %(inputTable)s i %(where)s;" %
{"inputTable": _inputTableName,
"table": _table,
"newCols": ', '.join(newCols),
"where": _where}, None, False) )
if returnMatches or returnExistenceStatus:
sql.append(# don't know how to get status if existing
("SELECT %(returningCols)s %(statusIfExisting)s from %(inputTable)s JOIN %(table)s ON ( %(match)s );" %
{"inputTable": _inputTableName,
"table": _table,
"newCols": ', '.join(newCols),
"match": ' AND '.join('{0}.{2} = {1}.{2}'.format(_table,_inputTableName,col)
for col in matchCols),
"statusIfExisting": ", 0" if returnExistenceStatus else "",
"returningCols": ', '.join('{0}.{1}'.format(_table,col)
for col in returningCols)}, None, True))
sql.append(("DROP TABLE %(inputTable)s;" %
{"inputTable": _inputTableName}, None, False))
if insertIfNotMatched and self.syncSequences:
sql.append( ("update sqlite_sequence "
"set seq = (select seq from sqlite_sequence where name = '%(table)s') "
"where name != '%(table)s';" %
{"table": _table}, None, False) )
if TRACESQLFILE:
with io.open(TRACESQLFILE, "a", encoding='utf-8') as fh:
fh.write("\n\n>>> accession {0} table {1} sql length {2} row count {3}\n"
.format(self.accessionId, table, len(sql), len(data)))
for sqlStmt, params, fetch in sql:
fh.write("\n " + sqlStmt + "\n {}".format(params if params else ""))
tableRows = []
for sqlStmt, params, fetch in sql:
if params and isOracle:
self.cursor.setinputsizes(**dict((name,oracleNCLOB) for name in params))
result = self.execute(sqlStmt,commit=commit, close=False, fetch=fetch, params=params)
if fetch and result:
tableRows.extend(result)
if TRACESQLFILE:
with io.open(TRACESQLFILE, "a", encoding='utf-8') as fh:
fh.write("\n\n>>> accession {0} table {1} result row count {2}\n{3}\n"
.format(self.accessionId, table, len(tableRows), '\n'.join(str(r) for r in tableRows)))
return tuple(tuple(None if colValue == "NULL" or colValue is None else
colTypeFunction[i](colValue) # convert to int, datetime, etc
for i, colValue in enumerate(row))
for row in tableRows)
def updateTable(self, table, cols=None, data=None, commit=False):
# generate SQL
# note: comparison by = will never match NULL fields
# use 'IS NOT DISTINCT FROM' to match nulls, but this is not indexed and verrrrry slooooow
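        # cols[0] is treated as the key column used to match existing rows;
        # every remaining column in cols is overwritten from the supplied data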
if not cols or not data:
# nothing can be done, just return
return () # place breakpoint here to debug
isOracle = self.product == "orcl"
isSQLite = self.product == "sqlite"
idCol = cols[0]
colTypeFunctions = self.columnTypeFunctions(table)
colDeclarations = self.tableColDeclaration.get(table)
try:
colTypeCast = tuple(colTypeFunctions[colName][0] for colName in cols)
except KeyError as err:
raise XPDBException("xpgDB:MissingColumnDefinition",
_("Table %(table)s column definition missing: %(missingColumnName)s"),
table=table, missingColumnName=str(err))
rowValues = []
for row in data:
colValues = []
for col in row:
if isinstance(col, bool):
colValues.append('TRUE' if col else 'FALSE')
elif isinstance(col, (int,float)):
colValues.append(str(col))
elif col is None:
colValues.append('NULL')
else:
colValues.append(self.dbStr(col))
if not rowValues and self.product == "postgres": # first row
for i, cast in enumerate(colTypeCast):
if cast:
colValues[i] = colValues[i] + cast
rowColValues = ", ".join(colValues)
if isOracle:
rowValues.append(rowColValues)
elif isSQLite:
rowValues.append(colValues)
else:
rowValues.append("(" + rowColValues + ")")
if not isOracle and not isSQLite:
values = ", \n".join(rowValues)
_table = self.dbTableName(table)
_inputTableName = self.tempInputTableName
if self.product == "postgres":
            # update existing rows by joining a VALUES list (exposed as the CTE
            # "input") to the target table on the id column; only non-id columns are set
sql = [('''
WITH input (%(valCols)s) AS ( VALUES %(values)s )
UPDATE %(table)s t SET %(settings)s
FROM input i WHERE i.%(idCol)s = t.%(idCol)s
;''') % {"table": _table,
"idCol": idCol,
"valCols": ', '.join(col for col in cols),
"settings": ', '.join('{0} = i.{0}'.format(cols[i])
for i, col in enumerate(cols)
if i > 0),
"values": values}]
elif self.product == "mysql":
sql = ["CREATE TEMPORARY TABLE %(inputTable)s ( %(valCols)s );" %
{"inputTable": _inputTableName,
"valCols": ', '.join('{0} {1}'.format(col, colDeclarations[col])
for col in cols)},
"INSERT INTO %(inputTable)s ( %(newCols)s ) VALUES %(values)s;" %
{"inputTable": _inputTableName,
"newCols": ', '.join(cols),
"values": values},
"LOCK TABLES %(inputTable)s AS i READ, %(table)s AS t WRITE;" %
{"inputTable": _inputTableName,
"table": _table},
"UPDATE %(inputTable)s i, %(table)s t SET %(settings)s WHERE i.%(idCol)s = t.%(idCol)s;" %
{"inputTable": _inputTableName,
"table": _table,
"idCol": idCol,
"settings": ', '.join('t.{0} = i.{0}'.format(cols[i])
for i, col in enumerate(cols)
if i > 0)},
"DROP TEMPORARY TABLE %(inputTable)s;" % {"inputTable": _inputTableName}]
elif self.product == "mssql":
sql = ["CREATE TABLE #%(inputTable)s ( %(valCols)s );" %
{"inputTable": _inputTableName,
"valCols": ', '.join('{0} {1}'.format(col, colDeclarations[col])
for col in cols)}]
# must break values insertion into 1000's each
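            # 950 rows per INSERT keeps a safety margin under SQL Server's
            # 1000-row limit for a VALUES table constructor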
for i in range(0, len(rowValues), 950):
values = ", \n".join(rowValues[i: i+950])
sql.append("INSERT INTO #%(inputTable)s ( %(cols)s ) VALUES %(values)s;" %
{"inputTable": _inputTableName,
"cols": ', '.join(cols),
"values": values})
sql.append("MERGE INTO %(table)s USING #%(inputTable)s ON (#%(inputTable)s.%(idCol)s = %(table)s.%(idCol)s) "
"WHEN MATCHED THEN UPDATE SET %(settings)s;" %
{"inputTable": _inputTableName,
"table": _table,
"idCol": idCol,
"settings": ', '.join('{0}.{2} = #{1}.{2}'.format(_table, _inputTableName, cols[i])
for i, col in enumerate(cols)
if i > 0)})
sql.append("DROP TABLE #%(inputTable)s;" % {"inputTable": _inputTableName})
elif self.product == "orcl":
sql = ["CREATE GLOBAL TEMPORARY TABLE %(inputTable)s ( %(valCols)s )" %
{"inputTable": _inputTableName,
"valCols": ', '.join('{0} {1}'.format(col, colDeclarations[col])
for col in cols)}]
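            # as in insertTable, Oracle rows are loaded 500 at a time via
            # SELECT ... FROM dual UNION ALL because there is no multi-row VALUES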
for i in range(0, len(rowValues), 500):
sql.append(
"INSERT INTO %(inputTable)s ( %(cols)s ) %(values)s" %
{"inputTable": _inputTableName,
"cols": ', '.join(cols),
"values": "\nUNION ALL".join(" SELECT {} FROM dual ".format(r)
for r in rowValues[i:i+500])})
sql.append("MERGE INTO %(table)s USING %(inputTable)s ON (%(inputTable)s.%(idCol)s = %(table)s.%(idCol)s) "
"WHEN MATCHED THEN UPDATE SET %(settings)s" %
{"inputTable": _inputTableName,
"table": _table,
"idCol": idCol,
"settings": ', '.join('{0}.{2} = {1}.{2}'.format(_table, _inputTableName, cols[i])
for i, col in enumerate(cols)
if i > 0)})
sql.append("DROP TABLE %(inputTable)s" % {"inputTable": _inputTableName})
elif self.product == "sqlite":
sql = ["UPDATE %(table)s SET %(settings)s WHERE %(idCol)s = %(idVal)s;" %
{"table": _table,
"idCol": idCol,
"idVal": rowValue[0],
"settings": ', '.join('{0} = {1}'.format(col,rowValue[i])
for i, col in enumerate(cols)
if i > 0)}
for rowValue in rowValues]
if TRACESQLFILE:
with io.open(TRACESQLFILE, "a", encoding='utf-8') as fh:
fh.write("\n\n>>> accession {0} table {1} sql length {2} row count {3}\n"
.format(self.accessionId, table, len(sql), len(data)))
for sqlStmt in sql:
                    fh.write("\n " + sqlStmt)
for sqlStmt in sql:
self.execute(sqlStmt,commit=commit, fetch=False, close=False)
|
|
# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Storage service catalog utility functions and classes for NetApp systems.
"""
import copy
from threading import Timer
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
class NetAppVolume(object):
"""Represents a NetApp volume.
Present attributes
id - name, vserver, junction_path, type
aggr - name, raid_type, ha_policy, disk_type
sis - dedup, compression
state - status, vserver_root, cluster_volume,
inconsistent, invalid, junction_active
qos - qos_policy_group
space - space-guarantee-enabled, space-guarantee,
thin_provisioned, size_avl_bytes, size_total_bytes
mirror - mirrored i.e. dp mirror
export - path
"""
def __init__(self, name, vserver=None):
self.id = {}
self.aggr = {}
self.sis = {}
self.state = {}
self.qos = {}
self.space = {}
self.mirror = {}
self.export = {}
self.id['name'] = name
self.id['vserver'] = vserver
def __eq__(self, other):
"""Checks for equality."""
if (self.id['name'] == other.id['name'] and
self.id['vserver'] == other.id['vserver']):
return True
def __hash__(self):
"""Computes hash for the object."""
return hash(self.id['name'])
def __cmp__(self, other):
"""Implements comparison logic for volumes."""
self_size_avl = self.space.get('size_avl_bytes')
other_size_avl = other.space.get('size_avl_bytes')
if self_size_avl is None and other_size_avl is not None:
return -1
elif self_size_avl is not None and other_size_avl is None:
return 1
elif self_size_avl is None and other_size_avl is None:
return 0
elif int(self_size_avl) < int(other_size_avl):
return -1
elif int(self_size_avl) > int(other_size_avl):
return 1
else:
return 0
def __str__(self):
"""Returns human readable form for object."""
vol_str = "NetApp Volume id: %s, aggr: %s,"\
" space: %s, sis: %s, state: %s, qos: %s"\
% (self.id, self.aggr, self.space, self.sis, self.state, self.qos)
return vol_str
def get_cluster_vols_with_ssc(na_server, vserver, volume=None):
"""Gets ssc vols for cluster vserver."""
volumes = query_cluster_vols_for_ssc(na_server, vserver, volume)
sis_vols = get_sis_vol_dict(na_server, vserver, volume)
mirrored_vols = get_snapmirror_vol_dict(na_server, vserver, volume)
aggrs = {}
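    # cache aggregate attributes so each aggregate is queried only once,
    # even when many volumes live on it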
for vol in volumes:
aggr_name = vol.aggr['name']
if aggr_name:
if aggr_name in aggrs:
aggr_attrs = aggrs[aggr_name]
else:
aggr_attrs = query_aggr_options(na_server, aggr_name)
if aggr_attrs:
eff_disk_type = query_aggr_storage_disk(na_server,
aggr_name)
aggr_attrs['disk_type'] = eff_disk_type
aggrs[aggr_name] = aggr_attrs
vol.aggr['raid_type'] = aggr_attrs.get('raid_type')
vol.aggr['ha_policy'] = aggr_attrs.get('ha_policy')
vol.aggr['disk_type'] = aggr_attrs.get('disk_type')
if sis_vols:
if vol.id['name'] in sis_vols:
vol.sis['dedup'] = sis_vols[vol.id['name']]['dedup']
vol.sis['compression'] =\
sis_vols[vol.id['name']]['compression']
else:
vol.sis['dedup'] = False
vol.sis['compression'] = False
if (vol.space['space-guarantee-enabled'] and
(vol.space['space-guarantee'] == 'file' or
vol.space['space-guarantee'] == 'volume')):
vol.space['thin_provisioned'] = False
else:
vol.space['thin_provisioned'] = True
if mirrored_vols:
vol.mirror['mirrored'] = False
if vol.id['name'] in mirrored_vols:
for mirr_attrs in mirrored_vols[vol.id['name']]:
if (mirr_attrs['rel_type'] == 'data_protection' and
mirr_attrs['mirr_state'] == 'snapmirrored'):
vol.mirror['mirrored'] = True
break
return volumes
def query_cluster_vols_for_ssc(na_server, vserver, volume=None):
"""Queries cluster volumes for ssc."""
query = {'volume-attributes': None}
volume_id = {'volume-id-attributes': {'owning-vserver-name': vserver}}
if volume:
volume_id['volume-id-attributes']['name'] = volume
query['volume-attributes'] = volume_id
des_attr = {'volume-attributes':
['volume-id-attributes',
'volume-space-attributes',
'volume-state-attributes',
'volume-qos-attributes']}
result = na_utils.invoke_api(na_server, api_name='volume-get-iter',
api_family='cm', query=query,
des_result=des_attr,
additional_elems=None,
is_iter=True)
vols = set()
for res in result:
records = res.get_child_content('num-records')
if records > 0:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
vol_attrs = attr_list.get_children()
vols_found = create_vol_list(vol_attrs)
vols.update(vols_found)
return vols
def create_vol_list(vol_attrs):
"""Creates vol list with features from attr list."""
vols = set()
for v in vol_attrs:
try:
# name and vserver are mandatory
            # Their absence raises a KeyError, which skips the volume.
name = v['volume-id-attributes']['name']
vserver = v['volume-id-attributes']['owning-vserver-name']
vol = NetAppVolume(name, vserver)
vol.id['type'] =\
v['volume-id-attributes'].get_child_content('type')
if vol.id['type'] == "tmp":
continue
vol.id['junction_path'] =\
v['volume-id-attributes'].get_child_content('junction-path')
# state attributes mandatory.
vol.state['vserver_root'] =\
na_utils.to_bool(
v['volume-state-attributes'].get_child_content(
'is-vserver-root'))
if vol.state['vserver_root']:
continue
vol.state['status'] =\
v['volume-state-attributes'].get_child_content('state')
vol.state['inconsistent'] =\
na_utils.to_bool(
v['volume-state-attributes'].get_child_content(
'is-inconsistent'))
vol.state['invalid'] =\
na_utils.to_bool(
v['volume-state-attributes'].get_child_content(
'is-invalid'))
vol.state['junction_active'] =\
na_utils.to_bool(
v['volume-state-attributes'].get_child_content(
'is-junction-active'))
vol.state['cluster_volume'] =\
na_utils.to_bool(
v['volume-state-attributes'].get_child_content(
'is-cluster-volume'))
if (vol.state['status'] != 'online' or
vol.state['inconsistent'] or vol.state['invalid']):
# offline, invalid and inconsistent volumes are not usable
continue
# aggr attributes mandatory.
vol.aggr['name'] =\
v['volume-id-attributes']['containing-aggregate-name']
# space attributes mandatory.
vol.space['size_avl_bytes'] =\
v['volume-space-attributes']['size-available']
vol.space['size_total_bytes'] =\
v['volume-space-attributes']['size-total']
vol.space['space-guarantee-enabled'] =\
na_utils.to_bool(
v['volume-space-attributes'].get_child_content(
'is-space-guarantee-enabled'))
vol.space['space-guarantee'] =\
v['volume-space-attributes'].get_child_content(
'space-guarantee')
# qos attributes optional.
if v.get_child_by_name('volume-qos-attributes'):
vol.qos['qos_policy_group'] =\
v['volume-qos-attributes'].get_child_content(
'policy-group-name')
else:
vol.qos['qos_policy_group'] = None
vols.add(vol)
except KeyError as e:
LOG.debug('Unexpected error while creating'
' ssc vol list. Message - %s' % (e.message))
continue
return vols
def query_aggr_options(na_server, aggr_name):
"""Queries cluster aggr for attributes.
Currently queries for raid and ha-policy.
"""
add_elems = {'aggregate': aggr_name}
attrs = {}
try:
result = na_utils.invoke_api(na_server,
api_name='aggr-options-list-info',
api_family='cm', query=None,
des_result=None,
additional_elems=add_elems,
is_iter=False)
for res in result:
options = res.get_child_by_name('options')
if options:
op_list = options.get_children()
for op in op_list:
if op.get_child_content('name') == 'ha_policy':
attrs['ha_policy'] = op.get_child_content('value')
if op.get_child_content('name') == 'raidtype':
attrs['raid_type'] = op.get_child_content('value')
except Exception as e:
LOG.debug("Exception querying aggr options. %s", e)
return attrs
def get_sis_vol_dict(na_server, vserver, volume=None):
"""Queries sis for volumes.
If volume is present sis is queried for it.
Records dedup and compression enabled.
"""
sis_vols = {}
query_attr = {'vserver': vserver}
if volume:
vol_path = '/vol/%s' % (volume)
query_attr['path'] = vol_path
query = {'sis-status-info': query_attr}
try:
result = na_utils.invoke_api(na_server,
api_name='sis-get-iter',
api_family='cm',
query=query,
is_iter=True)
for res in result:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
sis_status = attr_list.get_children()
for sis in sis_status:
path = sis.get_child_content('path')
if not path:
continue
(___, __, vol) = path.rpartition('/')
if not vol:
continue
v_sis = {}
v_sis['compression'] = na_utils.to_bool(
sis.get_child_content('is-compression-enabled'))
v_sis['dedup'] = na_utils.to_bool(
sis.get_child_content('state'))
sis_vols[vol] = v_sis
except Exception as e:
LOG.debug("Exception querying sis information. %s", e)
return sis_vols
def get_snapmirror_vol_dict(na_server, vserver, volume=None):
"""Queries snapmirror volumes."""
mirrored_vols = {}
query_attr = {'source-vserver': vserver}
if volume:
query_attr['source-volume'] = volume
query = {'snapmirror-info': query_attr}
try:
result = na_utils.invoke_api(na_server,
api_name='snapmirror-get-iter',
api_family='cm', query=query,
is_iter=True)
for res in result:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
snap_info = attr_list.get_children()
for snap in snap_info:
src_volume = snap.get_child_content('source-volume')
v_snap = {}
v_snap['dest_loc'] =\
snap.get_child_content('destination-location')
v_snap['rel_type'] =\
snap.get_child_content('relationship-type')
v_snap['mirr_state'] =\
snap.get_child_content('mirror-state')
if mirrored_vols.get(src_volume):
mirrored_vols.get(src_volume).append(v_snap)
else:
mirrored_vols[src_volume] = [v_snap]
except Exception as e:
LOG.debug("Exception querying mirror information. %s", e)
return mirrored_vols
def query_aggr_storage_disk(na_server, aggr):
"""Queries for storage disks associated to an aggregate."""
query = {'storage-disk-info': {'disk-raid-info':
{'disk-aggregate-info':
{'aggregate-name': aggr}}}}
des_attr = {'storage-disk-info':
{'disk-raid-info': ['effective-disk-type']}}
try:
result = na_utils.invoke_api(na_server,
api_name='storage-disk-get-iter',
api_family='cm', query=query,
des_result=des_attr,
additional_elems=None,
is_iter=True)
for res in result:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
storage_disks = attr_list.get_children()
for disk in storage_disks:
raid_info = disk.get_child_by_name('disk-raid-info')
if raid_info:
eff_disk_type =\
raid_info.get_child_content('effective-disk-type')
if eff_disk_type:
return eff_disk_type
else:
continue
except Exception as e:
LOG.debug("Exception querying storage disk. %s", e)
return 'unknown'
def get_cluster_ssc(na_server, vserver):
"""Provides cluster volumes with ssc."""
netapp_volumes = get_cluster_vols_with_ssc(na_server, vserver)
mirror_vols = set()
dedup_vols = set()
compress_vols = set()
thin_prov_vols = set()
ssc_map = {'mirrored': mirror_vols, 'dedup': dedup_vols,
'compression': compress_vols,
'thin': thin_prov_vols, 'all': netapp_volumes}
for vol in netapp_volumes:
if vol.sis.get('dedup'):
dedup_vols.add(vol)
if vol.sis.get('compression'):
compress_vols.add(vol)
if vol.mirror.get('mirrored'):
mirror_vols.add(vol)
if vol.space.get('thin_provisioned'):
thin_prov_vols.add(vol)
return ssc_map
def refresh_cluster_stale_ssc(*args, **kwargs):
"""Refreshes stale ssc volumes with latest."""
backend = args[0]
na_server = args[1]
vserver = args[2]
identity = str(id(backend))
lock_pr = '%s_%s' % ('refresh_ssc', identity)
try:
job_set = na_utils.set_safe_attr(
backend, 'refresh_stale_running', True)
if not job_set:
return
@utils.synchronized(lock_pr)
def refresh_stale_ssc():
stale_vols = backend._update_stale_vols(reset=True)
LOG.info(_('Running stale ssc refresh job for %(server)s'
' and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
# refreshing single volumes can create inconsistency
# hence doing manipulations on copy
ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
refresh_vols = set()
expired_vols = set()
for vol in stale_vols:
name = vol.id['name']
res = get_cluster_vols_with_ssc(na_server, vserver, name)
if res:
refresh_vols.add(res.pop())
else:
expired_vols.add(vol)
for vol in refresh_vols:
for k in ssc_vols_copy:
vol_set = ssc_vols_copy[k]
vol_set.discard(vol)
if k == "mirrored" and vol.mirror.get('mirrored'):
vol_set.add(vol)
if k == "dedup" and vol.sis.get('dedup'):
vol_set.add(vol)
if k == "compression" and vol.sis.get('compression'):
vol_set.add(vol)
if k == "thin" and vol.space.get('thin_provisioned'):
vol_set.add(vol)
if k == "all":
vol_set.add(vol)
for vol in expired_vols:
for k in ssc_vols_copy:
vol_set = ssc_vols_copy[k]
vol_set.discard(vol)
backend.refresh_ssc_vols(ssc_vols_copy)
LOG.info(_('Successfully completed stale refresh job for'
' %(server)s and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
refresh_stale_ssc()
finally:
na_utils.set_safe_attr(backend, 'refresh_stale_running', False)
def get_cluster_latest_ssc(*args, **kwargs):
"""Updates volumes including ssc."""
backend = args[0]
na_server = args[1]
vserver = args[2]
identity = str(id(backend))
lock_pr = '%s_%s' % ('refresh_ssc', identity)
# As this depends on stale job running state
# set flag as soon as job starts to avoid
# job accumulation.
try:
job_set = na_utils.set_safe_attr(backend, 'ssc_job_running', True)
if not job_set:
return
@utils.synchronized(lock_pr)
def get_latest_ssc():
LOG.info(_('Running cluster latest ssc job for %(server)s'
' and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
ssc_vols = get_cluster_ssc(na_server, vserver)
backend.refresh_ssc_vols(ssc_vols)
backend.ssc_run_time = timeutils.utcnow()
LOG.info(_('Successfully completed ssc job for %(server)s'
' and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
get_latest_ssc()
finally:
na_utils.set_safe_attr(backend, 'ssc_job_running', False)
def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
"""Refresh cluster ssc for backend."""
if not isinstance(backend, driver.VolumeDriver):
raise exception.InvalidInput(reason=_("Backend not a VolumeDriver."))
if not isinstance(na_server, api.NaServer):
raise exception.InvalidInput(reason=_("Backend server not NaServer."))
delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800)
if getattr(backend, 'ssc_job_running', None):
LOG.warn(_('ssc job in progress. Returning... '))
return
elif (getattr(backend, 'ssc_run_time', None) is None or
(backend.ssc_run_time and
timeutils.is_newer_than(backend.ssc_run_time, delta_secs))):
if synchronous:
get_cluster_latest_ssc(backend, na_server, vserver)
else:
t = Timer(0, get_cluster_latest_ssc,
args=[backend, na_server, vserver])
t.start()
elif getattr(backend, 'refresh_stale_running', None):
LOG.warn(_('refresh stale ssc job in progress. Returning... '))
return
else:
if backend.stale_vols:
if synchronous:
refresh_cluster_stale_ssc(backend, na_server, vserver)
else:
t = Timer(0, refresh_cluster_stale_ssc,
args=[backend, na_server, vserver])
t.start()
def get_volumes_for_specs(ssc_vols, specs):
"""Shortlists volumes for extra specs provided."""
if specs is None or not isinstance(specs, dict):
return ssc_vols['all']
result = copy.deepcopy(ssc_vols['all'])
raid_type = specs.get('netapp:raid_type')
disk_type = specs.get('netapp:disk_type')
bool_specs_list = ['netapp_mirrored', 'netapp_unmirrored',
'netapp_dedup', 'netapp_nodedup',
'netapp_compression', 'netapp_nocompression',
'netapp_thin_provisioned', 'netapp_thick_provisioned']
b_specs = {}
for spec in bool_specs_list:
b_specs[spec] = na_utils.to_bool(specs.get(spec))\
if specs.get(spec) else None
def _spec_ineffect(b_specs, spec, opp_spec):
"""If the spec with opposite spec is ineffective."""
if ((b_specs[spec] is None and b_specs[opp_spec] is None)
or (b_specs[spec] == b_specs[opp_spec])):
return True
else:
return False
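    # each positive/negative spec pair is applied only when exactly one of the
    # two is effectively set; otherwise the pair cancels out and is ignored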
if _spec_ineffect(b_specs, 'netapp_mirrored', 'netapp_unmirrored'):
pass
else:
if b_specs['netapp_mirrored'] or b_specs['netapp_unmirrored'] is False:
result = result & ssc_vols['mirrored']
else:
result = result - ssc_vols['mirrored']
if _spec_ineffect(b_specs, 'netapp_dedup', 'netapp_nodedup'):
pass
else:
if b_specs['netapp_dedup'] or b_specs['netapp_nodedup'] is False:
result = result & ssc_vols['dedup']
else:
result = result - ssc_vols['dedup']
if _spec_ineffect(b_specs, 'netapp_compression', 'netapp_nocompression'):
pass
else:
if (b_specs['netapp_compression'] or
b_specs['netapp_nocompression'] is False):
result = result & ssc_vols['compression']
else:
result = result - ssc_vols['compression']
if _spec_ineffect(b_specs, 'netapp_thin_provisioned',
'netapp_thick_provisioned'):
pass
else:
if (b_specs['netapp_thin_provisioned'] or
b_specs['netapp_thick_provisioned'] is False):
result = result & ssc_vols['thin']
else:
result = result - ssc_vols['thin']
if raid_type or disk_type:
tmp = copy.deepcopy(result)
for vol in tmp:
if raid_type:
vol_raid = vol.aggr['raid_type']
vol_raid = vol_raid.lower() if vol_raid else None
if raid_type.lower() != vol_raid:
result.discard(vol)
if disk_type:
vol_dtype = vol.aggr['disk_type']
vol_dtype = vol_dtype.lower() if vol_dtype else None
if disk_type.lower() != vol_dtype:
result.discard(vol)
return result
def check_ssc_api_permissions(na_server):
"""Checks backend ssc api permissions for the user."""
api_map = {'storage-disk-get-iter': ['netapp:disk_type'],
'snapmirror-get-iter': ['netapp_mirrored',
'netapp_unmirrored'],
'sis-get-iter': ['netapp_dedup', 'netapp_nodedup',
'netapp_compression',
'netapp_nocompression'],
'aggr-options-list-info': ['netapp:raid_type'],
'volume-get-iter': []}
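    # volume-get-iter is mandatory; a failure of any other API merely disables
    # the extra_specs listed against it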
failed_apis = na_utils.check_apis_on_cluster(na_server, api_map.keys())
if failed_apis:
if 'volume-get-iter' in failed_apis:
msg = _("Fatal error: User not permitted"
" to query NetApp volumes.")
raise exception.VolumeBackendAPIException(data=msg)
else:
unsupp_ssc_features = []
for fail in failed_apis:
unsupp_ssc_features.extend(api_map[fail])
LOG.warn(_("The user does not have access or sufficient"
" privileges to use all netapp apis. The following"
" extra_specs will fail or be ignored: %s"),
unsupp_ssc_features)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Split Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
_TEST_DTYPES = (dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128)
class SplitOpTest(test.TestCase):
def _makeData(self, shape, dtype):
data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 1j * data
return data
@test_util.run_deprecated_v1
def testShapeInference(self):
model_input = array_ops.placeholder(dtypes.float32, shape=(1, 10))
# check that we fail during static shape inference if sizes are known
with self.assertRaises(ValueError):
# pylint: disable=expression-not-assigned
array_ops.split(model_input, [4], axis=1)[0]
# pylint: enable=expression-not-assigned
model_input = array_ops.placeholder(dtypes.float32)
inp = np.zeros((1, 10))
# check that we still fail at runtime if the shapes were unknown
with self.cached_session(use_gpu=True) as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(array_ops.split(model_input, [4]), {model_input: inp})
# test that we can pass a scalar Tensor as num_splits
for axis in [0, -2]:
with self.cached_session(use_gpu=True) as sess:
result = sess.run(
array_ops.split(
array_ops.ones([4, 4]),
num_or_size_splits=array_ops.ones([2, 2]).get_shape()[1],
axis=axis))
self.assertEqual(result[0].shape, (2, 4))
self.assertEqual(result[1].shape, (2, 4))
    # test that the non-split dimensions remain known, even if we don't
    # know how the split_dim will be split, but we do know the axis
result = array_ops.split(
array_ops.ones([5, 2]), array_ops.constant([2, 1, 2]) * 1, axis=0)
self.assertEqual(result[0].shape[1], 2)
self.assertEqual(result[1].shape[1], 2)
self.assertEqual(result[2].shape[1], 2)
model_input2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
result = array_ops.split(model_input2, [2, 2], axis=0)[0]
with self.cached_session(use_gpu=True) as sess:
sess.run(result, feed_dict={model_input2: np.ones([4, 2])})
@test_util.run_deprecated_v1
def testFailWithoutExplicitNum(self):
size_splits = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
with self.session(use_gpu=True) as sess:
with self.assertRaises(ValueError) as context:
sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]})
self.assertTrue("Cannot infer num from shape" in str(context.exception))
@test_util.run_in_graph_and_eager_modes
def testExplicitNum(self):
size_splits = array_ops.constant([2, 2, 6], dtype=dtypes.int32)
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Eager and Graph modes raise different exceptions
with self.assertRaises((errors_impl.InvalidArgumentError, ValueError)):
array_ops.split(value, size_splits, num=4)
r = self.evaluate(array_ops.split(value, size_splits, num=3))
self.assertAllEqual(r[0], value[0:2])
self.assertAllEqual(r[1], value[2:4])
self.assertAllEqual(r[2], value[4:])
@test_util.run_in_graph_and_eager_modes
def testListOfScalarTensors(self):
a = math_ops.to_int32(5)
b = math_ops.to_int32(6)
value = np.random.rand(11, 11)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(value, [a, b]))
self.assertAllEqual(result[0], value[0:5, :])
self.assertAllEqual(result[1], value[5:, :])
def _RunAndVerifyVariable(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(16, 25)
else:
num_split = np.random.randint(2, 8)
size_splits = np.random.randint(2, 8, num_split, dtype=np.int32)
shape[split_dim] = np.sum(size_splits)
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[slices])
def _testSpecialCasesVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [4], 0))
self.assertAllEqual(result[0], inp)
result = self.evaluate(array_ops.split(inp, [-1, 3], 0))
self.assertAllEqual(result[0], inp[0:1, :])
self.assertAllEqual(result[1], inp[1:4, :])
def _testHugeNumberOfTensorsVariable(self, dtype):
num_split = 1000
size_splits = np.random.randint(1, 3, num_split, dtype=np.int32)
shape = [3, np.sum(size_splits)]
split_dim = 1
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes
def testSpecialCasesVariable(self):
self._testSpecialCasesVariable()
for dtype in _TEST_DTYPES:
self._testHugeNumberOfTensorsVariable(dtype)
@test_util.run_in_graph_and_eager_modes
def testDegenerateVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [-1, 4], 0))
self.assertAllEqual(result[0], inp[0:0, :])
self.assertAllEqual(result[1], inp[0:4, :])
result = self.evaluate(array_ops.split(inp, [4, -1], 0))
self.assertAllEqual(result[0], inp[0:4, :])
self.assertAllEqual(result[1], inp[4:4, :])
result = self.evaluate(array_ops.split(inp, [-1, 4], 1))
self.assertAllEqual(result[0], inp[:, 0:0])
self.assertAllEqual(result[1], inp[:, 0:4])
result = self.evaluate(array_ops.split(inp, [4, -1], 1))
self.assertAllEqual(result[0], inp[:, 0:4])
self.assertAllEqual(result[1], inp[:, 4:4])
def _testGradientsSimpleVariable(self, dtype):
inp = self._makeData((4, 4), dtype)
with test_util.device(use_gpu=True):
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(inp_tensor, [1, 3], 1)
inp_grads = [
self._makeData((4, 1), dtype), self._makeData((4, 3), dtype)
]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[-1]
result = self.evaluate(grad)
self.assertAllEqual(result[:, 0:1], inp_grads[0])
self.assertAllEqual(result[:, 1:4], inp_grads[1])
@test_util.run_deprecated_v1
def testOutputShape(self):
for axis in [1, -1]:
with self.cached_session(use_gpu=True):
tensor = array_ops.placeholder(dtypes.float32, shape=[None, 12])
size_splits = [3, 7, 2]
outputs = array_ops.split(tensor, size_splits, axis)
for i, output in enumerate(outputs):
self.assertEqual(output.get_shape().as_list(), [None, size_splits[i]])
def _compare(self, x, dim, num):
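    # split x into `num` even pieces along `dim` and check tf.split against np.split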
np_ans = np.split(x, num, dim)
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
    self.assertEqual(num, len(np_ans))
self.assertEqual(num, len(out))
for i in range(num):
self.assertAllEqual(np_ans[i], out[i])
self.assertShapeEqual(np_ans[i], tf_ans[i])
@test_util.run_in_graph_and_eager_modes
def testSplitRows(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 0, 4)
@test_util.run_in_graph_and_eager_modes
def testSplitCols(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 1, 4)
def _testEmpty(self, x, dim, num, expected_shape):
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
self.assertEqual(x.size, 0)
self.assertEqual(len(out), num)
for i in range(num):
self.assertEqual(out[i].shape, expected_shape)
self.assertEqual(expected_shape, tf_ans[i].get_shape())
@test_util.run_in_graph_and_eager_modes
def testEmpty(self):
# Note: np.split returns a rank-0 empty ndarray
# if the input ndarray is empty.
for dtype in _TEST_DTYPES:
inp = self._makeData((8, 0, 21), dtype)
self._testEmpty(inp, 0, 2, (4, 0, 21))
self._testEmpty(inp, 0, 4, (2, 0, 21))
self._testEmpty(inp, 1, 4, (8, 0, 21))
self._testEmpty(inp, 2, 3, (8, 0, 7))
self._testEmpty(inp, 2, 7, (8, 0, 3))
@test_util.run_in_graph_and_eager_modes
def testIdentity(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((2, 2, 2), dtype)
self._compare(inp, 0, 1)
self._compare(inp, 1, 1)
self._compare(inp, 2, 1)
@test_util.run_in_graph_and_eager_modes
def testSplitDim0(self):
for dtype in _TEST_DTYPES:
self._compare(self._makeData((6, 10, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 9), dtype), 0, 3)
def _RunAndVerify(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(0, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(9, 15)
else:
num_split = np.random.randint(2, 8)
shape[split_dim] = np.random.randint(2, 5) * num_split
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(
array_ops.split(
value=inp, num_or_size_splits=num_split, axis=split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
length = shape[split_dim] // num_split
for i in range(num_split):
slices[split_dim] = slice(offset, offset + length)
offset += length
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes
def testRandom(self):
for dtype in _TEST_DTYPES:
for _ in range(5):
self._RunAndVerify(dtype)
self._RunAndVerify(dtype, large_num_splits=True)
self._RunAndVerifyVariable(dtype)
self._RunAndVerifyVariable(dtype, large_num_splits=True)
def _testGradientsSimple(self, dtype):
inp = self._makeData((4, 4), dtype)
with self.cached_session(use_gpu=True):
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(value=inp_tensor, num_or_size_splits=4, axis=1)
      inp_grads = [self._makeData((4, 1), dtype) for _ in range(4)]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[0]
result = self.evaluate(grad)
for i in range(4):
self.assertAllEqual(result[:, i:i + 1], inp_grads[i])
@test_util.run_deprecated_v1
def testGradientsAll(self):
for dtype in _TEST_DTYPES:
self._testGradientsSimple(dtype)
self._testGradientsSimpleVariable(dtype)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# split_dim greater than rank of input.
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=2)
# split dim less than -(rank of input)
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=-3)
# num_split does not evenly divide the size in split_dim.
with self.assertRaisesRegexp(ValueError, "should evenly divide"):
array_ops.split(value=[0, 1, 2, 3], num_or_size_splits=3, axis=0)
# Unknown split_dim.
splits = array_ops.split(
value=[[0, 1, 2, 3]],
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual([None, None], s.get_shape().as_list())
# Unknown split_dim and input shape.
splits = array_ops.split(
value=array_ops.placeholder(dtypes.float32),
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual(None, s.get_shape().ndims)
@test_util.run_deprecated_v1
def testVariableShapeFunction(self):
# size_splits too big
with self.assertRaises(ValueError):
array_ops.split([0, 1], [3, -1], axis=0)
# Correct inference of variable dimension
s0, s1 = array_ops.split([0, 1, 2], [2, -1], axis=0)
assert s0.shape.as_list() == [2]
assert s1.shape.as_list() == [1]
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testNonexistentDimTensor(self):
x = array_ops.placeholder(dtypes.int32)
values = np.zeros([5, 30])
splits = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegexp(ValueError, "Cannot infer"):
y = array_ops.split(values, splits, axis=x)
splits = array_ops.placeholder(dtypes.int32, [3])
y = array_ops.split(values, splits, axis=x)
with self.session(use_gpu=True) as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"must have exactly one element"):
sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]})
if __name__ == "__main__":
test.main()
|
|
import collections
import io
import multiprocessing
import time
from fooster.web import web
import mock
test_message = b'More test time!'
test_string = test_message.decode('utf-8')
class MyHandler(web.HTTPHandler):
reader = True
def respond(self):
self.comm['handled'].value = True
return 200, test_message
class OtherHandler(web.HTTPHandler):
reader = True
def respond(self):
self.comm['handled'].value = True
return 200, test_message
class SpecialHandler(web.HTTPHandler):
reader = False
def respond(self):
self.comm['waiting'].set()
self.comm['stop'].wait()
return 204, ''
class HeaderHandler(web.HTTPHandler):
def respond(self):
self.response.headers.set('Test', 'True')
raise web.HTTPError(402)
class HeaderErrorHandler(web.HTTPErrorHandler):
def respond(self):
self.response.headers.set('Test', 'True')
return 402, b''
class HeaderErrorRaiseHandler(web.HTTPErrorHandler):
def respond(self):
self.response.headers.set('Test', 'True')
raise TypeError()
class IOHandler(web.HTTPHandler):
def respond(self):
return 200, io.BytesIO(test_message)
class LengthIOHandler(web.HTTPHandler):
def respond(self):
self.response.headers.set('Content-Length', '2')
return 200, io.BytesIO(test_message)
class SimpleHandler(web.HTTPHandler):
def respond(self):
return 200, test_message.decode('utf-8')
class SimpleBytesHandler(web.HTTPHandler):
def do_get(self):
return 200, test_message
class BadLengthHandler(web.HTTPHandler):
def respond(self):
self.response.headers.set('Content-Length', '2')
return 200, test_message
class EmptyHandler(web.HTTPHandler):
def respond(self):
return 204, ''
class CloseHandler(web.HTTPHandler):
def respond(self):
self.request.keepalive = False
return 204, ''
class NoWriteHandler(web.HTTPHandler):
def respond(self):
self.response.write_body = False
return 200, test_message
class NoWriteBytesHandler(web.HTTPHandler):
def respond(self):
self.response.write_body = False
return 200, io.BytesIO(test_message)
class EvilHandler(web.HTTPHandler):
def respond(self):
self.response.headers.set('Content-Length', 'bad')
return 200, io.BytesIO(test_message)
def run(handler, handler_args=None, comm=None, socket=None, socket_error=False, server=None):
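    # drive one request/response cycle through the mock objects and return the
    # response object, raw status line, headers, and body for assertions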
if not socket:
socket = mock.MockSocket(error=socket_error)
if not server:
http_server = mock.MockHTTPServer()
server = http_server.info
request_obj = mock.MockHTTPRequest(socket, ('127.0.0.1', 1337), server, handler=handler, handler_args=handler_args, comm=comm, response=web.HTTPResponse)
response_obj = request_obj.response
response_obj.handle()
value = response_obj.wfile.getvalue()
response_obj.close()
    # response line comes before the first '\r\n'
response_line = value.split('\r\n'.encode(web.http_encoding), 1)[0]
if socket_error:
body = None
else:
        # body comes after the '\r\n\r\n' that terminates the headers
body = value.split('\r\n\r\n'.encode(web.http_encoding), 1)[1]
return response_obj, response_line, response_obj.headers, body
def test_write_lock_wait():
sync = multiprocessing.get_context(web.start_method).Manager()
stop = sync.Event()
waiting = sync.Event()
my_handled = sync.Value('b', 0)
other_handled = sync.Value('b', 0)
# both must have the same server
server = mock.MockHTTPServer()
# both handlers should have the same mock resource '/' and should therefore block since the first one is atomic
special = multiprocessing.get_context(web.start_method).Process(target=run, args=(SpecialHandler,), kwargs={'server': server.info, 'comm': {'stop': stop, 'waiting': waiting}})
my = multiprocessing.get_context(web.start_method).Process(target=run, args=(MyHandler,), kwargs={'server': server.info, 'comm': {'handled': my_handled}})
try:
special.start()
# wait until the handler is blocking
waiting.wait(timeout=server.poll_interval + 1)
print(server.res_lock.resources)
# make sure it is locked once
assert server.res_lock.resources['/'][1] == 1
my.start()
# wait a bit
time.sleep(server.poll_interval + 1)
        # make sure that the 'my' process did not handle the request
assert not my_handled.value
assert not my.is_alive()
assert server.res_lock.resources['/'][1] == 1
# make sure special has been here the whole time
assert special.is_alive()
# check for proper skipping when locked
response, response_line, headers, body = run(OtherHandler, server=server.info, comm={'handled': other_handled})
assert response.request.skip
# stop special handler
stop.set()
# wait a bit
time.sleep(server.poll_interval + 1)
        # make sure all processes exited
assert not special.is_alive()
# make sure we removed the lock
assert not server.res_lock.resources
finally:
# join everything
stop.set()
special.join(timeout=server.poll_interval + 1)
my.join(timeout=server.poll_interval + 1)
def test_write_lock_socket_error():
sync = multiprocessing.get_context(web.start_method).Manager()
stop = sync.Event()
waiting = sync.Event()
other_handled = sync.Value('b', 0)
# both must have the same server
server = mock.MockHTTPServer()
# both handlers should have the same mock resource '/' and should therefore block since the first one is atomic
special = multiprocessing.get_context(web.start_method).Process(target=run, args=(SpecialHandler,), kwargs={'server': server.info, 'comm': {'stop': stop, 'waiting': waiting}})
try:
special.start()
# wait until the handler is blocking
waiting.wait(timeout=server.poll_interval + 1)
# make sure it is locked once
assert server.res_lock.resources['/'][1] == 1
# make sure special has been here the whole time
assert special.is_alive()
# check for connection error handling when locked
response, response_line, headers, body = run(OtherHandler, server=server.info, comm={'handled': other_handled}, socket_error=True)
assert not other_handled.value
assert response_line == 'HTTP/1.1 408 Request Timeout'.encode(web.http_encoding)
assert response.request.skip
# stop special handler
stop.set()
# wait a bit
time.sleep(server.poll_interval + 1)
        # make sure all processes exited
assert not special.is_alive()
# make sure we removed the lock
assert not server.res_lock.resources
finally:
# join everything
stop.set()
special.join(timeout=server.poll_interval + 1)
def test_http_error():
response, response_line, headers, body = run(web.DummyHandler, {'error': web.HTTPError(402)})
assert response_line == 'HTTP/1.1 402 Payment Required'.encode(web.http_encoding)
def test_general_error():
response, response_line, headers, body = run(web.DummyHandler, {'error': TypeError()})
assert response_line == 'HTTP/1.1 500 Internal Server Error'.encode(web.http_encoding)
def test_error_headers():
error_headers = web.HTTPHeaders()
error_headers.set('Test', 'True')
response, response_line, headers, body = run(web.DummyHandler, {'error': web.HTTPError(402, headers=error_headers)})
assert response_line == 'HTTP/1.1 402 Payment Required'.encode(web.http_encoding)
assert headers.get('Test') == 'True'
def test_headers_clear():
response, response_line, headers, body = run(HeaderHandler)
assert headers.get('Test') is None
def test_error_handler():
server = mock.MockHTTPServer(error_routes=collections.OrderedDict([('400', HeaderErrorHandler), ('500', HeaderErrorHandler)]))
response, response_line, headers, body = run(web.DummyHandler, {'error': TypeError()}, server=server)
assert response_line == 'HTTP/1.1 402 Payment Required'.encode(web.http_encoding)
assert headers.get('Test') == 'True'
assert body == b''
def test_error_handler_error():
server = mock.MockHTTPServer(error_routes={'500': HeaderErrorRaiseHandler})
response, response_line, headers, body = run(web.DummyHandler, {'error': TypeError()}, server=server)
assert response_line == 'HTTP/1.1 500 Internal Server Error'.encode(web.http_encoding)
assert headers.get('Test') is None
assert headers.get('Content-Length') == '28'
assert headers.get('Server') == web.server_version
assert headers.get('Date')
assert body == b'500 - Internal Server Error\n'
def test_response_io():
response, response_line, headers, body = run(IOHandler)
assert headers.get('Transfer-Encoding') == 'chunked'
assert headers.get('Content-Length') is None
assert body == ('{:x}'.format(len(test_message)) + '\r\n').encode(web.http_encoding) + test_message + '\r\n'.encode(web.http_encoding) + '0\r\n\r\n'.encode(web.http_encoding)
def test_response_io_length():
response, response_line, headers, body = run(LengthIOHandler)
assert headers.get('Content-Length') == '2'
assert body == test_message[0:2]
def test_response_str():
response, response_line, headers, body = run(SimpleHandler)
assert headers.get('Content-Length') == str(len(test_message))
assert body == test_message
def test_response_bytes():
response, response_line, headers, body = run(SimpleBytesHandler)
assert headers.get('Content-Length') == str(len(test_message))
assert body == test_message
def test_response_length():
response, response_line, headers, body = run(BadLengthHandler)
assert headers.get('Content-Length') == str(len(test_message))
assert body == test_message
def test_connection_close():
response, response_line, headers, body = run(EmptyHandler)
assert headers.get('Connection') is None
response, response_line, headers, body = run(CloseHandler)
assert headers.get('Connection') == 'close'
def test_no_write_io():
response, response_line, headers, body = run(NoWriteHandler)
assert response_line == 'HTTP/1.1 200 OK'.encode(web.http_encoding)
assert body == b''
def test_no_write_bytes():
response, response_line, headers, body = run(NoWriteBytesHandler)
assert response_line == 'HTTP/1.1 200 OK'.encode(web.http_encoding)
assert body == b''
def test_write_error():
response, response_line, headers, body = run(EvilHandler)
assert response_line == 'HTTP/1.1 200 OK'.encode(web.http_encoding)
assert headers.get('Content-Length') == 'bad'
assert body == b''
def test_write_socket_error():
response, response_line, headers, body = run(SimpleBytesHandler, socket_error=True)
assert response_line == b''
assert body is None
|
|
import iInfo
import iSequence
import iCheck
import iBam
def iHeader(Header_File, H_CHR, opts):
with open(opts.input) as vcf:
for line in vcf:
line = line.rstrip()
if line.startswith('##'):
Header_File += [line]
elif line.startswith('#CHROM'):
H_CHR += [line]
break
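	# emit an INFO/FORMAT meta-line for every annotation option the user enabled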
if opts.SimpleRepeat:
Header_File += ['##INFO=<ID=iSR,Number=1,Type=String,Description="Variant falls in a repeated sequence. None=0, SimpleRepeat=1, Homopolymer=2.">']
if opts.SimpleRepeatLength:
Header_File += ['##INFO=<ID=iSRL,Number=1,Type=Integer,Description="Length of repeated sequence (expressed as number of nucleotides) for SR tag">']
if opts.SimpleRepeatUnit:
Header_File += ['##INFO=<ID=iSRU,Number=1,Type=String,Description="Simple repeated sequence unit composing repeated sequence (SR)">']
if opts.PseudoNucleotidesComposition:
Header_File += ['##INFO=<ID=iPNC,Number=16,Type=Float,Description="Pseudo Nucleotide sequence Composition using Kmer size of 2. Reported as: AA,AC,AG,AT,CA,CC,CG,CT,GA,GC,GG,GT,TA,TC,TG,TT">']
if opts.RepeatMasker:
Header_File += ['##INFO=<ID=iRM,Number=1,Type=Integer,Description="Variant falls in a repeated sequence according to RepeatMasker tool. True=1, False=0">']
if opts.gcContent:
Header_File += ['##INFO=<ID=iGC,Number=1,Type=Float,Description="Percentage of GC content in sequence">']
if opts.VariantClass:
Header_File += ['##INFO=<ID=iVC,Number=1,Type=String,Description="Annotated variant class: SNV=snv, Insertion=Ins, Deletion=Del, SequenceAlteration=Alt">']
if opts.UnMappedReads:
Header_File += ['##FORMAT=<ID=iUnMap,Number=1,Type=Float,Description="Fraction of unmapped reads">']
if opts.MateIsUnmapped:
Header_File += ['##FORMAT=<ID=iUnMap,Number=1,Type=Float,Description="Fraction of reads having unmapped mate">']
if opts.MeanMappingQuality:
Header_File += ['##FORMAT=<ID=iMQ,Number=1,Type=Float,Description="Mean mapping quality for reads mapping variant position">']
if opts.MappingQualityZero:
Header_File += ['##FORMAT=<ID=iMQ0,Number=1,Type=Float,Description="Fraction of reads mapping position with Mapping Quaility=0">']
if opts.NotPrimaryAlignment:
Header_File += ['##FORMAT=<ID=iNPA,Number=1,Type=Float,Description="Fraction of reads mapping position flagged as not primary alignment">']
if opts.SupplementaryAlignment:
Header_File += ['##FORMAT=<ID=iSA,Number=1,Type=Float,Description="Fraction of reads mapping position flagged as supplementary alignment">']
if opts.NotPairedReads:
Header_File += ['##FORMAT=<ID=iNP,Number=1,Type=Float,Description="Fraction of reads mapping position flagged as not paired">']
if opts.NotProperPairedReads:
Header_File += ['##FORMAT=<ID=iNPP,Number=1,Type=Float,Description="Fraction of reads mapping position flagged as not proper paired">']
if opts.AlignmentScore:
Header_File += ['##FORMAT=<ID=iAS,Number=1,Type=Float,Description="Mean alignment score of reads mapping variant position">']
if opts.SuboptimalAlignmentScore:
Header_File += ['##FORMAT=<ID=iXS,Number=1,Type=Float,Description="Mean suboptimal alignment score of reads mapping variant position">']
if opts.TotalDupReads:
Header_File += ['##FORMAT=<ID=iDUP,Number=1,Type=Integer,Description="Fraction of total reads mapping position marked as duplicate">']
if opts.iEvaDepth:
Header_File += ['##FORMAT=<ID=iDP,Number=1,Type=Integer,Description="iEVA read depth. Duplicate reads are excluded.">']
if opts.AlleleDepth:
Header_File += ['##FORMAT=<ID=iAD,Number=R,Type=Integer,Description="Allelic depth reported by iEVA as Ref,Alt">']
if opts.AlleleFrequency:
		Header_File += ['##FORMAT=<ID=iFREQ,Number=1,Type=Float,Description="Alternate allele frequency on variant position">']
if opts.StrandBiasDepth:
Header_File += ['##FORMAT=<ID=iSBD,Number=4,Type=Integer,Description="Depth of bases supporting REF and ALT allele on forward and reverse strand for strand bias detection (R+, R-, A+, A-)">']
if opts.StrandBias:
Header_File += ['##FORMAT=<ID=iSB,Number=1,Type=Float,Description="Fisher exact test to detect strand bias (R+,R-,A+,A-)">']
if opts.AlleleQscore:
Header_File += ['##FORMAT=<ID=iQual,Number=R,Type=Float,Description="Mean Q-score for REF and ALT allele reported as MeanRefQscore, MeanAltQscore">']
if opts.iBaseQualValAround:
Header_File += ['##FORMAT=<ID=iBQVA,Number=1,Type=Float,Description="Mean base quality value around the called variant (+-3 bases).">']
if opts.AlleleMeanMappingQuality:
Header_File += ['##FORMAT=<ID=iAMQ,Number=R,Type=Float,Description="Mean mapping quality for reads supporting REF and ALT allele reported as MeanMappingQualRef,MeanMappingQualAlt">']
if opts.AlleleMeanAlignmentScore:
Header_File += ['##FORMAT=<ID=iAAS,Number=R,Type=Float,Description="Mean alignment score for reads supporting REF and ALT allele. Reported as MeanAlignmentScoreRef,MeanAlignmentScoreAlt">']
if opts.AlleleSuboptimalAlignmentScore:
Header_File += ['##FORMAT=<ID=iAXS,Number=R,Type=Float,Description="Mean suboptimal alignment score for reads supporting REF and ALT allele. Reported as MeanSuboptimalAlignmentScoreRef,MeanSuboptimalAlignmentScoreAlt">']
if opts.AlleleSuboptimalAlignmentScoreZero:
Header_File += ['##FORMAT=<ID=iAXS0,Number=R,Type=Integer,Description="Number of reads supporting REF and ALT allele with Suboptimal Alignment Score = 0. Reported as NumberReadsXSscore0Ref,NumberReadsXSscore0Alt">']
if opts.AlleleMappingQualityZero:
Header_File += ['##FORMAT=<ID=iAMQ0,Number=R,Type=Integer,Description="Number of reads supporting REF and ALT allele with mapping quality = 0. Reported as NumberMappingQuality0Ref,NumberMappingQuality0Alt">']
if opts.AlleleClippedReads:
Header_File += ['##FORMAT=<ID=iACR,Number=R,Type=Integer,Description="Number of clipped reads mapping REF and ALT allele reported as NumberClippedRef, NumberClippedAlt">']
if opts.iClipRankSumTest:
Header_File += ['##FORMAT=<ID=iCRT,Number=1,Type=Float,Description="Mann-Whitney rank sum test for difference in number of clipped reads between Ref and Alt (p-value).">']
if opts.iBaseQualRankSumTest:
Header_File += ['##FORMAT=<ID=iQRT,Number=1,Type=Float,Description="Mann-Whitney rank sum test for difference in base quality of bases supporting Ref and Alt (p-value).">']
if opts.iMapQualRankSumTest:
Header_File += ['##FORMAT=<ID=iMRT,Number=1,Type=Float,Description="Mann-Whitney rank sum test for difference in mapping quality of reads supporting Ref and Alt (p-value).">']
if opts.iReadPosRankSumTest:
Header_File += ['##FORMAT=<ID=iPRT,Number=1,Type=Float,Description="Mann-Whitney rank sum test for difference in variant position within reads supporting Ref and Alt (p-value).">']
if opts.iAltNormReadPos:
Header_File += ['##FORMAT=<ID=iANRP,Number=1,Type=Float,Description="Mean read position of the variant normalized on read length, in [0-1]: 1 means the variant falls at the beginning/end of the read, 0 means it falls in the middle of the read. (Useful in amplicon analysis)">']
return Header_File, H_CHR
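# For illustration (a sketch grounded in the header definitions above): with
# opts.iEvaDepth enabled, the returned Header_File gains a line such as
#   ##FORMAT=<ID=iDP,Number=1,Type=Integer,Description="iEVA read depth. Duplicate reads are excluded.">
# and iAnnotation() below appends the matching iDP value to each sample column.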
def iAnnotation(Sample_dict, out, opts, Sequence_opts, Bam_opts, Genotype_opts, H_CHR, Reference, Sample_tot, Repeat_list):
CHR_Counter = ''
with open(opts.input) as vcf:
for variant in vcf:
if variant.startswith('#'):
continue
else:
variant = variant.rstrip()
variant = variant.split('\t')
Variant_Class = iInfo.Extract_Variant_Type(variant, H_CHR)
if any(Sequence_opts):
RepeatSeq, RepeatSeq_Length, RepeatSeq_Unit, Pseudo_Nucleotide, RM, GC = iSequence.SimpleRepeats_Finder(Reference, variant, H_CHR, Variant_Class, opts, Repeat_list)
if opts.SimpleRepeat:
variant[H_CHR.index('INFO')] += ';' + 'iSR=' + str(RepeatSeq)
if opts.SimpleRepeatLength:
variant[H_CHR.index('INFO')] += ';' + 'iSRL=' + str(RepeatSeq_Length)
if opts.SimpleRepeatUnit:
variant[H_CHR.index('INFO')] += ';' + 'iSRU=' + str(RepeatSeq_Unit)
if opts.PseudoNucleotidesComposition:
variant[H_CHR.index('INFO')] += ';' + 'iPNC=' + ','.join(str(Din) for Din in Pseudo_Nucleotide)
if opts.RepeatMasker:
variant[H_CHR.index('INFO')] += ';' + 'iRM=' + RM
if opts.gcContent:
variant[H_CHR.index('INFO')] += ';' + 'iGC=' + GC
if opts.VariantClass:
variant[H_CHR.index('INFO')] += ';' + 'iVC=' + Variant_Class
if any(Genotype_opts) or any(Bam_opts):
Sample_Stat = iBam.Sequence_Annotator(variant, Sample_dict, H_CHR, Reference, Variant_Class, opts.SNVMinBaseQuality, opts.SNVMinMappingQuality, opts.IndelMinBaseQuality, opts.IndelMinMappingQuality, Bam_opts, Genotype_opts, opts)
if opts.UnMappedReads:
variant[H_CHR.index('FORMAT')] += ':' + 'iUnMap'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Percentage_Unmapped_Reads'))) if sample in Sample_dict.keys() else ':.'
if opts.MeanMappingQuality:
variant[H_CHR.index('FORMAT')] += ':' + 'iMQ'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Total_Mean_Mapping_Quality'))) if sample in Sample_dict.keys() else ':.'
if opts.MappingQualityZero:
variant[H_CHR.index('FORMAT')] += ':' + 'iMQ0'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Mapping_Quality_Zero'))) if sample in Sample_dict.keys() else ':.'
if opts.NotPrimaryAlignment:
variant[H_CHR.index('FORMAT')] += ':' + 'iNPA'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Percentage_Not_Primary_Alignment_Reads'))) if sample in Sample_dict.keys() else ':.'
if opts.SupplementaryAlignment:
variant[H_CHR.index('FORMAT')] += ':' + 'iSA'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Percentage_Supplementary_Align'))) if sample in Sample_dict.keys() else ':.'
if opts.NotPairedReads:
variant[H_CHR.index('FORMAT')] += ':' + 'iNP'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Percentage_Not_Paired_Reads'))) if sample in Sample_dict.keys() else ':.'
if opts.NotProperPairedReads:
variant[H_CHR.index('FORMAT')] += ':' + 'iNPP'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Percentage_Not_Proper_Paired_Reads'))) if sample in Sample_dict.keys() else ':.'
if opts.MateIsUnmapped:
variant[H_CHR.index('FORMAT')] += ':' + 'iMIU'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Mate_is_Unmapped'))) if sample in Sample_dict.keys() else ':.'
if opts.AlignmentScore:
variant[H_CHR.index('FORMAT')] += ':' + 'iAS'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Mean_Alignment_Score'))) if sample in Sample_dict.keys() else ':.'
if opts.SuboptimalAlignmentScore:
variant[H_CHR.index('FORMAT')] += ':' + 'iXS'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Mean_Suboptimal_Alignment_Score'))) if sample in Sample_dict.keys() else ':.'
if opts.TotalDupReads:
variant[H_CHR.index('FORMAT')] += ':' + 'iDUP'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Total_Duplicate_Reads'))) if sample in Sample_dict.keys() else ':.'
if opts.iEvaDepth:
variant[H_CHR.index('FORMAT')] += ':' + 'iDP'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (Sample_Stat.get(sample).get('Coverage')) if sample in Sample_dict.keys() else ':.'
if opts.AlleleDepth:
variant[H_CHR.index('FORMAT')] += ':' + 'iAD'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s,%s' % (Sample_Stat.get(sample).get('Read_Ref'),Sample_Stat.get(sample).get('Read_Alt')) if sample in Sample_dict.keys() else ':.'
if opts.AlleleFrequency:
variant[H_CHR.index('FORMAT')] += ':' + 'iFREQ'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (Sample_Stat.get(sample).get('Allele_Frequency')) if sample in Sample_dict.keys() else ':.'
if opts.StrandBiasDepth:
variant[H_CHR.index('FORMAT')] += ':' + 'iSBD'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s,%s,%s,%s' % (Sample_Stat.get(sample).get('REF+'),Sample_Stat.get(sample).get('REF-'),Sample_Stat.get(sample).get('ALT+'),Sample_Stat.get(sample).get('ALT-')) if sample in Sample_dict.keys() else ':.'
if opts.StrandBias:
variant[H_CHR.index('FORMAT')] += ':' + 'iSB'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Strand_Bias_Reads'))) if sample in Sample_dict.keys() else ':.'
if opts.AlleleQscore:
variant[H_CHR.index('FORMAT')] += ':' + 'iQual'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s,%s' % (Sample_Stat.get(sample).get('Qual_Ref'), Sample_Stat.get(sample).get('Qual_Alt')) if sample in Sample_dict.keys() else ':.'
if opts.iBaseQualValAround:
variant[H_CHR.index('FORMAT')] += ':' + 'iBQVA'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (Sample_Stat.get(sample).get('BaseQualValAround')) if sample in Sample_dict.keys() else ':.'
if opts.AlleleMeanMappingQuality:
variant[H_CHR.index('FORMAT')] += ':' + 'iAMQ'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s,%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('Ref_Mean_Mapping_Quality')),iCheck.Check_Zero(Sample_Stat.get(sample).get('Alt_Mean_Mapping_Quality'))) if sample in Sample_dict.keys() else ':.'
if opts.AlleleMeanAlignmentScore:
variant[H_CHR.index('FORMAT')] += ':' + 'iAAS'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s,%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('AS_Ref')),iCheck.Check_Zero(Sample_Stat.get(sample).get('AS_Alt'))) if sample in Sample_dict.keys() else ':.'
if opts.AlleleSuboptimalAlignmentScore:
variant[H_CHR.index('FORMAT')] += ':' + 'iAXS'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s,%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('XS_Ref')),iCheck.Check_Zero(Sample_Stat.get(sample).get('XS_Alt'))) if sample in Sample_dict.keys() else ':.'
if opts.AlleleSuboptimalAlignmentScoreZero:
variant[H_CHR.index('FORMAT')] += ':' + 'iAXS0'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s,%s' % (iCheck.Check_Zero(Sample_Stat.get(sample).get('XS0_Ref')),iCheck.Check_Zero(Sample_Stat.get(sample).get('XS0_Alt'))) if sample in Sample_dict.keys() else ':.'
if opts.AlleleMappingQualityZero:
variant[H_CHR.index('FORMAT')] += ':' + 'iAMQ0'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s,%s' % (Sample_Stat.get(sample).get('MapQ0_Ref'),Sample_Stat.get(sample).get('MapQ0_Alt')) if sample in Sample_dict.keys() else ':.'
if opts.AlleleClippedReads:
variant[H_CHR.index('FORMAT')] += ':' + 'iACR'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s,%s' % (Sample_Stat.get(sample).get('Clipped_Reads_Ref'),Sample_Stat.get(sample).get('Clipped_Reads_Alt')) if sample in Sample_dict.keys() else ':.'
if opts.iClipRankSumTest:
variant[H_CHR.index('FORMAT')] += ':' + 'iCRT'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (Sample_Stat.get(sample).get('ClipRankTest')) if sample in Sample_dict.keys() else ':.'
if opts.iBaseQualRankSumTest:
variant[H_CHR.index('FORMAT')] += ':' + 'iQRT'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (Sample_Stat.get(sample).get('QualRankTest')) if sample in Sample_dict.keys() else ':.'
if opts.iMapQualRankSumTest:
variant[H_CHR.index('FORMAT')] += ':' + 'iMRT'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (Sample_Stat.get(sample).get('MappingRankTest')) if sample in Sample_dict.keys() else ':.'
if opts.iReadPosRankSumTest:
variant[H_CHR.index('FORMAT')] += ':' + 'iPRT'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (Sample_Stat.get(sample).get('PosaRankTest')) if sample in Sample_dict.keys() else ':.'
if opts.iAltNormReadPos:
variant[H_CHR.index('FORMAT')] += ':' + 'iANRP'
for sample in Sample_tot:
variant[H_CHR.index(sample)] += ':%s' % (Sample_Stat.get(sample).get('iAltNormPos')) if sample in Sample_dict.keys() else ':.'
if opts.verbose:
# Report progress once whenever the chromosome changes.
if variant[H_CHR.index('#CHROM')] != CHR_Counter:
CHR_Counter = variant[H_CHR.index('#CHROM')]
print ' Extracting attributes on: %s' % (CHR_Counter)
out.write('\t'.join(variant) + '\n')
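# Sketch of the per-variant effect with hypothetical values: if opts.iEvaDepth and
# opts.AlleleDepth are the only options enabled, a FORMAT column "GT:AD" becomes
# "GT:AD:iDP:iAD" and a sample column "0/1:10,5" becomes "0/1:10,5:15:10,5"
# (Coverage=15, Read_Ref=10, Read_Alt=5); samples without a BAM in Sample_dict
# get ':.' appended for each new tag.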
|
|
from datetime import datetime
from hashlib import md5
from itertools import izip_longest
import json
import os
from urllib import urlencode
from StringIO import StringIO
from aludel.database import MetaData
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from twisted.web.client import Agent, FileBodyProducer, readBody
from twisted.web.http_headers import Headers
from twisted.web.server import Site
from airtime_service.api import AirtimeServiceApp
from airtime_service.models import VoucherPool
from .helpers import populate_pool, mk_audit_params, sorted_dicts, voucher_dict
class ApiClient(object):
def __init__(self, base_url):
self._base_url = base_url
def _make_url(self, url_path):
return '%s/%s' % (self._base_url, url_path.lstrip('/'))
def _make_call(self, method, url_path, headers, body, expected_code):
agent = Agent(reactor)
url = self._make_url(url_path)
d = agent.request(method, url, headers, body)
return d.addCallback(self._get_response_body, expected_code)
def _get_response_body(self, response, expected_code):
assert response.code == expected_code
return readBody(response).addCallback(json.loads)
def get(self, url_path, params, expected_code):
url_path = '?'.join([url_path, urlencode(params)])
return self._make_call('GET', url_path, None, None, expected_code)
def put(self, url_path, headers, content, expected_code=200):
body = FileBodyProducer(StringIO(content))
return self._make_call('PUT', url_path, headers, body, expected_code)
def put_json(self, url_path, params, expected_code=200):
headers = Headers({'Content-Type': ['application/json']})
return self.put(
url_path, headers, json.dumps(params), expected_code)
def put_issue(self, request_id, operator, denomination, expected_code=200):
params = mk_audit_params(request_id)
params.update({
'denomination': denomination,
})
params.pop('request_id')
url_path = 'testpool/issue/%s/%s' % (operator, request_id)
return self.put_json(url_path, params, expected_code)
def put_create(self, expected_code=201):
url_path = 'testpool'
return self.put(url_path, Headers({}), None, expected_code)
def put_import(self, request_id, content, content_md5=None,
expected_code=201):
url_path = 'testpool/import/%s' % (request_id,)
hdict = {
'Content-Type': ['text/csv'],
}
if content_md5 is None:
content_md5 = md5(content).hexdigest()
if content_md5:
hdict['Content-MD5'] = [content_md5]
return self.put(url_path, Headers(hdict), content, expected_code)
def put_export(self, request_id, count=None, operators=None,
denominations=None, expected_code=200):
params = {}
if count is not None:
params['count'] = count
if operators is not None:
params['operators'] = operators
if denominations is not None:
params['denominations'] = denominations
url_path = 'testpool/export/%s' % (request_id,)
return self.put_json(url_path, params, expected_code)
def get_audit_query(self, request_id, field, value, expected_code=200):
params = {'request_id': request_id, 'field': field, 'value': value}
return self.get('testpool/audit_query', params, expected_code)
def get_voucher_counts(self, request_id, expected_code=200):
params = {'request_id': request_id}
return self.get('testpool/voucher_counts', params, expected_code)
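# A minimal usage sketch inside a test (hypothetical port; mirrors the tests below):
#
#     client = ApiClient('http://localhost:%s' % (port,))
#     rsp = yield client.put_issue('req-0', 'Tank', 'red')
#     assert rsp['request_id'] == 'req-0'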
class TestAirtimeServiceApp(TestCase):
timeout = 5
@inlineCallbacks
def setUp(self):
# We need to make sure all our queries run in the same thread,
# otherwise sqlite gets very sad.
reactor.suggestThreadPoolSize(1)
connection_string = os.environ.get(
"ALUDEL_TEST_CONNECTION_STRING", "sqlite://")
self._using_mysql = connection_string.startswith('mysql')
self.asapp = AirtimeServiceApp(connection_string, reactor=reactor)
site = Site(self.asapp.app.resource())
self.listener = reactor.listenTCP(0, site, interface='localhost')
self.listener_port = self.listener.getHost().port
self._drop_tables()
self.conn = yield self.asapp.engine.connect()
self.pool = VoucherPool('testpool', self.conn)
self.client = ApiClient('http://localhost:%s' % (self.listener_port,))
@inlineCallbacks
def tearDown(self):
yield self.conn.close()
self._drop_tables()
yield self.listener.loseConnection()
def _drop_tables(self):
# NOTE: This is a blocking operation!
md = MetaData(bind=self.asapp.engine._engine)
md.reflect()
md.drop_all()
assert self.asapp.engine._engine.table_names() == []
@inlineCallbacks
def assert_voucher_counts(self, expected_rows):
rows = yield self.pool.count_vouchers()
assert sorted(tuple(r) for r in rows) == sorted(expected_rows)
@inlineCallbacks
def test_request_missing_params(self):
params = mk_audit_params('req-0')
params.pop('request_id')
rsp = yield self.client.put_json(
'testpool/issue/Tank/req-0', params, expected_code=400)
assert rsp == {
'request_id': 'req-0',
'error': "Missing request parameters: 'denomination'",
}
@inlineCallbacks
def test_request_missing_audit_params(self):
params = {'denomination': 'red'}
rsp = yield self.client.put_json(
'testpool/issue/Tank/req-0', params, expected_code=400)
assert rsp == {
'request_id': 'req-0',
'error': (
"Missing request parameters: 'transaction_id', 'user_id'"),
}
@inlineCallbacks
def test_request_extra_params(self):
params = mk_audit_params('req-0')
params.pop('request_id')
params.update({
'denomination': 'red',
'foo': 'bar',
})
rsp = yield self.client.put_json(
'testpool/issue/Tank/req-0', params, expected_code=400)
assert rsp == {
'request_id': 'req-0',
'error': "Unexpected request parameters: 'foo'",
}
@inlineCallbacks
def test_issue_missing_pool(self):
rsp = yield self.client.put_issue(
'req-0', 'Tank', 'red', expected_code=404)
assert rsp == {
'request_id': 'req-0',
'error': 'Voucher pool does not exist.',
}
@inlineCallbacks
def test_issue_response_contains_request_id(self):
yield self.pool.create_tables()
yield populate_pool(self.pool, ['Tank'], ['red'], [0, 1])
rsp0 = yield self.client.put_issue('req-0', 'Tank', 'red')
assert rsp0['request_id'] == 'req-0'
@inlineCallbacks
def test_issue(self):
yield self.pool.create_tables()
yield populate_pool(self.pool, ['Tank'], ['red'], [0, 1])
rsp0 = yield self.client.put_issue('req-0', 'Tank', 'red')
assert set(rsp0.keys()) == set(['request_id', 'voucher'])
assert rsp0['request_id'] == 'req-0'
assert rsp0['voucher'] in ['Tank-red-0', 'Tank-red-1']
rsp1 = yield self.client.put_issue('req-1', 'Tank', 'red')
assert set(rsp1.keys()) == set(['request_id', 'voucher'])
assert rsp1['request_id'] == 'req-1'
assert rsp1['voucher'] in ['Tank-red-0', 'Tank-red-1']
assert rsp0['voucher'] != rsp1['voucher']
@inlineCallbacks
def test_issue_idempotent(self):
yield self.pool.create_tables()
yield populate_pool(self.pool, ['Tank'], ['red'], [0])
rsp0 = yield self.client.put_issue('req-0', 'Tank', 'red')
assert rsp0 == {
'request_id': 'req-0',
'voucher': 'Tank-red-0',
}
yield populate_pool(self.pool, ['Tank'], ['red'], [1])
rsp1 = yield self.client.put_issue('req-0', 'Tank', 'red')
assert rsp1 == {
'request_id': 'req-0',
'voucher': 'Tank-red-0',
}
rsp2 = yield self.client.put_issue('req-1', 'Tank', 'red')
assert rsp2 == {
'request_id': 'req-1',
'voucher': 'Tank-red-1',
}
rsp3 = yield self.client.put_issue(
'req-1', 'Tank', 'blue', expected_code=400)
assert rsp3 == {
'request_id': 'req-1',
'error': (
'This request has already been performed with different'
' parameters.'),
}
@inlineCallbacks
def test_issue_no_voucher(self):
yield self.pool.create_tables()
yield populate_pool(self.pool, ['Tank'], ['red'], [0])
rsp = yield self.client.put_issue(
'req-0', 'Tank', 'blue', expected_code=500)
assert rsp == {
'request_id': 'req-0',
'error': 'No voucher available.',
}
def _assert_audit_entries(self, request_id, response, expected_entries):
def created_ats():
format_str = '%Y-%m-%dT%H:%M:%S.%f'
if self._using_mysql:
format_str = format_str.replace('.%f', '')
for result in response['results']:
yield datetime.strptime(
result['created_at'], format_str).isoformat()
expected_results = [{
'request_id': entry['audit_params']['request_id'],
'transaction_id': entry['audit_params']['transaction_id'],
'user_id': entry['audit_params']['user_id'],
'request_data': entry['request_data'],
'response_data': entry['response_data'],
'error': entry['error'],
'created_at': created_at,
} for entry, created_at in izip_longest(
expected_entries, created_ats())]
assert response == {
'request_id': request_id,
'results': expected_results,
}
@inlineCallbacks
def test_query_bad_field(self):
yield self.pool.create_tables()
rsp = yield self.client.get_audit_query(
'audit-0', 'foo', 'req-0', expected_code=400)
assert rsp == {
'request_id': 'audit-0',
'error': 'Invalid audit field.',
}
@inlineCallbacks
def test_query_by_request_id(self):
yield self.pool.create_tables()
audit_params = mk_audit_params('req-0')
rsp = yield self.client.get_audit_query(
'audit-0', 'request_id', 'req-0')
assert rsp == {
'request_id': 'audit-0',
'results': [],
}
yield self.pool._audit_request(audit_params, 'req_data', 'resp_data')
rsp = yield self.client.get_audit_query(
'audit-1', 'request_id', 'req-0')
self._assert_audit_entries('audit-1', rsp, [{
'audit_params': audit_params,
'request_data': u'req_data',
'response_data': u'resp_data',
'error': False,
}])
@inlineCallbacks
def test_query_by_transaction_id(self):
yield self.pool.create_tables()
audit_params_0 = mk_audit_params('req-0', 'transaction-0')
audit_params_1 = mk_audit_params('req-1', 'transaction-0')
rsp = yield self.client.get_audit_query(
'audit-0', 'transaction_id', 'transaction-0')
assert rsp == {
'request_id': 'audit-0',
'results': [],
}
yield self.pool._audit_request(
audit_params_0, 'req_data_0', 'resp_data_0')
yield self.pool._audit_request(
audit_params_1, 'req_data_1', 'resp_data_1')
rsp = yield self.client.get_audit_query(
'audit-1', 'transaction_id', 'transaction-0')
self._assert_audit_entries('audit-1', rsp, [{
'audit_params': audit_params_0,
'request_data': u'req_data_0',
'response_data': u'resp_data_0',
'error': False,
}, {
'audit_params': audit_params_1,
'request_data': u'req_data_1',
'response_data': u'resp_data_1',
'error': False,
}])
@inlineCallbacks
def test_query_by_user_id(self):
yield self.pool.create_tables()
audit_params_0 = mk_audit_params('req-0', 'transaction-0', 'user-0')
audit_params_1 = mk_audit_params('req-1', 'transaction-1', 'user-0')
rsp = yield self.client.get_audit_query('audit-0', 'user_id', 'user-0')
assert rsp == {
'request_id': 'audit-0',
'results': [],
}
yield self.pool._audit_request(
audit_params_0, 'req_data_0', 'resp_data_0')
yield self.pool._audit_request(
audit_params_1, 'req_data_1', 'resp_data_1')
rsp = yield self.client.get_audit_query('audit-1', 'user_id', 'user-0')
self._assert_audit_entries('audit-1', rsp, [{
'audit_params': audit_params_0,
'request_data': u'req_data_0',
'response_data': u'resp_data_0',
'error': False,
}, {
'audit_params': audit_params_1,
'request_data': u'req_data_1',
'response_data': u'resp_data_1',
'error': False,
}])
@inlineCallbacks
def test_create(self):
resp = yield self.client.put_create()
assert resp == {
'request_id': None,
'created': True,
}
# Recreating a pool has a different response.
resp = yield self.client.put_create(expected_code=200)
assert resp == {
'request_id': None,
'created': False,
}
@inlineCallbacks
def test_import(self):
yield self.pool.create_tables()
yield self.assert_voucher_counts([])
content = '\n'.join([
'operator,denomination,voucher',
'Tank,red,Tr0',
'Tank,red,Tr1',
'Tank,blue,Tb0',
'Tank,blue,Tb1',
'Link,red,Lr0',
'Link,red,Lr1',
'Link,blue,Lb0',
'Link,blue,Lb1',
])
resp = yield self.client.put_import('req-0', content)
assert resp == {
'request_id': 'req-0',
'imported': True,
}
yield self.assert_voucher_counts([
('Link', 'blue', False, 2),
('Link', 'red', False, 2),
('Tank', 'blue', False, 2),
('Tank', 'red', False, 2),
])
@inlineCallbacks
def test_import_missing_pool(self):
content = '\n'.join([
'operator,denomination,voucher',
'Tank,red,Tr0',
'Tank,red,Tr1',
'Tank,blue,Tb0',
'Tank,blue,Tb1',
'Link,red,Lr0',
'Link,red,Lr1',
'Link,blue,Lb0',
'Link,blue,Lb1',
])
rsp = yield self.client.put_import('req-0', content, expected_code=404)
assert rsp == {
'request_id': 'req-0',
'error': 'Voucher pool does not exist.',
}
@inlineCallbacks
def test_import_heading_case_mismatch(self):
yield self.pool.create_tables()
yield self.assert_voucher_counts([])
content = '\n'.join([
'OperAtor,denomInation,voucheR',
'Tank,red,Tr0',
'Tank,red,Tr1',
'Tank,blue,Tb0',
'Tank,blue,Tb1',
'Link,red,Lr0',
'Link,red,Lr1',
'Link,blue,Lb0',
'Link,blue,Lb1',
])
resp = yield self.client.put_import('req-0', content)
assert resp == {
'request_id': 'req-0',
'imported': True,
}
yield self.assert_voucher_counts([
('Link', 'blue', False, 2),
('Link', 'red', False, 2),
('Tank', 'blue', False, 2),
('Tank', 'red', False, 2),
])
@inlineCallbacks
def test_import_no_content_md5(self):
yield self.pool.create_tables()
resp = yield self.client.put_import(
'req-0', 'content', '', expected_code=400)
assert resp == {
'request_id': 'req-0',
'error': 'Missing Content-MD5 header.',
}
@inlineCallbacks
def test_import_bad_content_md5(self):
yield self.pool.create_tables()
resp = yield self.client.put_import(
'req-0', 'content', 'badmd5', expected_code=400)
assert resp == {
'request_id': 'req-0',
'error': 'Content-MD5 header does not match content.',
}
@inlineCallbacks
def test_import_idempotent(self):
yield self.pool.create_tables()
yield self.assert_voucher_counts([])
content = '\n'.join([
'operator,denomination,voucher',
'Tank,red,Tr0',
'Tank,red,Tr1',
'Tank,blue,Tb0',
'Tank,blue,Tb1',
'Link,red,Lr0',
'Link,red,Lr1',
'Link,blue,Lb0',
'Link,blue,Lb1',
])
expected_counts = [
('Link', 'blue', False, 2),
('Link', 'red', False, 2),
('Tank', 'blue', False, 2),
('Tank', 'red', False, 2),
]
resp = yield self.client.put_import('req-0', content)
assert resp == {
'request_id': 'req-0',
'imported': True,
}
yield self.assert_voucher_counts(expected_counts)
resp = yield self.client.put_import('req-0', content)
assert resp == {
'request_id': 'req-0',
'imported': True,
}
yield self.assert_voucher_counts(expected_counts)
content_2 = '\n'.join([
'operator,denomination,voucher',
'Tank,red,Tr0',
'Tank,red,Tr1',
'Tank,blue,Tb0',
'Tank,blue,Tb1',
])
resp = yield self.client.put_import(
'req-0', content_2, expected_code=400)
assert resp == {
'request_id': 'req-0',
'error': (
'This request has already been performed with different'
' parameters.'),
}
yield self.assert_voucher_counts(expected_counts)
def _sorted_voucher_counts(self, voucher_counts):
return sorted(voucher_counts, key=lambda vc: (
vc['operator'], vc['denomination'], vc['used']))
@inlineCallbacks
def test_voucher_counts(self):
yield self.pool.create_tables()
rsp0 = yield self.client.get_voucher_counts('req-0')
assert rsp0 == {
'request_id': 'req-0',
'voucher_counts': [],
}
yield populate_pool(self.pool, ['Tank'], ['red'], [0, 1])
rsp1 = yield self.client.get_voucher_counts('req-1')
assert rsp1 == {
'request_id': 'req-1',
'voucher_counts': [
{
'operator': 'Tank',
'denomination': 'red',
'used': False,
'count': 2,
},
],
}
yield populate_pool(self.pool, ['Link'], ['blue'], [0, 1])
yield self.pool.issue_voucher('Link', 'blue', mk_audit_params('req-0'))
rsp2 = yield self.client.get_voucher_counts('req-2')
assert self._sorted_voucher_counts(rsp2['voucher_counts']) == [
{
'operator': 'Link',
'denomination': 'blue',
'used': False,
'count': 1,
},
{
'operator': 'Link',
'denomination': 'blue',
'used': True,
'count': 1,
},
{
'operator': 'Tank',
'denomination': 'red',
'used': False,
'count': 2,
},
]
@inlineCallbacks
def test_export_all_vouchers(self):
yield self.pool.create_tables()
yield populate_pool(
self.pool, ['Tank', 'Link'], ['red', 'blue'], [0, 1])
response = yield self.client.put_export('req-0')
assert set(response.keys()) == set([
'request_id', 'vouchers', 'warnings'])
assert response['request_id'] == 'req-0'
assert response['warnings'] == []
assert sorted_dicts(response['vouchers']) == sorted_dicts([
voucher_dict('Tank', 'red', 'Tank-red-0'),
voucher_dict('Tank', 'red', 'Tank-red-1'),
voucher_dict('Tank', 'blue', 'Tank-blue-0'),
voucher_dict('Tank', 'blue', 'Tank-blue-1'),
voucher_dict('Link', 'red', 'Link-red-0'),
voucher_dict('Link', 'red', 'Link-red-1'),
voucher_dict('Link', 'blue', 'Link-blue-0'),
voucher_dict('Link', 'blue', 'Link-blue-1'),
])
yield self.assert_voucher_counts([
('Link', 'blue', True, 2),
('Link', 'red', True, 2),
('Tank', 'blue', True, 2),
('Tank', 'red', True, 2),
])
@inlineCallbacks
def test_export_some_vouchers(self):
yield self.pool.create_tables()
# We give all vouchers of the same type the same voucher code to avoid
# having to check all the permutations.
yield populate_pool(
self.pool, ['Tank', 'Link'], ['red', 'blue'], [0, 0])
yield self.assert_voucher_counts([
('Link', 'blue', False, 2),
('Link', 'red', False, 2),
('Tank', 'blue', False, 2),
('Tank', 'red', False, 2),
])
response = yield self.client.put_export(
'req-0', 1, ['Tank'], ['red', 'blue'])
assert set(response.keys()) == set([
'request_id', 'vouchers', 'warnings'])
assert response['request_id'] == 'req-0'
assert response['warnings'] == []
assert sorted_dicts(response['vouchers']) == sorted_dicts([
voucher_dict('Tank', 'red', 'Tank-red-0'),
voucher_dict('Tank', 'blue', 'Tank-blue-0'),
])
yield self.assert_voucher_counts([
('Link', 'blue', False, 2),
('Link', 'red', False, 2),
('Tank', 'blue', False, 1),
('Tank', 'red', False, 1),
('Tank', 'blue', True, 1),
('Tank', 'red', True, 1),
])
@inlineCallbacks
def test_export_too_many_vouchers(self):
yield self.pool.create_tables()
# We give all vouchers of the same type the same voucher code to avoid
# having to check all the permutations.
yield populate_pool(
self.pool, ['Tank', 'Link'], ['red', 'blue'], [0, 0])
yield self.assert_voucher_counts([
('Link', 'blue', False, 2),
('Link', 'red', False, 2),
('Tank', 'blue', False, 2),
('Tank', 'red', False, 2),
])
response = yield self.client.put_export(
'req-0', 4, ['Tank'], ['red', 'blue'])
assert set(response.keys()) == set([
'request_id', 'vouchers', 'warnings'])
assert response['request_id'] == 'req-0'
assert sorted(response['warnings']) == sorted([
"Insufficient vouchers available for 'Tank' 'red'.",
"Insufficient vouchers available for 'Tank' 'blue'.",
])
assert sorted_dicts(response['vouchers']) == sorted_dicts([
voucher_dict('Tank', 'red', 'Tank-red-0'),
voucher_dict('Tank', 'red', 'Tank-red-0'),
voucher_dict('Tank', 'blue', 'Tank-blue-0'),
voucher_dict('Tank', 'blue', 'Tank-blue-0'),
])
yield self.assert_voucher_counts([
('Link', 'blue', False, 2),
('Link', 'red', False, 2),
('Tank', 'blue', True, 2),
('Tank', 'red', True, 2),
])
@inlineCallbacks
def test_export_idempotent(self):
yield self.pool.create_tables()
yield populate_pool(self.pool, ['Tank', 'Link'], ['red', 'blue'], [0])
yield self.assert_voucher_counts([
('Link', 'blue', False, 1),
('Link', 'red', False, 1),
('Tank', 'blue', False, 1),
('Tank', 'red', False, 1),
])
response = yield self.client.put_export('req-0', 1, ['Tank'], ['red'])
assert set(response.keys()) == set([
'request_id', 'vouchers', 'warnings'])
assert response['request_id'] == 'req-0'
assert response['warnings'] == []
assert sorted_dicts(response['vouchers']) == sorted_dicts([
voucher_dict('Tank', 'red', 'Tank-red-0'),
])
yield self.assert_voucher_counts([
('Link', 'blue', False, 1),
('Link', 'red', False, 1),
('Tank', 'blue', False, 1),
('Tank', 'red', True, 1),
])
response = yield self.client.put_export('req-0', 1, ['Tank'], ['red'])
assert set(response.keys()) == set([
'request_id', 'vouchers', 'warnings'])
assert response['request_id'] == 'req-0'
assert response['warnings'] == []
assert sorted_dicts(response['vouchers']) == sorted_dicts([
voucher_dict('Tank', 'red', 'Tank-red-0'),
])
yield self.assert_voucher_counts([
('Link', 'blue', False, 1),
('Link', 'red', False, 1),
('Tank', 'blue', False, 1),
('Tank', 'red', True, 1),
])
response = yield self.client.put_export(
'req-0', 2, ['Tank'], ['red'], expected_code=400)
assert response == {
'request_id': 'req-0',
'error': (
'This request has already been performed with different'
' parameters.'),
}
|
|
"""Simple distance, velocity, and angle support for Skyfield.
"""
from __future__ import print_function
import numpy as np
import sys
from numpy import abs, array, copysign, isnan
from .constants import AU_KM, AU_M, DAY_S, tau
def _auto_convert(value):
"""As a convenience, turn Python lists and tuples into NumPy arrays."""
if isinstance(value, (tuple, list)):
return array(value)
else:
return value
# Distance and velocity.
class UnpackingError(Exception):
"""You cannot iterate directly over a Skyfield measurement object."""
class Distance(object):
"""A distance, stored internally as au and available in other units.
You can initialize a ``Distance`` by providing a single float or a
float array as an ``au=``, ``km=``, or ``m=`` parameter when building
a ``Distance`` object.
"""
_warned = False
def __init__(self, au=None, km=None, m=None):
if au is not None:
self.au = _auto_convert(au)
elif km is not None:
self.km = _auto_convert(km)
self.au = km / AU_KM
elif m is not None:
self.m = _auto_convert(m)
self.au = m / AU_M
else:
raise ValueError('to construct a Distance provide au, km, or m')
def __getattr__(self, name):
if name == 'km':
self.km = km = self.au * AU_KM
return km
if name == 'm':
self.m = m = self.au * AU_M
return m
if name == 'AU':
if not Distance._warned:
print('WARNING: the IAU has renamed the astronomical unit to'
' lowercase "au" so Skyfield will soon remove uppercase'
' "AU" from Distance objects', file=sys.stdout)
Distance._warned = True
return self.au
raise AttributeError('no attribute named %r' % (name,))
def __str__(self):
n = self.au
return ('{0} au' if getattr(n, 'shape', 0) else '{0:.6} au').format(n)
def __repr__(self):
return '<{0} {1}>'.format(type(self).__name__, self)
def __iter__(self):
raise UnpackingError(_iter_message % {
'class': self.__class__.__name__, 'values': 'x, y, z',
'attr1': 'au', 'attr2': 'km'})
def to(self, unit):
"""Return this distance in the given AstroPy units."""
from astropy.units import au
return (self.au * au).to(unit)
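# A minimal usage sketch (assuming AU_KM in .constants is the IAU value
# 149597870.7 km, i.e. exactly one astronomical unit):
#
#     >>> d = Distance(km=149597870.7)
#     >>> d.au
#     1.0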
class Velocity(object):
"""A velocity, stored internally as au/day and available in other units.
You can initialize a ``Velocity`` by providing a single float or a
float array as an ``au_per_d=`` parameter.
"""
_warned = False
def __init__(self, au_per_d):
self.au_per_d = au_per_d
def __getattr__(self, name):
if name == 'km_per_s':
self.km_per_s = self.au_per_d * AU_KM / DAY_S
return self.km_per_s
if name == 'AU_per_d':
if not Velocity._warned:
print('WARNING: the IAU has renamed the astronomical unit to'
' lowercase "au" so Skyfield will soon remove'
' "AU_per_day" in favor of "au_per_day"',
file=sys.stderr)
Velocity._warned = True
return self.au_per_d
raise AttributeError('no attribute named %r' % (name,))
def __str__(self):
return '%s au/day' % self.au_per_d
def __iter__(self):
raise UnpackingError(_iter_message % {
'class': self.__class__.__name__, 'values': 'xdot, ydot, zdot',
'attr1': 'au_per_d', 'attr2': 'km_per_s'})
def to(self, unit):
"""Return this velocity in the given AstroPy units."""
from astropy.units import au, d
return (self.au_per_d * au / d).to(unit)
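# A minimal usage sketch (assuming AU_KM = 149597870.7 and DAY_S = 86400.0 in
# .constants, so 1 au/day is roughly 1731.46 km/s):
#
#     >>> v = Velocity(au_per_d=1.0)
#     >>> round(v.km_per_s, 2)
#     1731.46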
_iter_message = """\
cannot directly unpack a %(class)s into several values
To unpack a %(class)s into three components, you need to ask for its
value in specific units through an attribute or method:
%(values)s = velocity.%(attr1)s
%(values)s = velocity.%(attr2)s
%(values)s = velocity.to(astropy_unit)
"""
# Angle units.
_to_degrees = 360.0 / tau
_from_degrees = tau / 360.0
_to_hours = 24.0 / tau
_from_hours = tau / 24.0
_instantiation_instructions = """to instantiate an Angle, try one of:
Angle(angle=another_angle)
Angle(radians=value)
Angle(degrees=value)
Angle(hours=value)
where `value` can be either a Python float or a NumPy array of floats"""
class Angle(object):
def __init__(self, angle=None, radians=None, degrees=None, hours=None,
preference=None, signed=False):
if angle is not None:
if not isinstance(angle, Angle):
raise ValueError(_instantiation_instructions)
self.radians = angle.radians
elif radians is not None:
self.radians = radians
elif degrees is not None:
self._degrees = degrees = _unsexagesimalize(degrees)
self.radians = degrees * _from_degrees
elif hours is not None:
self._hours = hours = _unsexagesimalize(hours)
self.radians = hours * _from_hours
preference = 'hours'
self.preference = preference or 'degrees'
self.signed = signed
def __getattr__(self, name):
if name == '_hours':
self._hours = _hours = self.radians * _to_hours
return _hours
if name == '_degrees':
self._degrees = _degrees = self.radians * _to_degrees
return _degrees
if name == 'hours':
if self.preference != 'hours':
raise WrongUnitError('hours')
self.hours = hours = self._hours
return hours
if name == 'degrees':
if self.preference != 'degrees':
raise WrongUnitError('degrees')
self.degrees = degrees = self._degrees
return degrees
raise AttributeError('no attribute named %r' % (name,))
def __str__(self):
return self.dstr() if self.preference == 'degrees' else self.hstr()
def __repr__(self):
return '<{0} {1}>'.format(type(self).__name__, self)
def hms(self, warn=True):
if warn and self.preference != 'hours':
raise WrongUnitError('hms')
sign, units, minutes, seconds = _sexagesimalize_to_float(self._hours)
return sign * units, sign * minutes, sign * seconds
def signed_hms(self, warn=True):
if warn and self.preference != 'hours':
raise WrongUnitError('signed_hms')
return _sexagesimalize_to_float(self._hours)
def hstr(self, places=2, warn=True):
if warn and self.preference != 'hours':
raise WrongUnitError('hstr')
hours = self._hours
if getattr(hours, 'shape', None):
return "{0} values from {1} to {2}".format(
len(hours),
_hstr(min(hours), places),
_hstr(max(hours), places)
)
return _hstr(hours, places)
def dms(self, warn=True):
if warn and self.preference != 'degrees':
raise WrongUnitError('dms')
sign, units, minutes, seconds = _sexagesimalize_to_float(self._degrees)
return sign * units, sign * minutes, sign * seconds
def signed_dms(self, warn=True):
if warn and self.preference != 'degrees':
raise WrongUnitError('signed_dms')
return _sexagesimalize_to_float(self._degrees)
def dstr(self, places=1, warn=True):
if warn and self.preference != 'degrees':
raise WrongUnitError('dstr')
degrees = self._degrees
signed = self.signed
if getattr(degrees, 'shape', None):
return "{0} values from {1} to {2}".format(
len(degrees),
_dstr(min(degrees),places,signed),
_dstr(max(degrees),places,signed)
)
return _dstr(degrees, places, signed)
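# A minimal usage sketch (values match the _hstr/_dstr doctests below):
#
#     >>> Angle(degrees=12.125).dstr()
#     '12deg 07\' 30.0"'
#     >>> Angle(hours=12.125).hstr()
#     '12h 07m 30.00s'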
class WrongUnitError(ValueError):
def __init__(self, name):
unit = 'hours' if (name.startswith('h') or '_h' in name) else 'degrees'
usual = 'hours' if (unit == 'degrees') else 'degrees'
message = ('this angle is usually expressed in {0}, not {1};'
' if you want to use {1} anyway,'.format(usual, unit))
if name == unit:
message += ' then please use the attribute _{0}'.format(unit)
else:
message += ' then call {0}() with warn=False'.format(name)
self.args = (message,)
def _sexagesimalize_to_float(value):
"""Decompose `value` into units, minutes, and seconds.
Note that this routine is not appropriate for displaying a value,
because rounding to the smallest digit of display is necessary
before showing a value to the user. Use `_sexagesimalize_to_int()`
for data being displayed to the user.
This routine simply decomposes the floating point `value` into a
sign (+1.0 or -1.0), units, minutes, and seconds, returning the
result in a four-element tuple.
>>> _sexagesimalize_to_float(12.05125)
(1.0, 12.0, 3.0, 4.5)
>>> _sexagesimalize_to_float(-12.05125)
(-1.0, 12.0, 3.0, 4.5)
"""
sign = np.sign(value)
n = abs(value)
minutes, seconds = divmod(n * 3600.0, 60.0)
units, minutes = divmod(minutes, 60.0)
return sign, units, minutes, seconds
def _sexagesimalize_to_int(value, places=0):
"""Decompose `value` into units, minutes, seconds, and second fractions.
This routine prepares a value for sexagesimal display, with its
seconds fraction expressed as an integer with `places` digits. The
result is a tuple of five integers:
``(sign [either +1 or -1], units, minutes, seconds, second_fractions)``
The integers are properly rounded per astronomical convention so
that, for example, given ``places=3`` the result tuple ``(1, 11, 22,
33, 444)`` means that the input was closer to 11u 22' 33.444" than
to either 33.443" or 33.445" in its value.
"""
sign = int(np.sign(value))
value = abs(value)
power = 10 ** places
n = int(7200 * power * value + 1) // 2
n, fraction = divmod(n, power)
n, seconds = divmod(n, 60)
n, minutes = divmod(n, 60)
return sign, n, minutes, seconds, fraction
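# For example (a sketch): _sexagesimalize_to_int(12.05125, places=3) returns
# (1, 12, 3, 4, 500), i.e. 12 units, 3 minutes, 4 seconds and 500 thousandths.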
def _hstr(hours, places=2):
"""Convert floating point `hours` into a sexagesimal string.
>>> _hstr(12.125)
'12h 07m 30.00s'
>>> _hstr(12.125, places=4)
'12h 07m 30.0000s'
>>> _hstr(float('nan'))
'nan'
"""
if isnan(hours):
return 'nan'
sgn, h, m, s, etc = _sexagesimalize_to_int(hours, places)
sign = '-' if sgn < 0.0 else ''
return '%s%02dh %02dm %02d.%0*ds' % (sign, h, m, s, places, etc)
def _dstr(degrees, places=1, signed=False):
r"""Convert floating point `degrees` into a sexagesimal string.
>>> _dstr(12.125)
'12deg 07\' 30.0"'
>>> _dstr(12.125, places=3)
'12deg 07\' 30.000"'
>>> _dstr(12.125, signed=True)
'+12deg 07\' 30.0"'
>>> _dstr(float('nan'))
'nan'
"""
if isnan(degrees):
return 'nan'
sgn, d, m, s, etc = _sexagesimalize_to_int(degrees, places)
sign = '-' if sgn < 0.0 else '+' if signed else ''
return '%s%02ddeg %02d\' %02d.%0*d"' % (sign, d, m, s, places, etc)
def _unsexagesimalize(value):
"""Return `value` after interpreting a (units, minutes, seconds) tuple.
When `value` is not a tuple, it is simply returned.
>>> _unsexagesimalize(3.25)
3.25
An input tuple is interpreted as units, minutes, and seconds. Note
that only the sign of `units` is significant! So all of the
following tuples convert into exactly the same value:
>>> '%f' % _unsexagesimalize((-1, 2, 3))
'-1.034167'
>>> '%f' % _unsexagesimalize((-1, -2, 3))
'-1.034167'
>>> '%f' % _unsexagesimalize((-1, -2, -3))
'-1.034167'
"""
if isinstance(value, tuple):
for i, component in enumerate(value):
if i:
value = value + copysign(component, value) * 60.0 ** -i
else:
value = component
return value
def _interpret_angle(name, angle_object, angle_float, unit='degrees'):
"""Return an angle in radians from one of two arguments.
It is common for Skyfield routines to accept both an argument like
`alt` that takes an Angle object as well as an `alt_degrees` that
can be given a bare float or a sexagesimal tuple. A pair of such
arguments can be passed to this routine for interpretation.
"""
if angle_object is not None:
if isinstance(angle_object, Angle):
return angle_object.radians
elif angle_float is not None:
return _unsexagesimalize(angle_float) * _from_degrees
raise ValueError('you must either provide the {0}= parameter with'
' an Angle argument or supply the {0}_{1}= parameter'
' with a numeric argument'.format(name, unit))
def _interpret_ltude(value, name, psuffix, nsuffix):
"""Interpret a string, float, or tuple as a latitude or longitude angle.
`value` - The string to interpret.
`name` - 'latitude' or 'longitude', for use in exception messages.
`psuffix` - The suffix that indicates a positive angle ('N' or 'E').
`nsuffix` - The suffix that indicates a negative angle ('S' or 'W').
"""
if not isinstance(value, str):
return Angle(degrees=_unsexagesimalize(value))
value = value.strip().upper()
if value.endswith(psuffix):
sign = +1.0
elif value.endswith(nsuffix):
sign = -1.0
else:
raise ValueError('your {0} string {1!r} does not end with either {2!r}'
' or {3!r}'.format(name, value, psuffix, nsuffix))
try:
value = float(value[:-1])
except ValueError:
raise ValueError('your {0} string {1!r} cannot be parsed as a floating'
' point number'.format(name, value))
return Angle(degrees=sign * value)
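# For example (a sketch): _interpret_ltude('10.5 S', 'latitude', 'N', 'S')
# returns an Angle of -10.5 degrees, while a bare float or tuple is passed
# straight through to Angle(degrees=...).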
|
|
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Entry points into the Beta API of gRPC Python."""
# threading is referenced from specification in this module.
import abc
import enum
import threading # pylint: disable=unused-import
# cardinality and face are referenced from specification in this module.
from grpc._adapter import _intermediary_low
from grpc._adapter import _low
from grpc._adapter import _types
from grpc.beta import _connectivity_channel
from grpc.beta import _server
from grpc.beta import _stub
from grpc.beta import interfaces
from grpc.framework.common import cardinality # pylint: disable=unused-import
from grpc.framework.interfaces.face import face # pylint: disable=unused-import
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
'Exception calling channel subscription callback!')
class ChannelCredentials(object):
"""A value encapsulating the data required to create a secure Channel.
This class and its instances have no supported interface - it exists to define
the type of its instances and its instances exist to be passed to other
functions.
"""
def __init__(self, low_credentials):
self._low_credentials = low_credentials
def ssl_channel_credentials(root_certificates=None, private_key=None,
certificate_chain=None):
"""Creates a ChannelCredentials for use with an SSL-enabled Channel.
Args:
root_certificates: The PEM-encoded root certificates or unset to ask for
them to be retrieved from a default location.
private_key: The PEM-encoded private key to use or unset if no private key
should be used.
certificate_chain: The PEM-encoded certificate chain to use or unset if no
certificate chain should be used.
Returns:
A ChannelCredentials for use with an SSL-enabled Channel.
"""
return ChannelCredentials(_low.channel_credentials_ssl(
root_certificates, private_key, certificate_chain))
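# A minimal usage sketch (hypothetical PEM bytes and host loaded/chosen by the caller):
#
#     creds = ssl_channel_credentials(root_certificates=root_pem)
#     channel = secure_channel('example.com', 443, creds)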
class CallCredentials(object):
"""A value encapsulating data asserting an identity over an *established*
channel. May be composed with ChannelCredentials to always assert identity for
every call over that channel.
This class and its instances have no supported interface - it exists to define
the type of its instances and its instances exist to be passed to other
functions.
"""
def __init__(self, low_credentials):
self._low_credentials = low_credentials
def metadata_call_credentials(metadata_plugin, name=None):
"""Construct CallCredentials from an interfaces.GRPCAuthMetadataPlugin.
Args:
metadata_plugin: An interfaces.GRPCAuthMetadataPlugin to use in constructing
the CallCredentials object.
name: An optional name for the plugin; defaults to the plugin's __name__.
Returns:
A CallCredentials object for use in a GRPCCallOptions object.
"""
if name is None:
name = metadata_plugin.__name__
return CallCredentials(
_low.call_credentials_metadata_plugin(metadata_plugin, name))
def composite_call_credentials(call_credentials, additional_call_credentials):
"""Compose two CallCredentials to make a new one.
Args:
call_credentials: A CallCredentials object.
additional_call_credentials: Another CallCredentials object to compose on
top of call_credentials.
Returns:
A CallCredentials object for use in a GRPCCallOptions object.
"""
return CallCredentials(
_low.call_credentials_composite(
call_credentials._low_credentials,
additional_call_credentials._low_credentials))
def composite_channel_credentials(channel_credentials,
additional_call_credentials):
"""Compose ChannelCredentials on top of client credentials to make a new one.
Args:
channel_credentials: A ChannelCredentials object.
additional_call_credentials: A CallCredentials object to compose on
top of channel_credentials.
Returns:
A ChannelCredentials object for use in a GRPCCallOptions object.
"""
return ChannelCredentials(
_low.channel_credentials_composite(
channel_credentials._low_credentials,
additional_call_credentials._low_credentials))
class Channel(object):
"""A channel to a remote host through which RPCs may be conducted.
Only the "subscribe" and "unsubscribe" methods are supported for application
use. This class' instance constructor and all other attributes are
unsupported.
"""
def __init__(self, low_channel, intermediary_low_channel):
self._low_channel = low_channel
self._intermediary_low_channel = intermediary_low_channel
self._connectivity_channel = _connectivity_channel.ConnectivityChannel(
low_channel)
def subscribe(self, callback, try_to_connect=None):
"""Subscribes to this Channel's connectivity.
Args:
callback: A callable to be invoked and passed an
interfaces.ChannelConnectivity identifying this Channel's connectivity.
The callable will be invoked immediately upon subscription and again for
every change to this Channel's connectivity thereafter until it is
unsubscribed.
try_to_connect: A boolean indicating whether or not this Channel should
attempt to connect if it is not already connected and ready to conduct
RPCs.
"""
self._connectivity_channel.subscribe(callback, try_to_connect)
def unsubscribe(self, callback):
"""Unsubscribes a callback from this Channel's connectivity.
Args:
callback: A callable previously registered with this Channel from having
been passed to its "subscribe" method.
"""
self._connectivity_channel.unsubscribe(callback)
def insecure_channel(host, port):
"""Creates an insecure Channel to a remote host.
Args:
host: The name of the remote host to which to connect.
port: The port of the remote host to which to connect.
Returns:
A Channel to the remote host through which RPCs may be conducted.
"""
intermediary_low_channel = _intermediary_low.Channel(
'%s:%d' % (host, port), None)
return Channel(intermediary_low_channel._internal, intermediary_low_channel) # pylint: disable=protected-access
def secure_channel(host, port, channel_credentials):
"""Creates a secure Channel to a remote host.
Args:
host: The name of the remote host to which to connect.
port: The port of the remote host to which to connect.
channel_credentials: A ChannelCredentials.
Returns:
A secure Channel to the remote host through which RPCs may be conducted.
"""
intermediary_low_channel = _intermediary_low.Channel(
'%s:%d' % (host, port), channel_credentials._low_credentials)
return Channel(intermediary_low_channel._internal, intermediary_low_channel) # pylint: disable=protected-access
class StubOptions(object):
"""A value encapsulating the various options for creation of a Stub.
This class and its instances have no supported interface - it exists to define
the type of its instances and its instances exist to be passed to other
functions.
"""
def __init__(
self, host, request_serializers, response_deserializers,
metadata_transformer, thread_pool, thread_pool_size):
self.host = host
self.request_serializers = request_serializers
self.response_deserializers = response_deserializers
self.metadata_transformer = metadata_transformer
self.thread_pool = thread_pool
self.thread_pool_size = thread_pool_size
_EMPTY_STUB_OPTIONS = StubOptions(
None, None, None, None, None, None)
def stub_options(
host=None, request_serializers=None, response_deserializers=None,
metadata_transformer=None, thread_pool=None, thread_pool_size=None):
"""Creates a StubOptions value to be passed at stub creation.
All parameters are optional and should always be passed by keyword.
Args:
host: A host string to set on RPC calls.
request_serializers: A dictionary from service name-method name pair to
request serialization behavior.
response_deserializers: A dictionary from service name-method name pair to
response deserialization behavior.
metadata_transformer: A callable that given a metadata object produces
another metadata object to be used in the underlying communication on the
wire.
thread_pool: A thread pool to use in stubs.
thread_pool_size: The size of thread pool to create for use in stubs;
ignored if thread_pool has been passed.
Returns:
A StubOptions value created from the passed parameters.
"""
return StubOptions(
host, request_serializers, response_deserializers,
metadata_transformer, thread_pool, thread_pool_size)
def generic_stub(channel, options=None):
"""Creates a face.GenericStub on which RPCs can be made.
Args:
channel: A Channel for use by the created stub.
options: A StubOptions customizing the created stub.
Returns:
A face.GenericStub on which RPCs can be made.
"""
effective_options = _EMPTY_STUB_OPTIONS if options is None else options
return _stub.generic_stub(
channel._intermediary_low_channel, effective_options.host, # pylint: disable=protected-access
effective_options.metadata_transformer,
effective_options.request_serializers,
effective_options.response_deserializers, effective_options.thread_pool,
effective_options.thread_pool_size)
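# A minimal usage sketch (hypothetical host and port):
#
#     channel = insecure_channel('localhost', 50051)
#     stub = generic_stub(channel)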
def dynamic_stub(channel, service, cardinalities, options=None):
"""Creates a face.DynamicStub with which RPCs can be invoked.
Args:
channel: A Channel for the returned face.DynamicStub to use.
service: The package-qualified full name of the service.
cardinalities: A dictionary from RPC method name to cardinality.Cardinality
value identifying the cardinality of the RPC method.
options: An optional StubOptions value further customizing the functionality
of the returned face.DynamicStub.
Returns:
A face.DynamicStub with which RPCs can be invoked.
"""
effective_options = _EMPTY_STUB_OPTIONS if options is None else options
return _stub.dynamic_stub(
channel._intermediary_low_channel, effective_options.host, service, # pylint: disable=protected-access
cardinalities, effective_options.metadata_transformer,
effective_options.request_serializers,
effective_options.response_deserializers, effective_options.thread_pool,
effective_options.thread_pool_size)
class ServerCredentials(object):
"""A value encapsulating the data required to open a secure port on a Server.
This class and its instances have no supported interface - it exists to define
the type of its instances and its instances exist to be passed to other
functions.
"""
def __init__(self, low_credentials):
self._low_credentials = low_credentials
def ssl_server_credentials(
private_key_certificate_chain_pairs, root_certificates=None,
require_client_auth=False):
"""Creates a ServerCredentials for use with an SSL-enabled Server.
Args:
private_key_certificate_chain_pairs: A nonempty sequence each element of
which is a pair the first element of which is a PEM-encoded private key
and the second element of which is the corresponding PEM-encoded
certificate chain.
root_certificates: PEM-encoded client root certificates to be used for
verifying authenticated clients. If omitted, require_client_auth must also
be omitted or be False.
require_client_auth: A boolean indicating whether or not to require clients
to be authenticated. May only be True if root_certificates is not None.
Returns:
A ServerCredentials for use with an SSL-enabled Server.
"""
if len(private_key_certificate_chain_pairs) == 0:
raise ValueError(
'At least one private key-certificate chain pair is required!')
elif require_client_auth and root_certificates is None:
raise ValueError(
'Illegal to require client auth without providing root certificates!')
else:
return ServerCredentials(_low.server_credentials_ssl(
root_certificates, private_key_certificate_chain_pairs,
require_client_auth))
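# A minimal usage sketch (hypothetical PEM bytes for one key/certificate pair):
#
#     server_creds = ssl_server_credentials([(private_key_pem, certificate_chain_pem)])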
class ServerOptions(object):
"""A value encapsulating the various options for creation of a Server.
This class and its instances have no supported interface - it exists to define
the type of its instances and its instances exist to be passed to other
functions.
"""
def __init__(
self, multi_method_implementation, request_deserializers,
response_serializers, thread_pool, thread_pool_size, default_timeout,
maximum_timeout):
self.multi_method_implementation = multi_method_implementation
self.request_deserializers = request_deserializers
self.response_serializers = response_serializers
self.thread_pool = thread_pool
self.thread_pool_size = thread_pool_size
self.default_timeout = default_timeout
self.maximum_timeout = maximum_timeout
_EMPTY_SERVER_OPTIONS = ServerOptions(
None, None, None, None, None, None, None)
def server_options(
multi_method_implementation=None, request_deserializers=None,
response_serializers=None, thread_pool=None, thread_pool_size=None,
default_timeout=None, maximum_timeout=None):
"""Creates a ServerOptions value to be passed at server creation.
All parameters are optional and should always be passed by keyword.
Args:
multi_method_implementation: A face.MultiMethodImplementation to be called
to service an RPC if the server has no specific method implementation for
the name of the RPC for which service was requested.
request_deserializers: A dictionary from service name-method name pair to
request deserialization behavior.
response_serializers: A dictionary from service name-method name pair to
response serialization behavior.
thread_pool: A thread pool to use in stubs.
thread_pool_size: The size of thread pool to create for use in stubs;
ignored if thread_pool has been passed.
default_timeout: A duration in seconds to allow for RPC service when
servicing RPCs that did not include a timeout value when invoked.
maximum_timeout: A duration in seconds to allow for RPC service when
servicing RPCs no matter what timeout value was passed when the RPC was
invoked.
Returns:
A ServerOptions value created from the passed parameters.
"""
return ServerOptions(
multi_method_implementation, request_deserializers, response_serializers,
thread_pool, thread_pool_size, default_timeout, maximum_timeout)
def server(service_implementations, options=None):
"""Creates an interfaces.Server with which RPCs can be serviced.
Args:
service_implementations: A dictionary from service name-method name pair to
face.MethodImplementation.
options: An optional ServerOptions value further customizing the
functionality of the returned Server.
Returns:
An interfaces.Server with which RPCs can be serviced.
"""
effective_options = _EMPTY_SERVER_OPTIONS if options is None else options
return _server.server(
service_implementations, effective_options.multi_method_implementation,
effective_options.request_deserializers,
effective_options.response_serializers, effective_options.thread_pool,
effective_options.thread_pool_size, effective_options.default_timeout,
effective_options.maximum_timeout)
|
|
#!/usr/bin/env python
from __future__ import absolute_import
import collections
import numpy
from scipy.ndimage import map_coordinates
from eotools.coordinates import convert_coordinates
from eotools.drivers.stacked_dataset import StackedDataset
def x_profile(stacked_dataset, xy, raster_band=1, from_map=False):
"""
Get the data associated with the x-axis of an image.
:param stacked_dataset:
An instance of a StackedDataset.
:param xy:
A tuple containing an (x, y) co-ordinate pair from which to
read the horizontal profile.
:param raster_band:
The raster band to read from. Default is raster band 1.
:param from_map:
A boolean indicating whether or not the input co-ordinates
are real world map coordinates. If set to True, then the input
xy co-ordinate will be converted to image co-ordinates.
:return:
A 1D NumPy array of length determined by the number of columns
in the stacked_dataset.
"""
if not isinstance(stacked_dataset, StackedDataset):
msg = ('stacked_dataset should be an instance of StackedDataset but '
'is of type {}')
msg = msg.format(type(stacked_dataset))
raise TypeError(msg)
# Convert to image co-ordinates if needed
if from_map:
x, y = convert_coordinates(stacked_dataset.geotransform, xy,
to_map=False)
else:
x, y = xy
# Create a tile to define the chunk we wish to read
tile = ((y, y + 1), (0, stacked_dataset.samples))
# Read the profile
profile = stacked_dataset.read_tile(tile, raster_bands=raster_band)
return profile[0, :]
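# Illustrative sketch, not part of the original module: read the horizontal
# profile through image row `row` of raster band 1. The single-filename
# constructor for StackedDataset is an assumption made for the example.
def _example_x_profile(filename, row):
    sd = StackedDataset(filename)
    return x_profile(sd, (0, row))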
def y_profile(stacked_dataset, xy, raster_band=1, from_map=False):
"""
Get the data associated with the y-axis of an image.
:param stacked_dataset:
An instance of a StackedDataset.
:param xy:
A tuple containing an (x, y) co-ordinate pair from which to
read the vertical profile.
:param raster_band:
The raster band to read from. Default is raster band 1.
:param from_map:
A boolean indicating whether or not the input co-ordinates
are real world map coordinates. If set to True, then the input
xy co-ordinate will be converted to image co-ordinates.
:return:
A 1D NumPy array of length determined by the number of rows in
the stacked_dataset.
"""
if not isinstance(stacked_dataset, StackedDataset):
msg = ('stacked_dataset should be an instance of StackedDataset but '
'is of type {}')
msg = msg.format(type(stacked_dataset))
raise TypeError(msg)
# Convert to image co-ordinates if needed
if from_map:
x, y = convert_coordinates(stacked_dataset.geotransform, xy,
to_map=False)
else:
x, y = xy
# Create a tile to define the chunk we wish to read
tile = ((0, stacked_dataset.lines), (x, x + 1))
# Read the profile
profile = stacked_dataset.read_tile(tile, raster_bands=raster_band)
return profile[:, 0]
def z_profile(stacked_dataset, xy, from_map=False, raster_bands=None):
"""
Get the data associated with the z-axis of an image.
The z-axis for a 3D image is also known as a spectral profile for
spectrally stacked data or a temporal profile for temporally
stacked data.
:param stacked_dataset:
An instance of a StackedDataset.
:param xy:
The xy co-ordinate from which to get the z profile.
:param from_map:
A boolean indicating whether or not the input co-ordinates
are real world map coordinates. If set to True, then the input
xy co-ordinate will be converted to image co-ordinates.
:param raster_bands:
An optional sequence of raster band numbers to read. If not
given, all raster bands are read.
:return:
A 1D NumPy array of length determined by the number of raster
bands in the stacked_dataset.
"""
if not isinstance(stacked_dataset, StackedDataset):
msg = ('stacked_dataset should be an instance of StackedDataset but '
'is of type {}')
msg = msg.format(type(stacked_dataset))
raise TypeError(msg)
# Convert to image co-ordinates if needed
if from_map:
x, y = convert_coordinates(stacked_dataset.geotransform, xy,
to_map=False)
else:
x, y = xy
# Create a tile to define the chunk we wish to read
tile = ((y, y + 1), (x, x + 1))
if ((raster_bands is None) or
(not isinstance(raster_bands, collections.Sequence))):
nb = range(1, stacked_dataset.bands + 1)
else:
nb = raster_bands
# Read the profile
profile = stacked_dataset.read_tile(tile, raster_bands=nb)
return profile[:, 0, 0]
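# Illustrative sketch, not part of the original module: retrieve the
# spectral/temporal profile across all raster bands at image co-ordinate
# (x, y). The single-filename constructor for StackedDataset is an
# assumption made for the example.
def _example_z_profile(filename, x, y):
    sd = StackedDataset(filename)
    return z_profile(sd, (x, y))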
def arbitrary_profile(stacked_dataset, xy_points, raster_band=1,
cubic=False, from_map=False):
"""
Get the data associated with an arbitrary set of points that
define an arbitrary profile/transect, and the pixel locations
associated with the transect.
:param stacked_dataset:
An instance of a StackedDataset.
:param xy_points:
A list of (x, y) co-ordinate pairs, e.g. [(x, y), (x, y), (x, y)].
:param raster_band:
The raster band to read from. Default is raster band 1.
:param from_map:
A boolean indicating whether or not the input co-ordinates
are real world map coordinates. If set to True, then the input
xy co-ordinate will be converted to image co-ordinates.
:return:
A 1D NumPy array of length determined by the distance between
the xy_points; A tuple (y_index, x_index) containing the
pixel locations of the transect; and a tuple containing a list
of start and end co-ordinates for both x and y fields.
The form of the start and end locations is:
([(xstart_1, xend_1),...,(xstart_n-1, xend_n-1)],
[(ystart_1, yend_1),...,(ystart_n-1, yend_n-1)]).
This form can be directly used in a call to plot() as follows:
z_prf, idx, xy_start_end = arbitrary_profile()
plot(xy_start_end[0], xy_start_end[1], 'r-')
"""
if not isinstance(stacked_dataset, StackedDataset):
msg = ('stacked_dataset should be an instance of StackedDataset but '
'is of type {}')
msg = msg.format(type(stacked_dataset))
raise TypeError(msg)
n_points = len(xy_points)
if n_points < 2:
msg = "Minimum number of points is 2, received {}".format(n_points)
raise ValueError(msg)
# Convert to image co-ordinates if needed
if from_map:
img_xy = convert_coordinates(stacked_dataset.geotransform, xy_points,
to_map=False)
else:
img_xy = xy_points
# Read the image band
img = stacked_dataset.read_raster_band(raster_band=raster_band)
profile = numpy.array([], dtype=img.dtype)
x_idx = numpy.array([], dtype='int')
y_idx = numpy.array([], dtype='int')
x_start_end = []
y_start_end = []
for i in range(1, n_points):
x0, y0 = img_xy[i - 1]
x1, y1 = img_xy[i]
x_start_end.append((x0, x1))
y_start_end.append((y0, y1))
# Number of pixels along the segment, inclusive of both endpoints.
n_pixels = int(max(abs(x1 - x0) + 1, abs(y1 - y0) + 1))
x = numpy.linspace(x0, x1, n_pixels)
y = numpy.linspace(y0, y1, n_pixels)
x_idx = numpy.append(x_idx, x)
y_idx = numpy.append(y_idx, y)
if cubic:
transect = map_coordinates(img, (y, x))
profile = numpy.append(profile, transect)
else:
transect = img[y.astype('int'), x.astype('int')]
profile = numpy.append(profile, transect)
x_idx = x_idx.astype('int')
y_idx = y_idx.astype('int')
return (profile, (y_idx, x_idx), (x_start_end, y_start_end))
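# Illustrative usage sketch, not part of the original module: extract a
# transect between two map co-ordinates and overlay it on a plot, following
# the call pattern described in the arbitrary_profile docstring. The
# matplotlib usage and the StackedDataset constructor are assumptions.
#
#     import matplotlib.pyplot as plt
#     sd = StackedDataset('stack.tif')
#     points = [(140.5, -35.2), (140.9, -35.6)]
#     prf, (y_idx, x_idx), xy_start_end = arbitrary_profile(sd, points,
#                                                           from_map=True)
#     plt.imshow(sd.read_raster_band(raster_band=1))
#     plt.plot(xy_start_end[0], xy_start_end[1], 'r-')
#     plt.figure()
#     plt.plot(prf)
#     plt.show()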
|
|
"""TensorBoard server handler logic.
TensorboardHandler contains all the logic for serving static files off of disk
and for handling the API calls to endpoints like /tags that require information
about loaded events.
"""
import BaseHTTPServer
import csv
import gzip
import imghdr
import json
import mimetypes
import os
import StringIO
import urllib
import urlparse
from google.protobuf import text_format
import tensorflow.python.platform
from tensorflow.python.platform import logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.summary import event_accumulator
from tensorflow.tensorboard import float_wrapper
RUNS_ROUTE = '/runs'
SCALARS_ROUTE = '/' + event_accumulator.SCALARS
IMAGES_ROUTE = '/' + event_accumulator.IMAGES
HISTOGRAMS_ROUTE = '/' + event_accumulator.HISTOGRAMS
COMPRESSED_HISTOGRAMS_ROUTE = '/' + event_accumulator.COMPRESSED_HISTOGRAMS
INDIVIDUAL_IMAGE_ROUTE = '/individualImage'
GRAPH_ROUTE = '/' + event_accumulator.GRAPH
_IMGHDR_TO_MIMETYPE = {
'bmp': 'image/bmp',
'gif': 'image/gif',
'jpeg': 'image/jpeg',
'png': 'image/png'
}
_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'
def _content_type_for_image(encoded_image_string):
image_type = imghdr.what(None, encoded_image_string)
return _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
class _OutputFormat(object):
"""An enum used to list the valid output formats for API calls.
Not all API calls support all formats (for example, only scalars and
compressed histograms support CSV).
"""
JSON = 'json'
CSV = 'csv'
class TensorboardHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handler class for use with BaseHTTPServer.HTTPServer.
This is essentially a thin wrapper around calls to an EventMultiplexer object
as well as serving files off disk.
"""
def __init__(self, multiplexer, *args):
self._multiplexer = multiplexer
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args)
# We use underscore_names for consistency with inherited methods.
def _image_response_for_run(self, run_images, run, tag):
"""Builds a JSON-serializable object with information about run_images.
Args:
run_images: A list of event_accumulator.ImageValueEvent objects.
run: The name of the run.
tag: The name of the tag the images all belong to.
Returns:
A list of dictionaries containing the wall time, step, URL, width, and
height for each image.
"""
response = []
for index, run_image in enumerate(run_images):
response.append({
'wall_time': run_image.wall_time,
'step': run_image.step,
# We include the size so that the frontend can add that to the <img>
# tag so that the page layout doesn't change when the image loads.
'width': run_image.width,
'height': run_image.height,
'query': self._query_for_individual_image(run, tag, index)
})
return response
def _path_is_safe(self, path):
"""Check path is safe (stays within current directory).
This is for preventing directory-traversal attacks.
Args:
path: The path to check for safety.
Returns:
True if the given path stays within the current directory, and false
if it would escape to a higher directory. E.g. _path_is_safe('index.html')
returns true, but _path_is_safe('../../../etc/password') returns false.
"""
base = os.path.abspath(os.curdir)
absolute_path = os.path.abspath(path)
prefix = os.path.commonprefix([base, absolute_path])
return prefix == base
def _send_gzip_response(self, content, content_type, code=200):
"""Writes the given content as gzip response using the given content type.
Args:
content: The content to respond with.
content_type: The mime type of the content.
code: The numeric HTTP status code to use.
"""
out = StringIO.StringIO()
f = gzip.GzipFile(fileobj=out, mode='w')
f.write(content)
f.close()
gzip_content = out.getvalue()
self.send_response(code)
self.send_header('Content-Type', content_type)
self.send_header('Content-Length', len(gzip_content))
self.send_header('Content-Encoding', 'gzip')
self.end_headers()
self.wfile.write(gzip_content)
def _send_json_response(self, obj, code=200):
"""Writes out the given object as JSON using the given HTTP status code.
This also replaces special float values with stringified versions.
Args:
obj: The object to respond with.
code: The numeric HTTP status code to use.
"""
output = json.dumps(float_wrapper.WrapSpecialFloats(obj))
self.send_response(code)
self.send_header('Content-Type', 'application/json')
self.send_header('Content-Length', len(output))
self.end_headers()
self.wfile.write(output)
def _send_csv_response(self, serialized_csv, code=200):
"""Writes out the given string, which represents CSV data.
Unlike _send_json_response, this does *not* perform the CSV serialization
for you. It only sets the proper headers.
Args:
serialized_csv: A string containing some CSV data.
code: The numeric HTTP status code to use.
"""
self.send_response(code)
self.send_header('Content-Type', 'text/csv')
self.send_header('Content-Length', len(serialized_csv))
self.end_headers()
self.wfile.write(serialized_csv)
def _serve_scalars(self, query_params):
"""Given a tag and single run, return array of ScalarEvents."""
# TODO(cassandrax): return HTTP status code for malformed requests
tag = query_params.get('tag')
run = query_params.get('run')
values = self._multiplexer.Scalars(run, tag)
if query_params.get('format') == _OutputFormat.CSV:
string_io = StringIO.StringIO()
writer = csv.writer(string_io)
writer.writerow(['Wall time', 'Step', 'Value'])
writer.writerows(values)
self._send_csv_response(string_io.getvalue())
else:
self._send_json_response(values)
def _serve_graph(self, query_params):
"""Given a single run, return the graph definition in json format."""
run = query_params.get('run', None)
if run is None:
self.send_error(400, 'query parameter "run" is required')
return
try:
graph = self._multiplexer.Graph(run)
except ValueError:
self.send_response(404)
return
# Serialize the graph to pbtxt format.
graph_pbtxt = text_format.MessageToString(graph)
# Gzip it and send it to the user.
self._send_gzip_response(graph_pbtxt, 'text/plain')
def _serve_histograms(self, query_params):
"""Given a tag and single run, return an array of histogram values."""
tag = query_params.get('tag')
run = query_params.get('run')
values = self._multiplexer.Histograms(run, tag)
self._send_json_response(values)
def _serve_compressed_histograms(self, query_params):
"""Given a tag and single run, return an array of compressed histograms."""
tag = query_params.get('tag')
run = query_params.get('run')
compressed_histograms = self._multiplexer.CompressedHistograms(run, tag)
if query_params.get('format') == _OutputFormat.CSV:
string_io = StringIO.StringIO()
writer = csv.writer(string_io)
# Build the headers; we have two columns for timing and two columns for
# each compressed histogram bucket.
headers = ['Wall time', 'Step']
if compressed_histograms:
bucket_count = len(compressed_histograms[0].compressed_histogram_values)
for i in xrange(bucket_count):
headers += ['Edge %d basis points' % i, 'Edge %d value' % i]
writer.writerow(headers)
for compressed_histogram in compressed_histograms:
row = [compressed_histogram.wall_time, compressed_histogram.step]
for value in compressed_histogram.compressed_histogram_values:
row += [value.rank_in_bps, value.value]
writer.writerow(row)
self._send_csv_response(string_io.getvalue())
else:
self._send_json_response(compressed_histograms)
def _serve_images(self, query_params):
"""Given a tag and list of runs, serve a list of images.
Note that the images themselves are not sent; instead, we respond with URLs
to the images. The frontend should treat these URLs as opaque and should not
try to parse information about them or generate them itself, as the format
may change.
Args:
query_params: The query parameters as a dict.
"""
tag = query_params.get('tag')
run = query_params.get('run')
images = self._multiplexer.Images(run, tag)
response = self._image_response_for_run(images, run, tag)
self._send_json_response(response)
def _serve_image(self, query_params):
"""Serves an individual image."""
tag = query_params.get('tag')
run = query_params.get('run')
index = int(query_params.get('index'))
image = self._multiplexer.Images(run, tag)[index]
encoded_image_string = image.encoded_image_string
content_type = _content_type_for_image(encoded_image_string)
self.send_response(200)
self.send_header('Content-Type', content_type)
self.send_header('Content-Length', len(encoded_image_string))
self.end_headers()
self.wfile.write(encoded_image_string)
def _query_for_individual_image(self, run, tag, index):
"""Builds a URL for accessing the specified image.
This should be kept in sync with _serve_image. Note that the URL is *not*
guaranteed to always return the same image, since images may be unloaded
from the reservoir as new images come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the image. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th
sampled image in the given run with the given tag.
"""
query_string = urllib.urlencode({
'run': run,
'tag': tag,
'index': index
})
return query_string
def _serve_runs(self, unused_query_params):
"""Return a JSON object about runs and tags.
Returns a mapping from runs to tagType to list of tags for that run.
Returns:
{runName: {images: [tag1, tag2, tag3],
scalars: [tagA, tagB, tagC],
histograms: [tagX, tagY, tagZ]}}
"""
self._send_json_response(self._multiplexer.Runs())
def _serve_index(self, unused_query_params):
"""Serves the index page (i.e., the tensorboard app itself)."""
self._serve_static_file('/dist/index.html')
def _serve_static_file(self, path):
"""Serves the static file located at the given path.
Args:
path: The path of the static file, relative to the tensorboard/ directory.
"""
# Strip off the leading forward slash.
path = path.lstrip('/')
if not self._path_is_safe(path):
logging.info('path %s not safe, sending 404' % path)
# Traversal attack, so 404.
self.send_error(404)
return
if path.startswith('external'):
path = os.path.join('../', path)
else:
path = os.path.join('tensorboard', path)
# Open the file and read it.
try:
contents = resource_loader.load_resource(path)
except IOError:
logging.info('path %s not found, sending 404' % path)
self.send_error(404)
return
self.send_response(200)
mimetype = mimetypes.guess_type(path)[0] or 'application/octet-stream'
self.send_header('Content-Type', mimetype)
self.end_headers()
self.wfile.write(contents)
def do_GET(self): # pylint: disable=invalid-name
"""Handler for all get requests."""
parsed_url = urlparse.urlparse(self.path)
# Remove a trailing slash, if present.
clean_path = parsed_url.path
if clean_path.endswith('/'):
clean_path = clean_path[:-1]
handlers = {
SCALARS_ROUTE: self._serve_scalars,
GRAPH_ROUTE: self._serve_graph,
HISTOGRAMS_ROUTE: self._serve_histograms,
COMPRESSED_HISTOGRAMS_ROUTE: self._serve_compressed_histograms,
IMAGES_ROUTE: self._serve_images,
INDIVIDUAL_IMAGE_ROUTE: self._serve_image,
RUNS_ROUTE: self._serve_runs,
'': self._serve_index
}
if clean_path in handlers:
query_params = urlparse.parse_qs(parsed_url.query)
# parse_qs returns a list of values for each key; we're only interested in
# the first.
for key in query_params:
value_count = len(query_params[key])
if value_count != 1:
self.send_error(
400,
'query parameter %s should have exactly one value, had %d' %
(key, value_count))
return
query_params[key] = query_params[key][0]
handlers[clean_path](query_params)
else:
self._serve_static_file(clean_path)
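# Illustrative wiring sketch, not part of the original module: binding an
# EventMultiplexer to this handler class. HTTPServer instantiates its
# handler with (request, client_address, server), so the multiplexer is
# bound up front with functools.partial; the port and the multiplexer
# construction are assumptions made for the example.
#
#     import functools
#     factory = functools.partial(TensorboardHandler, multiplexer)
#     httpd = BaseHTTPServer.HTTPServer(('localhost', 6006), factory)
#     httpd.serve_forever()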
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements raw HID device communication on Windows."""
import ctypes
from ctypes import wintypes
import platform
from pyu2f import errors
from pyu2f.hid import base
# Load relevant DLLs
hid = ctypes.windll.Hid
setupapi = ctypes.windll.SetupAPI
kernel32 = ctypes.windll.Kernel32
# Various structs that are used in the Windows APIs we call
class GUID(ctypes.Structure):
_fields_ = [("Data1", ctypes.c_ulong),
("Data2", ctypes.c_ushort),
("Data3", ctypes.c_ushort),
("Data4", ctypes.c_ubyte * 8)]
# On Windows, SetupAPI.h packs structures differently in 64bit and
# 32bit mode. In 64bit mode, the structures are packed on 8 byte
# boundaries, while in 32bit mode, they are packed on 1 byte boundaries.
# This is important to get right for some API calls that fill out these
# structures.
if platform.architecture()[0] == "64bit":
SETUPAPI_PACK = 8
elif platform.architecture()[0] == "32bit":
SETUPAPI_PACK = 1
else:
raise errors.HidError("Unknown architecture: %s" % platform.architecture()[0])
class DeviceInterfaceData(ctypes.Structure):
_fields_ = [("cbSize", wintypes.DWORD),
("InterfaceClassGuid", GUID),
("Flags", wintypes.DWORD),
("Reserved", ctypes.POINTER(ctypes.c_ulong))]
_pack_ = SETUPAPI_PACK
class DeviceInterfaceDetailData(ctypes.Structure):
_fields_ = [("cbSize", wintypes.DWORD),
("DevicePath", ctypes.c_byte * 1)]
_pack_ = SETUPAPI_PACK
class HidAttributes(ctypes.Structure):
_fields_ = [("Size", ctypes.c_ulong),
("VendorID", ctypes.c_ushort),
("ProductID", ctypes.c_ushort),
("VersionNumber", ctypes.c_ushort)]
class HidCapabilities(ctypes.Structure):
_fields_ = [("Usage", ctypes.c_ushort),
("UsagePage", ctypes.c_ushort),
("InputReportByteLength", ctypes.c_ushort),
("OutputReportByteLength", ctypes.c_ushort),
("FeatureReportByteLength", ctypes.c_ushort),
("Reserved", ctypes.c_ushort * 17),
("NotUsed", ctypes.c_ushort * 10)]
# Various void* aliases for readability.
HDEVINFO = ctypes.c_void_p
HANDLE = ctypes.c_void_p
PHIDP_PREPARSED_DATA = ctypes.c_void_p # pylint: disable=invalid-name
# This is a HANDLE.
INVALID_HANDLE_VALUE = 0xffffffffL
# Status codes
NTSTATUS = ctypes.c_long
HIDP_STATUS_SUCCESS = 0x00110000L
FILE_SHARE_READ = 0x00000001L
FILE_SHARE_WRITE = 0x00000002L
OPEN_EXISTING = 0x03
ERROR_ACCESS_DENIED = 0x05
# CreateFile Flags
GENERIC_WRITE = 0x40000000L
GENERIC_READ = 0x80000000L
# Function signatures
hid.HidD_GetHidGuid.restype = None
hid.HidD_GetHidGuid.argtypes = [ctypes.POINTER(GUID)]
hid.HidD_GetAttributes.restype = wintypes.BOOLEAN
hid.HidD_GetAttributes.argtypes = [HANDLE, ctypes.POINTER(HidAttributes)]
hid.HidD_GetPreparsedData.restype = wintypes.BOOLEAN
hid.HidD_GetPreparsedData.argtypes = [HANDLE,
ctypes.POINTER(PHIDP_PREPARSED_DATA)]
hid.HidD_FreePreparsedData.restype = wintypes.BOOLEAN
hid.HidD_FreePreparsedData.argtypes = [PHIDP_PREPARSED_DATA]
hid.HidD_GetProductString.restype = wintypes.BOOLEAN
hid.HidD_GetProductString.argtypes = [HANDLE, ctypes.c_void_p, ctypes.c_ulong]
hid.HidP_GetCaps.restype = NTSTATUS
hid.HidP_GetCaps.argtypes = [PHIDP_PREPARSED_DATA,
ctypes.POINTER(HidCapabilities)]
setupapi.SetupDiGetClassDevsA.argtypes = [ctypes.POINTER(GUID), ctypes.c_char_p,
wintypes.HWND, wintypes.DWORD]
setupapi.SetupDiGetClassDevsA.restype = HDEVINFO
setupapi.SetupDiEnumDeviceInterfaces.restype = wintypes.BOOL
setupapi.SetupDiEnumDeviceInterfaces.argtypes = [
HDEVINFO, ctypes.c_void_p, ctypes.POINTER(GUID), wintypes.DWORD,
ctypes.POINTER(DeviceInterfaceData)]
setupapi.SetupDiGetDeviceInterfaceDetailA.restype = wintypes.BOOL
setupapi.SetupDiGetDeviceInterfaceDetailA.argtypes = [
HDEVINFO, ctypes.POINTER(DeviceInterfaceData),
ctypes.POINTER(DeviceInterfaceDetailData), wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD), ctypes.c_void_p]
kernel32.CreateFileA.restype = HANDLE
kernel32.CreateFileA.argtypes = [
ctypes.c_char_p, wintypes.DWORD, wintypes.DWORD, ctypes.c_void_p,
wintypes.DWORD, wintypes.DWORD, HANDLE]
kernel32.CloseHandle.restype = wintypes.BOOL
kernel32.CloseHandle.argtypes = [HANDLE]
kernel32.ReadFile.restype = wintypes.BOOL
kernel32.ReadFile.argtypes = [
HANDLE, ctypes.c_void_p, wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD), ctypes.c_void_p]
kernel32.WriteFile.restype = wintypes.BOOL
kernel32.WriteFile.argtypes = [
HANDLE, ctypes.c_void_p, wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD), ctypes.c_void_p]
def FillDeviceAttributes(device, descriptor):
"""Fill out the attributes of the device.
Fills the devices HidAttributes and product string
into the descriptor.
Args:
device: A handle to the open device
descriptor: The DeviceDescriptor to populate with the
attributes.
Returns:
None
Raises:
WindowsError when unable to obtain attributes or product
string.
"""
attributes = HidAttributes()
result = hid.HidD_GetAttributes(device, ctypes.byref(attributes))
if not result:
raise ctypes.WinError()
buf = ctypes.create_string_buffer(1024)
result = hid.HidD_GetProductString(device, buf, 1024)
if not result:
raise ctypes.WinError()
descriptor.vendor_id = attributes.VendorID
descriptor.product_id = attributes.ProductID
descriptor.product_string = ctypes.wstring_at(buf)
def FillDeviceCapabilities(device, descriptor):
"""Fill out device capabilities.
Fills the HidCapabilities of the device into descriptor.
Args:
device: A handle to the open device
descriptor: DeviceDescriptor to populate with the
capabilities
Returns:
None
Raises:
WindowsError when unable to obtain capabilities.
"""
preparsed_data = PHIDP_PREPARSED_DATA(0)
ret = hid.HidD_GetPreparsedData(device, ctypes.byref(preparsed_data))
if not ret:
raise ctypes.WinError()
try:
caps = HidCapabilities()
ret = hid.HidP_GetCaps(preparsed_data, ctypes.byref(caps))
if ret != HIDP_STATUS_SUCCESS:
raise ctypes.WinError()
descriptor.usage = caps.Usage
descriptor.usage_page = caps.UsagePage
descriptor.internal_max_in_report_len = caps.InputReportByteLength
descriptor.internal_max_out_report_len = caps.OutputReportByteLength
finally:
hid.HidD_FreePreparsedData(preparsed_data)
# The python os.open() implementation uses the windows libc
# open() function, which wraps CreateFile but does so in a way
# that doesn't let us open the device with the right set of permissions.
# Therefore, we have to directly use the Windows API calls.
# We could use PyWin32, which provides simple wrappers. However, to avoid
# requiring a PyWin32 dependency for clients, we simply also implement it
# using ctypes.
def OpenDevice(path, enum=False):
"""Open the device and return a handle to it."""
desired_access = GENERIC_WRITE | GENERIC_READ
share_mode = FILE_SHARE_READ | FILE_SHARE_WRITE
if enum:
desired_access = 0
h = kernel32.CreateFileA(path,
desired_access,
share_mode,
None, OPEN_EXISTING, 0, None)
if h == INVALID_HANDLE_VALUE:
raise ctypes.WinError()
return h
class WindowsHidDevice(base.HidDevice):
"""Implementation of raw HID interface on Windows."""
@staticmethod
def Enumerate():
"""See base class."""
hid_guid = GUID()
hid.HidD_GetHidGuid(ctypes.byref(hid_guid))
devices = setupapi.SetupDiGetClassDevsA(
ctypes.byref(hid_guid), None, None, 0x12)
index = 0
interface_info = DeviceInterfaceData()
interface_info.cbSize = ctypes.sizeof(DeviceInterfaceData) # pylint: disable=invalid-name
out = []
while True:
result = setupapi.SetupDiEnumDeviceInterfaces(
devices, 0, ctypes.byref(hid_guid), index,
ctypes.byref(interface_info))
index += 1
if not result:
break
detail_len = wintypes.DWORD()
result = setupapi.SetupDiGetDeviceInterfaceDetailA(
devices, ctypes.byref(interface_info), None, 0,
ctypes.byref(detail_len), None)
detail_len = detail_len.value
if detail_len == 0:
# skip this device, some kind of error
continue
buf = ctypes.create_string_buffer(detail_len)
interface_detail = DeviceInterfaceDetailData.from_buffer(buf)
interface_detail.cbSize = ctypes.sizeof(DeviceInterfaceDetailData)
result = setupapi.SetupDiGetDeviceInterfaceDetailA(
devices, ctypes.byref(interface_info),
ctypes.byref(interface_detail), detail_len, None, None)
if not result:
raise ctypes.WinError()
descriptor = base.DeviceDescriptor()
# This is a bit of a hack to work around a limitation of ctypes and
# "header" structures that are common in windows. DevicePath is a
# ctypes array of length 1, but it is backed with a buffer that is much
# longer and contains a null terminated string. So, we read the null
# terminated string off DevicePath here. Per the comment above, the
# alignment of this struct varies depending on architecture, but
# in all cases the path string starts 1 DWORD into the structure.
#
# The path length is:
# length of detail buffer - header length (1 DWORD)
path_len = detail_len - ctypes.sizeof(wintypes.DWORD)
descriptor.path = ctypes.string_at(
ctypes.addressof(interface_detail.DevicePath), path_len)
device = None
try:
device = OpenDevice(descriptor.path, True)
except WindowsError as e: # pylint: disable=undefined-variable
if e.winerror == ERROR_ACCESS_DENIED: # Access Denied, e.g. a keyboard
continue
else:
raise e
try:
FillDeviceAttributes(device, descriptor)
FillDeviceCapabilities(device, descriptor)
out.append(descriptor.ToPublicDict())
finally:
kernel32.CloseHandle(device)
return out
def __init__(self, path):
"""See base class."""
base.HidDevice.__init__(self, path)
self.dev = OpenDevice(path)
self.desc = base.DeviceDescriptor()
FillDeviceCapabilities(self.dev, self.desc)
def GetInReportDataLength(self):
"""See base class."""
return self.desc.internal_max_in_report_len - 1
def GetOutReportDataLength(self):
"""See base class."""
return self.desc.internal_max_out_report_len - 1
def Write(self, packet):
"""See base class."""
if len(packet) != self.GetOutReportDataLength():
raise errors.HidError("Packet length must match report data length.")
out = "".join(map(chr, [0] + packet)) # Prepend the zero-byte (report ID)
num_written = wintypes.DWORD()
ret = (
kernel32.WriteFile(
self.dev, out, len(out),
ctypes.byref(num_written), None))
if num_written.value != len(out):
raise errors.HidError(
"Failed to write complete packet. " + "Expected %d, but got %d" %
(len(out), num_written.value))
if not ret:
raise ctypes.WinError()
def Read(self):
"""See base class."""
buf = ctypes.create_string_buffer(self.desc.internal_max_in_report_len)
num_read = wintypes.DWORD()
ret = kernel32.ReadFile(
self.dev, buf, len(buf), ctypes.byref(num_read), None)
if num_read.value != self.desc.internal_max_in_report_len:
raise errors.HidError("Failed to read full length report from device.")
if not ret:
raise ctypes.WinError()
# Convert the string buffer to a list of numbers. Throw away the first
# byte, which is the report id (which we don't care about).
return map(ord, buf)[1:]
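# Illustrative usage sketch, not part of the original module: enumerate HID
# devices, open the first FIDO/U2F device (usage page 0xf1d0) and exchange
# one report. The 'usage_page' and 'path' keys are assumed to be present in
# the dictionaries produced by base.DeviceDescriptor.ToPublicDict().
#
#     for desc in WindowsHidDevice.Enumerate():
#         if desc.get('usage_page') == 0xf1d0:
#             dev = WindowsHidDevice(desc['path'])
#             dev.Write([0x00] * dev.GetOutReportDataLength())
#             report = dev.Read()
#             break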
|
|
from __future__ import division, print_function, absolute_import
import os
from ...tools.log import LoggingMgr
from ...planners.explorers.discrete import DiscreteExplorer
from ...mdp.discrete import DiscreteModel
from . import IOnlineLearner
__all__ = ['RLLearner', 'QLearner', 'RLDTLearner']
# noinspection PyAbstractClass
class RLLearner(IOnlineLearner):
"""The reinforcement learning learner interface.
Parameters
----------
max_steps : int, optional
The maximum number of steps in an iteration. Default is 100.
filename : str, optional
The name of the file to save the learner state to after each iteration.
If None is given, the learner state is not saved. Default is None.
profile : bool, optional
If True, profiling data is collected and saved to a text file.
Default is False.
"""
def __init__(self, max_steps=None, filename=None, profile=False):
super(RLLearner, self).__init__(filename)
self._logger = LoggingMgr().get_logger(self._mid)
self._step_iter = 0
self._episode_cntr = 1
self._cum_reward = 0
self._num_wins = 0
self._max_steps = max_steps if max_steps is not None else 100
self._profile = profile
def __getstate__(self):
data = super(RLLearner, self).__getstate__()
data.update(self.__dict__.copy())
remove_list = ('_id', '_logger')
for key in remove_list:
if key in data:
del data[key]
return data
def __setstate__(self, d):
super(RLLearner, self).__setstate__(d)
for name, value in d.iteritems():
setattr(self, name, value)
self._logger = LoggingMgr().get_logger(self._mid)
self._logger.debug("Episode=%d", self._episode_cntr)
def reset(self, t, **kwargs):
"""Reset reinforcement learner.
Parameters
----------
t : float
The current time (sec)
kwargs : dict, optional
Non-positional parameters, optional.
"""
super(RLLearner, self).reset(t, **kwargs)
self._step_iter = 0
def save(self, filename):
"""Save the learners state.
If profiling is turned on, profile information is saved to a `txt` file
with the same name.
Parameters
----------
filename : str
The filename to save the information to.
"""
super(RLLearner, self).save(filename)
if self._profile:
filename = os.path.splitext(self._filename)[0]
with open(filename + ".txt", "a") as f:
win_ratio = float(self._num_wins) / float(self._episode_cntr)
f.write("%d, %d, %.2f, %.2f\n" % (self._episode_cntr, self._num_wins, self._cum_reward, win_ratio))
def learn(self, experience=None):
"""Learn a policy from the experience.
Parameters
----------
experience : Experience
The agent's experience consisting of the previous state, the action performed
in that state, the current state and the reward awarded.
"""
self._logger.info(experience)
if self._profile and experience.reward is not None:
if experience.reward > 0.0:
self._num_wins += 1
self._cum_reward += experience.reward
self._logger.debug("cumReward: %.2f", self._cum_reward)
class QLearner(RLLearner):
"""Performs q-learning.
Q-learning is a model-free, off-policy reinforcement learning algorithm
that learns state-action values from observed transitions.
Parameters
----------
explorer : Explorer, optional
The exploration strategy used. Default is no exploration.
max_steps : int, optional
The maximum number of steps in an iteration. Default is 100
alpha : float, optional
The learning rate. Default is 0.5.
gamma : float, optional
The discounting factor. Default is 0.9.
filename : str, optional
The name of the file to save the learner state to after each iteration.
If None is given, the learner state is not saved. Default is None.
profile : bool, optional
If True, profiling data is collected and saved to a text file.
Default is False.
"""
def __init__(self, explorer=None, max_steps=None, alpha=None, gamma=None, filename=None, profile=False):
super(QLearner, self).__init__(max_steps, filename, profile)
self._model = DiscreteModel()
self._explorer = explorer if explorer is not None else DiscreteExplorer()
""":type: Explorer"""
self._alpha = alpha if alpha is not None else 0.5
self._gamma = gamma if gamma is not None else 0.9
def execute(self, experience):
"""Execute learning specific updates.
Learning specific updates are performed, e.g. model updates.
Parameters
----------
experience : Experience
The actor's current experience consisting of previous state, the action
performed in that state, the current state, and the reward awarded.
"""
self._model.update(experience)
def learn(self, experience=None):
""" Learn a policy from the experience.
By updating the Q table according to the experience a policy is learned.
Parameters
----------
experience : Experience
The actor's current experience consisting of previous state, the action
performed in that state, the current state, and the reward awarded.
"""
super(QLearner, self).learn(experience)
info = self._model.statespace[experience.state]
info2 = self._model.statespace[experience.next_state]
qvalue = info.q[experience.action]
maxq = max([info2.q[a] for a in self._model.get_actions(experience.next_state)])
delta = experience.reward + self._gamma * maxq - qvalue
info.q[experience.action] = qvalue + self._alpha * delta
self._logger.debug("%s action=%s reward=%.2f %s d=%.2f", experience.state, experience.action, experience.reward,
experience.next_state, delta)
self._logger.debug("\tq_old=%.2f visits=%d", qvalue, info.models[experience.action].visits)
self._logger.debug("\tq_new=%.2f", info.q[experience.action])
def choose_action(self, state):
"""Choose the next action
The next action is chosen according to the current policy and the
selected exploration strategy.
Parameters
----------
state : State
The current state.
Returns
-------
Action :
The chosen action.
"""
self._model.add_state(state)
action = None
if self._step_iter < self._max_steps:
actions = self._model.get_actions(state)
info = self._model.statespace[state]
action = self._explorer.choose_action(actions, [info.q[a] for a in actions])
self._logger.debug("state=%s act=%s value=%.2f", state, action, self._model.statespace[state].q[action])
return action
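# Standalone sketch of the tabular update performed by QLearner.learn() above,
# kept library-free for clarity; it is not part of the original module. `q`
# maps state -> {action: value}; alpha and gamma mirror the defaults above.
def _tabular_q_update(q, state, action, reward, next_state, alpha=0.5, gamma=0.9):
    max_next = max(q[next_state].values()) if q[next_state] else 0.0
    delta = reward + gamma * max_next - q[state][action]
    q[state][action] += alpha * delta
    return delta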
class RLDTLearner(RLLearner):
"""Performs reinforcement learning using decision trees.
Reinforcement learning using decision trees (RL-DT) use decision trees
to build the transition and reward models as described by Todd Hester and
Peter Stone [1]_.
Parameters
----------
planner : IPlanner
The planner to use to determine the best action.
max_steps : int, optional
The maximum number of steps in an iteration. Default is 100.
filename : str, optional
The name of the file to save the learner state to after each iteration.
If None is given, the learner state is not saved. Default is None.
profile : bool, optional
If True, profiling data is collected and saved to a text file.
Default is False.
References
----------
.. [1] Hester, Todd, and Peter Stone. "Generalized model learning for reinforcement
learning in factored domains." Proceedings of The 8th International Conference on
Autonomous Agents and Multiagent Systems-Volume 2. International Foundation for Autonomous
Agents and Multiagent Systems, 2009.
"""
def __init__(self, planner, max_steps=None, filename=None, profile=False):
super(RLDTLearner, self).__init__(max_steps, filename, profile)
self._do_plan = True
self._planner = planner
def __getstate__(self):
data = super(RLDTLearner, self).__getstate__()
data.update({'_planner': self._planner})
return data
def __setstate__(self, d):
super(RLDTLearner, self).__setstate__(d)
for name, value in d.iteritems():
setattr(self, name, value)
self._do_plan = False
def execute(self, experience):
"""Execute learning specific updates.
Learning specific updates are performed, e.g. model updates.
Parameters
----------
experience : Experience
The actor's current experience consisting of previous state, the action
performed in that state, the current state, and the reward awarded.
"""
self._do_plan = self._planner.model.update(experience)
def learn(self, experience=None):
"""Learn a policy from the experience.
A policy is learned from the experience by building the MDP model.
Parameters
----------
experience : Experience
The actor's current experience consisting of previous state, the action
performed in that state, the current state, and the reward awarded.
"""
super(RLDTLearner, self).learn(experience)
if self._do_plan:
self._planner.plan()
def choose_action(self, state):
"""Choose the next action
The next action is chosen according to the current policy and the
selected exploration strategy.
Parameters
----------
state : State
The current state.
Returns
-------
Action :
The chosen action.
"""
action = None
if self._step_iter < self._max_steps:
action = self._planner.get_next_action(state)
self._step_iter += 1
return action
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import datetime
import copy
import argparse
import json
import ast
import base64
from functools import wraps
from decimal import Decimal
from typing import Optional, TYPE_CHECKING
from . import util, ecc
from .util import bfh, bh2u, format_satoshis, json_decode, json_encode, is_hash256_str, is_hex_str, to_bytes
from . import bitcoin
from .bitcoin import is_address, hash_160, COIN, TYPE_ADDRESS
from .bip32 import BIP32Node
from .i18n import _
from .transaction import Transaction, multisig_script, TxOutput
from .paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .synchronizer import Notifier
from .wallet import Abstract_Wallet, create_new_wallet, restore_wallet_from_text
from .address_synchronizer import TX_HEIGHT_LOCAL
if TYPE_CHECKING:
from .network import Network
from .simple_config import SimpleConfig
known_commands = {}
def satoshis(amount):
# satoshi conversion must not be performed by the parser
return int(COIN*Decimal(amount)) if amount not in ['!', None] else amount
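# Example values, assuming COIN == 100000000 (satoshis per BTC):
#   satoshis('0.001') -> 100000
#   satoshis('!')     -> '!'    ('!' is the max-spend sentinel, passed through)
#   satoshis(None)    -> None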
class Command:
def __init__(self, func, s):
self.name = func.__name__
self.requires_network = 'n' in s
self.requires_wallet = 'w' in s
self.requires_password = 'p' in s
self.description = func.__doc__
self.help = self.description.split('.')[0] if self.description else None
varnames = func.__code__.co_varnames[1:func.__code__.co_argcount]
self.defaults = func.__defaults__
if self.defaults:
n = len(self.defaults)
self.params = list(varnames[:-n])
self.options = list(varnames[-n:])
else:
self.params = list(varnames)
self.options = []
self.defaults = []
def command(s):
def decorator(func):
global known_commands
name = func.__name__
known_commands[name] = Command(func, s)
@wraps(func)
def func_wrapper(*args, **kwargs):
c = known_commands[func.__name__]
wallet = args[0].wallet
password = kwargs.get('password')
if c.requires_wallet and wallet is None:
raise Exception("wallet not loaded. Use 'electrum daemon load_wallet'")
if c.requires_password and password is None and wallet.has_password():
return {'error': 'Password required' }
return func(*args, **kwargs)
return func_wrapper
return decorator
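# Illustrative sketch, not part of the original module: how a new RPC would
# be registered on the Commands class below using the decorator above. The
# flags string works as in the existing commands ('w' requires a loaded
# wallet, 'p' a password, 'n' the network); the command itself is
# hypothetical.
#
#     @command('w')
#     def walletpath(self):
#         """Return the file path of the currently loaded wallet."""
#         return self.wallet.storage.path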
class Commands:
def __init__(self, config: 'SimpleConfig', wallet: Abstract_Wallet,
network: Optional['Network'], callback=None):
self.config = config
self.wallet = wallet
self.network = network
self._callback = callback
def _run(self, method, args, password_getter):
# this wrapper is called from the python console
cmd = known_commands[method]
if cmd.requires_password and self.wallet.has_password():
password = password_getter()
if password is None:
return
else:
password = None
f = getattr(self, method)
if cmd.requires_password:
result = f(*args, **{'password':password})
else:
result = f(*args)
if self._callback:
self._callback()
return result
@command('')
def commands(self):
"""List of commands"""
return ' '.join(sorted(known_commands.keys()))
@command('')
def create(self, passphrase=None, password=None, encrypt_file=True, segwit=False):
"""Create a new wallet.
If you want to be prompted for an argument, type '?' or ':' (concealed)
"""
d = create_new_wallet(path=self.config.get_wallet_path(),
passphrase=passphrase,
password=password,
encrypt_file=encrypt_file,
segwit=segwit)
return {
'seed': d['seed'],
'path': d['wallet'].storage.path,
'msg': d['msg'],
}
@command('')
def restore(self, text, passphrase=None, password=None, encrypt_file=True):
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys.
If you want to be prompted for an argument, type '?' or ':' (concealed)
"""
d = restore_wallet_from_text(text,
path=self.config.get_wallet_path(),
passphrase=passphrase,
password=password,
encrypt_file=encrypt_file,
network=self.network)
return {
'path': d['wallet'].storage.path,
'msg': d['msg'],
}
@command('wp')
def password(self, password=None, new_password=None):
"""Change wallet password. """
if self.wallet.storage.is_encrypted_with_hw_device() and new_password:
raise Exception("Can't change the password of a wallet encrypted with a hw device.")
b = self.wallet.storage.is_encrypted()
self.wallet.update_password(password, new_password, b)
self.wallet.storage.write()
return {'password':self.wallet.has_password()}
@command('w')
def get(self, key):
"""Return item from wallet storage"""
return self.wallet.storage.get(key)
@command('')
def getconfig(self, key):
"""Return a configuration variable. """
return self.config.get(key)
@classmethod
def _setconfig_normalize_value(cls, key, value):
if key not in ('rpcuser', 'rpcpassword'):
value = json_decode(value)
try:
value = ast.literal_eval(value)
except:
pass
return value
@command('')
def setconfig(self, key, value):
"""Set a configuration variable. 'value' may be a string or a Python expression."""
value = self._setconfig_normalize_value(key, value)
self.config.set_key(key, value)
return True
@command('')
def make_seed(self, nbits=132, language=None, segwit=False):
"""Create a seed"""
from .mnemonic import Mnemonic
t = 'segwit' if segwit else 'standard'
s = Mnemonic(language).make_seed(t, nbits)
return s
@command('n')
def getaddresshistory(self, address):
"""Return the transaction history of any address. Note: This is a
walletless server query, results are not checked by SPV.
"""
sh = bitcoin.address_to_scripthash(address)
return self.network.run_from_another_thread(self.network.get_history_for_scripthash(sh))
@command('w')
def listunspent(self):
"""List unspent outputs. Returns the list of unspent transaction
outputs in your wallet."""
l = copy.deepcopy(self.wallet.get_utxos())
for i in l:
v = i["value"]
i["value"] = str(Decimal(v)/COIN) if v is not None else None
return l
@command('n')
def getaddressunspent(self, address):
"""Returns the UTXO list of any address. Note: This
is a walletless server query, results are not checked by SPV.
"""
sh = bitcoin.address_to_scripthash(address)
return self.network.run_from_another_thread(self.network.listunspent_for_scripthash(sh))
@command('')
def serialize(self, jsontx):
"""Create a transaction from json inputs.
Inputs must have a redeemPubkey.
Outputs must be a list of {'address':address, 'value':satoshi_amount}.
"""
keypairs = {}
inputs = jsontx.get('inputs')
outputs = jsontx.get('outputs')
locktime = jsontx.get('lockTime', 0)
for txin in inputs:
if txin.get('output'):
prevout_hash, prevout_n = txin['output'].split(':')
txin['prevout_n'] = int(prevout_n)
txin['prevout_hash'] = prevout_hash
sec = txin.get('privkey')
if sec:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
pubkey = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
keypairs[pubkey] = privkey, compressed
txin['type'] = txin_type
txin['x_pubkeys'] = [pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
outputs = [TxOutput(TYPE_ADDRESS, x['address'], int(x['value'])) for x in outputs]
tx = Transaction.from_io(inputs, outputs, locktime=locktime)
tx.sign(keypairs)
return tx.as_dict()
@command('wp')
def signtransaction(self, tx, privkey=None, password=None):
"""Sign a transaction. The wallet keys will be used unless a private key is provided."""
tx = Transaction(tx)
if privkey:
txin_type, privkey2, compressed = bitcoin.deserialize_privkey(privkey)
pubkey_bytes = ecc.ECPrivkey(privkey2).get_public_key_bytes(compressed=compressed)
h160 = bitcoin.hash_160(pubkey_bytes)
x_pubkey = 'fd' + bh2u(b'\x00' + h160)
tx.sign({x_pubkey:(privkey2, compressed)})
else:
self.wallet.sign_transaction(tx, password)
return tx.as_dict()
@command('')
def deserialize(self, tx):
"""Deserialize a serialized transaction"""
tx = Transaction(tx)
return tx.deserialize(force_full_parse=True)
@command('n')
def broadcast(self, tx):
"""Broadcast a transaction to the network. """
tx = Transaction(tx)
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
return tx.txid()
@command('')
def createmultisig(self, num, pubkeys):
"""Create multisig address"""
assert isinstance(pubkeys, list), (type(num), type(pubkeys))
redeem_script = multisig_script(pubkeys, num)
address = bitcoin.hash160_to_p2sh(hash_160(bfh(redeem_script)))
return {'address':address, 'redeemScript':redeem_script}
@command('w')
def freeze(self, address):
"""Freeze address. Freeze the funds at one of your wallet\'s addresses"""
return self.wallet.set_frozen_state_of_addresses([address], True)
@command('w')
def unfreeze(self, address):
"""Unfreeze address. Unfreeze the funds at one of your wallet\'s address"""
return self.wallet.set_frozen_state_of_addresses([address], False)
@command('wp')
def getprivatekeys(self, address, password=None):
"""Get private keys of addresses. You may pass a single wallet address, or a list of wallet addresses."""
if isinstance(address, str):
address = address.strip()
if is_address(address):
return self.wallet.export_private_key(address, password)[0]
domain = address
return [self.wallet.export_private_key(address, password)[0] for address in domain]
@command('w')
def ismine(self, address):
"""Check if address is in wallet. Return true if and only address is in wallet"""
return self.wallet.is_mine(address)
@command('')
def dumpprivkeys(self):
"""Deprecated."""
return "This command is deprecated. Use a pipe instead: 'electrum listaddresses | electrum getprivatekeys - '"
@command('')
def validateaddress(self, address):
"""Check that an address is valid. """
return is_address(address)
@command('w')
def getpubkeys(self, address):
"""Return the public keys for a wallet address. """
return self.wallet.get_public_keys(address)
@command('w')
def getbalance(self):
"""Return the balance of your wallet. """
c, u, x = self.wallet.get_balance()
out = {"confirmed": str(Decimal(c)/COIN)}
if u:
out["unconfirmed"] = str(Decimal(u)/COIN)
if x:
out["unmatured"] = str(Decimal(x)/COIN)
return out
@command('n')
def getaddressbalance(self, address):
"""Return the balance of any address. Note: This is a walletless
server query, results are not checked by SPV.
"""
sh = bitcoin.address_to_scripthash(address)
out = self.network.run_from_another_thread(self.network.get_balance_for_scripthash(sh))
out["confirmed"] = str(Decimal(out["confirmed"])/COIN)
out["unconfirmed"] = str(Decimal(out["unconfirmed"])/COIN)
return out
@command('n')
def getmerkle(self, txid, height):
"""Get Merkle branch of a transaction included in a block. Electrum
uses this to verify transactions (Simple Payment Verification)."""
return self.network.run_from_another_thread(self.network.get_merkle_for_transaction(txid, int(height)))
@command('n')
def getservers(self):
"""Return the list of available servers"""
return self.network.get_servers()
@command('')
def version(self):
"""Return the version of Electrum."""
from .version import ELECTRUM_VERSION
return ELECTRUM_VERSION
@command('w')
def getmpk(self):
"""Get master public key. Return your wallet\'s master public key"""
return self.wallet.get_master_public_key()
@command('wp')
def getmasterprivate(self, password=None):
"""Get master private key. Return your wallet\'s master private key"""
return str(self.wallet.keystore.get_master_private_key(password))
@command('')
def convert_xkey(self, xkey, xtype):
"""Convert xtype of a master key. e.g. xpub -> ypub"""
try:
node = BIP32Node.from_xkey(xkey)
except:
raise Exception('xkey should be a master public/private key')
return node._replace(xtype=xtype).to_xkey()
@command('wp')
def getseed(self, password=None):
"""Get seed phrase. Print the generation seed of your wallet."""
s = self.wallet.get_seed(password)
return s
@command('wp')
def importprivkey(self, privkey, password=None):
"""Import a private key."""
if not self.wallet.can_import_privkey():
return "Error: This type of wallet cannot import private keys. Try to create a new wallet with that key."
try:
addr = self.wallet.import_private_key(privkey, password)
out = "Keypair imported: " + addr
except Exception as e:
out = "Error: " + repr(e)
return out
def _resolver(self, x):
if x is None:
return None
out = self.wallet.contacts.resolve(x)
if out.get('type') == 'openalias' and self.nocheck is False and out.get('validated') is False:
raise Exception('cannot verify alias', x)
return out['address']
@command('n')
def sweep(self, privkey, destination, fee=None, nocheck=False, imax=100):
"""Sweep private keys. Returns a transaction that spends UTXOs from
privkey to a destination address. The transaction is not
broadcasted."""
from .wallet import sweep
tx_fee = satoshis(fee)
privkeys = privkey.split()
self.nocheck = nocheck
#dest = self._resolver(destination)
tx = sweep(privkeys, self.network, self.config, destination, tx_fee, imax)
return tx.as_dict() if tx else None
@command('wp')
def signmessage(self, address, message, password=None):
"""Sign a message with a key. Use quotes if your message contains
whitespaces"""
sig = self.wallet.sign_message(address, message, password)
return base64.b64encode(sig).decode('ascii')
@command('')
def verifymessage(self, address, signature, message):
"""Verify a signature."""
sig = base64.b64decode(signature)
message = util.to_bytes(message)
return ecc.verify_message_with_address(address, sig, message)
def _mktx(self, outputs, fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime=None):
self.nocheck = nocheck
change_addr = self._resolver(change_addr)
domain = None if domain is None else map(self._resolver, domain)
final_outputs = []
for address, amount in outputs:
address = self._resolver(address)
amount = satoshis(amount)
final_outputs.append(TxOutput(TYPE_ADDRESS, address, amount))
coins = self.wallet.get_spendable_coins(domain, self.config)
tx = self.wallet.make_unsigned_transaction(coins, final_outputs, self.config, fee, change_addr)
if locktime is not None:
tx.locktime = locktime
if rbf is None:
rbf = self.config.get('use_rbf', True)
if rbf:
tx.set_rbf(True)
if not unsigned:
self.wallet.sign_transaction(tx, password)
return tx
@command('wp')
def payto(self, destination, amount, fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=None, password=None, locktime=None):
"""Create a transaction. """
tx_fee = satoshis(fee)
domain = from_addr.split(',') if from_addr else None
tx = self._mktx([(destination, amount)], tx_fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime)
return tx.as_dict()
@command('wp')
def paytomany(self, outputs, fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=None, password=None, locktime=None):
"""Create a multi-output transaction. """
tx_fee = satoshis(fee)
domain = from_addr.split(',') if from_addr else None
tx = self._mktx(outputs, tx_fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime)
return tx.as_dict()
@command('w')
def history(self, year=None, show_addresses=False, show_fiat=False, show_fees=False,
from_height=None, to_height=None):
"""Wallet history. Returns the transaction history of your wallet."""
kwargs = {
'show_addresses': show_addresses,
'show_fees': show_fees,
'from_height': from_height,
'to_height': to_height,
}
if year:
import time
start_date = datetime.datetime(year, 1, 1)
end_date = datetime.datetime(year+1, 1, 1)
kwargs['from_timestamp'] = time.mktime(start_date.timetuple())
kwargs['to_timestamp'] = time.mktime(end_date.timetuple())
if show_fiat:
from .exchange_rate import FxThread
fx = FxThread(self.config, None)
kwargs['fx'] = fx
return json_encode(self.wallet.get_full_history(**kwargs))
@command('w')
def setlabel(self, key, label):
"""Assign a label to an item. Item may be a bitcoin address or a
transaction ID"""
self.wallet.set_label(key, label)
@command('w')
def listcontacts(self):
"""Show your list of contacts"""
return self.wallet.contacts
@command('w')
def getalias(self, key):
"""Retrieve alias. Lookup in your list of contacts, and for an OpenAlias DNS record."""
return self.wallet.contacts.resolve(key)
@command('w')
def searchcontacts(self, query):
"""Search through contacts, return matching entries. """
results = {}
for key, value in self.wallet.contacts.items():
if query.lower() in key.lower():
results[key] = value
return results
@command('w')
def listaddresses(self, receiving=False, change=False, labels=False, frozen=False, unused=False, funded=False, balance=False):
"""List wallet addresses. Returns the list of all addresses in your wallet. Use optional arguments to filter the results."""
out = []
for addr in self.wallet.get_addresses():
if frozen and not self.wallet.is_frozen_address(addr):
continue
if receiving and self.wallet.is_change(addr):
continue
if change and not self.wallet.is_change(addr):
continue
if unused and self.wallet.is_used(addr):
continue
if funded and self.wallet.is_empty(addr):
continue
item = addr
if labels or balance:
item = (item,)
if balance:
item += (format_satoshis(sum(self.wallet.get_addr_balance(addr))),)
if labels:
item += (repr(self.wallet.labels.get(addr, '')),)
out.append(item)
return out
@command('n')
def gettransaction(self, txid):
"""Retrieve a transaction. """
tx = None
if self.wallet:
tx = self.wallet.db.get_transaction(txid)
if tx is None:
raw = self.network.run_from_another_thread(self.network.get_transaction(txid))
if raw:
tx = Transaction(raw)
else:
raise Exception("Unknown transaction")
return tx.as_dict()
@command('')
def encrypt(self, pubkey, message) -> str:
"""Encrypt a message with a public key. Use quotes if the message contains whitespaces."""
if not is_hex_str(pubkey):
raise Exception(f"pubkey must be a hex string instead of {repr(pubkey)}")
try:
message = to_bytes(message)
except TypeError:
raise Exception(f"message must be a string-like object instead of {repr(message)}")
public_key = ecc.ECPubkey(bfh(pubkey))
encrypted = public_key.encrypt_message(message)
return encrypted.decode('utf-8')
@command('wp')
def decrypt(self, pubkey, encrypted, password=None) -> str:
"""Decrypt a message encrypted with a public key."""
if not is_hex_str(pubkey):
raise Exception(f"pubkey must be a hex string instead of {repr(pubkey)}")
if not isinstance(encrypted, (str, bytes, bytearray)):
raise Exception(f"encrypted must be a string-like object instead of {repr(encrypted)}")
decrypted = self.wallet.decrypt_message(pubkey, encrypted, password)
return decrypted.decode('utf-8')
def _format_request(self, out):
pr_str = {
PR_UNKNOWN: 'Unknown',
PR_UNPAID: 'Pending',
PR_PAID: 'Paid',
PR_EXPIRED: 'Expired',
}
out['amount (BTC)'] = format_satoshis(out.get('amount'))
out['status'] = pr_str[out.get('status', PR_UNKNOWN)]
return out
@command('w')
def getrequest(self, key):
"""Return a payment request"""
r = self.wallet.get_payment_request(key, self.config)
if not r:
raise Exception("Request not found")
return self._format_request(r)
#@command('w')
#def ackrequest(self, serialized):
# """<Not implemented>"""
# pass
@command('w')
def listrequests(self, pending=False, expired=False, paid=False):
"""List the payment requests you made."""
out = self.wallet.get_sorted_requests(self.config)
if pending:
f = PR_UNPAID
elif expired:
f = PR_EXPIRED
elif paid:
f = PR_PAID
else:
f = None
if f is not None:
out = list(filter(lambda x: x.get('status')==f, out))
return list(map(self._format_request, out))
@command('w')
def createnewaddress(self):
"""Create a new receiving address, beyond the gap limit of the wallet"""
return self.wallet.create_new_address(False)
@command('w')
def getunusedaddress(self):
"""Returns the first unused address of the wallet, or None if all addresses are used.
An address is considered as used if it has received a transaction, or if it is used in a payment request."""
return self.wallet.get_unused_address()
@command('w')
def addrequest(self, amount, memo='', expiration=None, force=False):
"""Create a payment request, using the first unused address of the wallet.
The address will be considered as used after this operation.
If no payment is received, the address will be considered as unused if the payment request is deleted from the wallet."""
addr = self.wallet.get_unused_address()
if addr is None:
if force:
addr = self.wallet.create_new_address(False)
else:
return False
amount = satoshis(amount)
expiration = int(expiration) if expiration else None
req = self.wallet.make_payment_request(addr, amount, memo, expiration)
self.wallet.add_payment_request(req, self.config)
out = self.wallet.get_payment_request(addr, self.config)
return self._format_request(out)
@command('w')
def addtransaction(self, tx):
""" Add a transaction to the wallet history """
tx = Transaction(tx)
if not self.wallet.add_transaction(tx.txid(), tx):
return False
self.wallet.storage.write()
return tx.txid()
@command('wp')
def signrequest(self, address, password=None):
"Sign payment request with an OpenAlias"
alias = self.config.get('alias')
if not alias:
raise Exception('No alias in your configuration')
alias_addr = self.wallet.contacts.resolve(alias)['address']
self.wallet.sign_payment_request(address, alias, alias_addr, password)
@command('w')
def rmrequest(self, address):
"""Remove a payment request"""
return self.wallet.remove_payment_request(address, self.config)
@command('w')
def clearrequests(self):
"""Remove all payment requests"""
for k in list(self.wallet.receive_requests.keys()):
self.wallet.remove_payment_request(k, self.config)
@command('n')
def notify(self, address: str, URL: str):
"""Watch an address. Every time the address changes, a http POST is sent to the URL."""
if not hasattr(self, "_notifier"):
self._notifier = Notifier(self.network)
self.network.run_from_another_thread(self._notifier.start_watching_queue.put((address, URL)))
return True
@command('wn')
def is_synchronized(self):
""" return wallet synchronization status """
return self.wallet.is_up_to_date()
@command('n')
def getfeerate(self, fee_method=None, fee_level=None):
"""Return current suggested fee rate (in sat/kvByte), according to config
settings or supplied parameters.
"""
if fee_method is None:
dyn, mempool = None, None
elif fee_method.lower() == 'static':
dyn, mempool = False, False
elif fee_method.lower() == 'eta':
dyn, mempool = True, False
elif fee_method.lower() == 'mempool':
dyn, mempool = True, True
else:
raise Exception('Invalid fee estimation method: {}'.format(fee_method))
if fee_level is not None:
fee_level = Decimal(fee_level)
return self.config.fee_per_kb(dyn=dyn, mempool=mempool, fee_level=fee_level)
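# Example (hypothetical values, not part of the original source): assuming
# `commands` is an instance of this class, dynamic ETA-based estimation with
# the fee slider at the midpoint would be requested as
#   commands.getfeerate(fee_method='eta', fee_level=0.5)
# and the config defaults are used when both arguments are omitted.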
@command('w')
def removelocaltx(self, txid):
"""Remove a 'local' transaction from the wallet, and its dependent
transactions.
"""
if not is_hash256_str(txid):
raise Exception(f"{repr(txid)} is not a txid")
height = self.wallet.get_tx_height(txid).height
to_delete = {txid}
if height != TX_HEIGHT_LOCAL:
raise Exception(f'Only local transactions can be removed. '
f'This tx has height: {height} != {TX_HEIGHT_LOCAL}')
to_delete |= self.wallet.get_depending_transactions(txid)
for tx_hash in to_delete:
self.wallet.remove_transaction(tx_hash)
self.wallet.storage.write()
@command('wn')
def get_tx_status(self, txid):
"""Returns some information regarding the tx. For now, only confirmations.
The transaction must be related to the wallet.
"""
if not is_hash256_str(txid):
raise Exception(f"{repr(txid)} is not a txid")
if not self.wallet.db.get_transaction(txid):
raise Exception("Transaction not in wallet.")
return {
"confirmations": self.wallet.get_tx_height(txid).conf,
}
@command('')
def help(self):
# for the python console
return sorted(known_commands.keys())
def eval_bool(x: str) -> bool:
if x == 'false': return False
if x == 'true': return True
try:
return bool(ast.literal_eval(x))
except Exception:
return bool(x)
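# A few illustrative conversions (not in the original source), assuming the
# function is fed raw CLI strings:
#   eval_bool('false') -> False   # explicit keyword
#   eval_bool('0')     -> False   # ast.literal_eval('0') == 0
#   eval_bool('True')  -> True    # literal_eval handles Python literals
#   eval_bool('yes')   -> True    # literal_eval fails, bool('yes') is True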
param_descriptions = {
'privkey': 'Private key. Type \'?\' to get a prompt.',
'destination': 'Bitcoin address, contact or alias',
'address': 'Bitcoin address',
'seed': 'Seed phrase',
'txid': 'Transaction ID',
'pos': 'Position',
'height': 'Block height',
'tx': 'Serialized transaction (hexadecimal)',
'key': 'Variable name',
'pubkey': 'Public key',
'message': 'Clear text message. Use quotes if it contains spaces.',
'encrypted': 'Encrypted message',
'amount': 'Amount to be sent (in BTC). Type \'!\' to send the maximum available.',
'requested_amount': 'Requested amount (in BTC).',
'outputs': 'list of ["address", amount]',
'redeem_script': 'redeem script (hexadecimal)',
}
command_options = {
'password': ("-W", "Password"),
'new_password':(None, "New Password"),
'encrypt_file':(None, "Whether the file on disk should be encrypted with the provided password"),
'receiving': (None, "Show only receiving addresses"),
'change': (None, "Show only change addresses"),
'frozen': (None, "Show only frozen addresses"),
'unused': (None, "Show only unused addresses"),
'funded': (None, "Show only funded addresses"),
'balance': ("-b", "Show the balances of listed addresses"),
'labels': ("-l", "Show the labels of listed addresses"),
'nocheck': (None, "Do not verify aliases"),
'imax': (None, "Maximum number of inputs"),
'fee': ("-f", "Transaction fee (in BTC)"),
'from_addr': ("-F", "Source address (must be a wallet address; use sweep to spend from non-wallet address)."),
'change_addr': ("-c", "Change address. Default is a spare address, or the source address if it's not in the wallet"),
'nbits': (None, "Number of bits of entropy"),
'segwit': (None, "Create segwit seed"),
'language': ("-L", "Default language for wordlist"),
'passphrase': (None, "Seed extension"),
'privkey': (None, "Private key. Set to '?' to get a prompt."),
'unsigned': ("-u", "Do not sign transaction"),
'rbf': (None, "Replace-by-fee transaction"),
'locktime': (None, "Set locktime block number"),
'domain': ("-D", "List of addresses"),
'memo': ("-m", "Description of the request"),
'expiration': (None, "Time in seconds"),
'timeout': (None, "Timeout in seconds"),
'force': (None, "Create new address beyond gap limit, if no more addresses are available."),
'pending': (None, "Show only pending requests."),
'expired': (None, "Show only expired requests."),
'paid': (None, "Show only paid requests."),
'show_addresses': (None, "Show input and output addresses"),
'show_fiat': (None, "Show fiat value of transactions"),
'show_fees': (None, "Show miner fees paid by transactions"),
'year': (None, "Show history for a given year"),
'fee_method': (None, "Fee estimation method to use"),
'fee_level': (None, "Float between 0.0 and 1.0, representing fee slider position"),
'from_height': (None, "Only show transactions that confirmed after given block height"),
'to_height': (None, "Only show transactions that confirmed before given block height"),
}
# don't use floats because of rounding errors
from .transaction import tx_from_str
json_loads = lambda x: json.loads(x, parse_float=lambda x: str(Decimal(x)))
arg_types = {
'num': int,
'nbits': int,
'imax': int,
'year': int,
'from_height': int,
'to_height': int,
'tx': tx_from_str,
'pubkeys': json_loads,
'jsontx': json_loads,
'inputs': json_loads,
'outputs': json_loads,
'fee': lambda x: str(Decimal(x)) if x is not None else None,
'amount': lambda x: str(Decimal(x)) if x != '!' else '!',
'locktime': int,
'fee_method': str,
'fee_level': json_loads,
'encrypt_file': eval_bool,
}
config_variables = {
'addrequest': {
'requests_dir': 'directory where a bip70 file will be written.',
'ssl_privkey': 'Path to your SSL private key, needed to sign the request.',
'ssl_chain': 'Chain of SSL certificates, needed for signed requests. Put your certificate at the top and the root CA at the end',
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of bitcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
},
'listrequests':{
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of bitcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
}
}
def set_default_subparser(self, name, args=None):
"""see http://stackoverflow.com/questions/5176691/argparse-how-to-specify-a-default-subcommand"""
subparser_found = False
for arg in sys.argv[1:]:
if arg in ['-h', '--help']: # global help if no subparser
break
else:
for x in self._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in sys.argv[1:]:
subparser_found = True
if not subparser_found:
# insert default in first position, this implies no
# global options without a sub_parsers specified
if args is None:
sys.argv.insert(1, name)
else:
args.insert(0, name)
argparse.ArgumentParser.set_default_subparser = set_default_subparser
# workaround https://bugs.python.org/issue23058
# see https://github.com/nickstenning/honcho/pull/121
def subparser_call(self, parser, namespace, values, option_string=None):
from argparse import ArgumentError, SUPPRESS, _UNRECOGNIZED_ARGS_ATTR
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser {!r} (choices: {})').format(*tup)
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
argparse._SubParsersAction.__call__ = subparser_call
def add_network_options(parser):
parser.add_argument("-1", "--oneserver", action="store_true", dest="oneserver", default=None, help="connect to one server only")
parser.add_argument("-s", "--server", dest="server", default=None, help="set server host:port:protocol, where protocol is either t (tcp) or s (ssl)")
parser.add_argument("-p", "--proxy", dest="proxy", default=None, help="set proxy [type:]host[:port], where type is socks4,socks5 or http")
parser.add_argument("--noonion", action="store_true", dest="noonion", default=None, help="do not try to connect to onion servers")
parser.add_argument("--skipmerklecheck", action="store_true", dest="skipmerklecheck", default=False, help="Tolerate invalid merkle proofs from server")
def add_global_options(parser):
group = parser.add_argument_group('global options')
group.add_argument("-v", dest="verbosity", help="Set verbosity (log levels)", default='')
group.add_argument("-V", dest="verbosity_shortcuts", help="Set verbosity (shortcut-filter list)", default='')
group.add_argument("-D", "--dir", dest="electrum_path", help="electrum directory")
group.add_argument("-P", "--portable", action="store_true", dest="portable", default=False, help="Use local 'electrum_data' directory")
group.add_argument("-w", "--wallet", dest="wallet_path", help="wallet path")
group.add_argument("--testnet", action="store_true", dest="testnet", default=False, help="Use Testnet")
group.add_argument("--regtest", action="store_true", dest="regtest", default=False, help="Use Regtest")
group.add_argument("--simnet", action="store_true", dest="simnet", default=False, help="Use Simnet")
def get_parser():
# create main parser
parser = argparse.ArgumentParser(
epilog="Run 'electrum help <command>' to see the help for a command")
add_global_options(parser)
subparsers = parser.add_subparsers(dest='cmd', metavar='<command>')
# gui
parser_gui = subparsers.add_parser('gui', description="Run Electrum's Graphical User Interface.", help="Run GUI (default)")
parser_gui.add_argument("url", nargs='?', default=None, help="bitcoin URI (or bip70 file)")
parser_gui.add_argument("-g", "--gui", dest="gui", help="select graphical user interface", choices=['qt', 'kivy', 'text', 'stdio'])
parser_gui.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
parser_gui.add_argument("-m", action="store_true", dest="hide_gui", default=False, help="hide GUI on startup")
parser_gui.add_argument("-L", "--lang", dest="language", default=None, help="default language used in GUI")
parser_gui.add_argument("--daemon", action="store_true", dest="daemon", default=False, help="keep daemon running after GUI is closed")
add_network_options(parser_gui)
add_global_options(parser_gui)
# daemon
parser_daemon = subparsers.add_parser('daemon', help="Run Daemon")
parser_daemon.add_argument("subcommand", choices=['start', 'status', 'stop', 'load_wallet', 'close_wallet'], nargs='?')
#parser_daemon.set_defaults(func=run_daemon)
add_network_options(parser_daemon)
add_global_options(parser_daemon)
# commands
for cmdname in sorted(known_commands.keys()):
cmd = known_commands[cmdname]
p = subparsers.add_parser(cmdname, help=cmd.help, description=cmd.description)
add_global_options(p)
for optname, default in zip(cmd.options, cmd.defaults):
a, help = command_options[optname]
b = '--' + optname
action = "store_true" if default is False else 'store'
args = (a, b) if a else (b,)
if action == 'store':
_type = arg_types.get(optname, str)
p.add_argument(*args, dest=optname, action=action, default=default, help=help, type=_type)
else:
p.add_argument(*args, dest=optname, action=action, default=default, help=help)
for param in cmd.params:
h = param_descriptions.get(param, '')
_type = arg_types.get(param, str)
p.add_argument(param, help=h, type=_type)
cvh = config_variables.get(cmdname)
if cvh:
group = p.add_argument_group('configuration variables', '(set with setconfig/getconfig)')
for k, v in cvh.items():
group.add_argument(k, nargs='?', help=v)
# 'gui' is the default command
parser.set_default_subparser('gui')
return parser
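# Illustrative sketch of how the default sub-command behaves (hypothetical
# invocation, not part of the original source):
#
#   parser = get_parser()       # get_parser() calls set_default_subparser('gui'),
#                               # which inserts 'gui' into sys.argv when no known
#                               # sub-command is present
#   args = parser.parse_args()  # reads sys.argv, so a bare `electrum` invocation
#                               # is parsed as `electrum gui`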
|
|
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
import xml.etree.ElementTree as ET
import os
import pickle
import numpy as np
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
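# Illustrative sketch (not part of the original evaluation code): AP for a
# hypothetical three-point precision/recall curve, using the exact
# area-under-curve computation rather than the VOC07 11-point metric.
def _voc_ap_example():
    rec = np.array([0.1, 0.5, 1.0])
    prec = np.array([1.0, 0.8, 0.4])
    return voc_ap(rec, prec, use_07_metric=False)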
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath.format(imagename))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, 'rb') as f:
recs = pickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
if BB.shape[0] > 0:
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
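# Example call (all paths below are hypothetical placeholders, shown only to
# illustrate the expected format strings, each with one `{:s}` slot):
#
#   rec, prec, ap = voc_eval(
#       'results/comp4_det_test_{:s}.txt',   # detpath.format(classname)
#       'VOC2007/Annotations/{:s}.xml',      # annopath.format(imagename)
#       'VOC2007/ImageSets/Main/test.txt',   # one image id per line
#       'car',                               # classname
#       'annotations_cache',                 # cachedir
#       ovthresh=0.5,
#       use_07_metric=True)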
|
|
"""Tests which check the various ways you can set DJANGO_SETTINGS_MODULE
If these tests fail you probably forgot to run "python setup.py develop".
"""
import django
import pytest
BARE_SETTINGS = '''
# At least one database must be configured
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
},
}
SECRET_KEY = 'foobar'
'''
def test_ds_env(testdir, monkeypatch):
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'tpkg.settings_env')
pkg = testdir.mkpydir('tpkg')
settings = pkg.join('settings_env.py')
settings.write(BARE_SETTINGS)
testdir.makepyfile("""
import os
def test_settings():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_env'
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*1 passed*'])
assert result.ret == 0
def test_ds_ini(testdir, monkeypatch):
"DSM env should override ini."
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'tpkg.settings_ini')
testdir.makeini("""\
[pytest]
DJANGO_SETTINGS_MODULE = DO_NOT_USE_ini
""")
pkg = testdir.mkpydir('tpkg')
settings = pkg.join('settings_ini.py')
settings.write(BARE_SETTINGS)
testdir.makepyfile("""
import os
def test_ds():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_ini'
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*1 passed*'])
assert result.ret == 0
def test_ds_option(testdir, monkeypatch):
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'DO_NOT_USE_env')
testdir.makeini("""
[pytest]
DJANGO_SETTINGS_MODULE = DO_NOT_USE_ini
""")
pkg = testdir.mkpydir('tpkg')
settings = pkg.join('settings_opt.py')
settings.write(BARE_SETTINGS)
testdir.makepyfile("""
import os
def test_ds():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_opt'
""")
result = testdir.runpytest('--ds=tpkg.settings_opt')
result.stdout.fnmatch_lines(['*1 passed*'])
assert result.ret == 0
def test_ds_non_existent(testdir, monkeypatch):
"""
Make sure we do not fail with INTERNALERROR if an incorrect
DJANGO_SETTINGS_MODULE is given.
"""
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'DOES_NOT_EXIST')
testdir.makepyfile('def test_ds(): pass')
result = testdir.runpytest()
result.stderr.fnmatch_lines(["*ImportError:*DOES_NOT_EXIST*"])
assert result.ret != 0
def test_ds_after_user_conftest(testdir, monkeypatch):
"""
Test that the settings module can be imported, after pytest has adjusted
the sys.path.
"""
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'settings_after_conftest')
testdir.makepyfile('def test_ds(): pass')
testdir.makepyfile(settings_after_conftest="SECRET_KEY='secret'")
# testdir.makeconftest("import sys; print(sys.path)")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines(['*1 passed*'])
assert result.ret == 0
def test_ds_in_pytest_configure(testdir, monkeypatch):
monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
pkg = testdir.mkpydir('tpkg')
settings = pkg.join('settings_ds.py')
settings.write(BARE_SETTINGS)
testdir.makeconftest("""
import os
from django.conf import settings
def pytest_configure():
if not settings.configured:
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'tpkg.settings_ds')
""")
r = testdir.runpytest()
assert r.ret == 0
def test_django_settings_configure(testdir, monkeypatch):
"""
Make sure Django can be configured without setting
DJANGO_SETTINGS_MODULE altogether, relying on calling
django.conf.settings.configure() and then invoking pytest.
"""
monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
p = testdir.makepyfile(run="""
from django.conf import settings
settings.configure(SECRET_KEY='set from settings.configure()',
DATABASES={'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}},
INSTALLED_APPS=['django.contrib.auth',
'django.contrib.contenttypes',])
import pytest
pytest.main()
""")
testdir.makepyfile("""
import pytest
from django.conf import settings
from django.test.client import RequestFactory
from django.test import TestCase
from django.contrib.auth.models import User
def test_access_to_setting():
assert settings.SECRET_KEY == 'set from settings.configure()'
# This test requires Django to be properly configured to be run
def test_rf(rf):
assert isinstance(rf, RequestFactory)
# This tests that pytest-django actually configures the database
# according to the settings above
class ATestCase(TestCase):
def test_user_count(self):
assert User.objects.count() == 0
@pytest.mark.django_db
def test_user_count():
assert User.objects.count() == 0
""")
result = testdir.runpython(p)
result.stdout.fnmatch_lines([
"*4 passed*",
])
def test_settings_in_hook(testdir, monkeypatch):
monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
testdir.makeconftest("""
from django.conf import settings
def pytest_configure():
settings.configure(SECRET_KEY='set from pytest_configure',
DATABASES={'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'}},
INSTALLED_APPS=['django.contrib.auth',
'django.contrib.contenttypes',])
""")
testdir.makepyfile("""
import pytest
from django.conf import settings
from django.contrib.auth.models import User
def test_access_to_setting():
assert settings.SECRET_KEY == 'set from pytest_configure'
@pytest.mark.django_db
def test_user_count():
assert User.objects.count() == 0
""")
r = testdir.runpytest()
assert r.ret == 0
def test_django_not_loaded_without_settings(testdir, monkeypatch):
"""
Make sure Django is not imported at all if no Django settings module is specified.
"""
monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
testdir.makepyfile("""
import sys
def test_settings():
assert 'django' not in sys.modules
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*1 passed*'])
assert result.ret == 0
def test_debug_false(testdir, monkeypatch):
monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
testdir.makeconftest("""
from django.conf import settings
def pytest_configure():
settings.configure(SECRET_KEY='set from pytest_configure',
DEBUG=True,
DATABASES={'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'}},
INSTALLED_APPS=['django.contrib.auth',
'django.contrib.contenttypes',])
""")
testdir.makepyfile("""
from django.conf import settings
def test_debug_is_false():
assert settings.DEBUG is False
""")
r = testdir.runpytest()
assert r.ret == 0
@pytest.mark.skipif(not hasattr(django, 'setup'),
reason="This Django version does not support app loading")
@pytest.mark.django_project(extra_settings="""
INSTALLED_APPS = [
'tpkg.app.apps.TestApp',
]
""")
def test_django_setup_sequence(django_testdir):
django_testdir.create_app_file("""
from django.apps import apps, AppConfig
class TestApp(AppConfig):
name = 'tpkg.app'
def ready(self):
print ('READY(): populating=%r' % apps._lock.locked())
""", 'apps.py')
django_testdir.create_app_file("""
from django.apps import apps
print ('IMPORT: populating=%r,ready=%r' % (
apps._lock.locked(), apps.ready))
SOME_THING = 1234
""", 'models.py')
django_testdir.create_app_file("", '__init__.py')
django_testdir.makepyfile("""
from django.apps import apps
from tpkg.app.models import SOME_THING
def test_anything():
print ('TEST: populating=%r,ready=%r' % (
apps._lock.locked(), apps.ready))
""")
result = django_testdir.runpytest('-s', '--tb=line')
result.stdout.fnmatch_lines(['*IMPORT: populating=True,ready=False*'])
result.stdout.fnmatch_lines(['*READY(): populating=True*'])
result.stdout.fnmatch_lines(['*TEST: populating=False,ready=True*'])
assert result.ret == 0
def test_no_ds_but_django_imported(testdir, monkeypatch):
"""pytest-django should not bail out, if "django" has been imported
somewhere, e.g. via pytest-splinter."""
monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
testdir.makepyfile("""
import os
import django
from pytest_django.lazy_django import django_settings_is_configured
def test_django_settings_is_configured():
assert django_settings_is_configured() is False
def test_env():
assert 'DJANGO_SETTINGS_MODULE' not in os.environ
def test_cfg(pytestconfig):
assert pytestconfig.option.ds is None
""")
r = testdir.runpytest('-s')
assert r.ret == 0
|
|
"""A friendly Python SFTP interface."""
from __future__ import print_function
import os
from contextlib import contextmanager
import socket
from stat import S_IMODE, S_ISDIR, S_ISREG
import tempfile
import paramiko
from paramiko import SSHException # make available
from paramiko import AuthenticationException # make available
from paramiko import AgentKey
__version__ = "0.2.8"
# pylint: disable = R0913
def st_mode_to_int(val):
'''SFTPAttributes st_mode returns a stat type that shows more than what
can be set. Trim off those bits and convert to an int representation.
If you want an object that was `chmod 711` to return a value of 711, use
this function.
:param int val: the value of an st_mode attr returned by SFTPAttributes
:returns int: integer representation of octal mode
'''
return int(str(oct(S_IMODE(val)))[-3:])
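# Illustrative example (not in the original source): a regular file created
# with `chmod 644` has st_mode 0o100644, and
#   st_mode_to_int(0o100644) == 644
# i.e. the file-type bits are masked off and the octal digits come back as a
# plain int.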
class ConnectionException(Exception):
"""Exception raised for connection problems
Attributes:
message -- explanation of the error
"""
def __init__(self, host, port):
# Call the base class constructor with the parameters it needs
Exception.__init__(self, host, port)
self.message = 'Could not connect to host:port. %s:%s' % (host, port)
class CredentialException(Exception):
"""Exception raised for credential problems
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
# Call the base class constructor with the parameters it needs
Exception.__init__(self, message)
self.message = message
class WTCallbacks(object):
'''an object to house the callbacks, used internally
:ivar flist: list of files currently traversed
:ivar dlist: list of directories currently traversed
:ivar ulist: list of unknown entities currently traversed
'''
def __init__(self):
'''set instance vars'''
self.flist = []
self.dlist = []
self.ulist = []
def file_cb(self, pathname):
'''called for regular files, appends pathname to .flist
:param str pathname: file path
'''
self.flist.append(pathname)
def dir_cb(self, pathname):
'''called for directories, appends pathname to .dlist
:param str pathname: directory path
'''
self.dlist.append(pathname)
def unk_cb(self, pathname):
'''called for unknown file types, appends pathname to .ulist
:param str pathname: unknown entity path
'''
self.ulist.append(pathname)
class Connection(object):
"""Connects and logs into the specified hostname.
Arguments that are not given are guessed from the environment.
:param str host:
The Hostname or IP of the remote machine.
:param str|None username: *Default: None* -
Your username at the remote machine.
:param str|obj|None private_key: *Default: None* -
path to private key file(str) or paramiko.AgentKey
:param str|None password: *Default: None* -
Your password at the remote machine.
:param int port: *Default: 22* -
The SSH port of the remote machine.
:param str|None private_key_pass: *Default: None* -
password to use, if private_key is encrypted.
:param list|None ciphers: *Default: None* -
List of ciphers to use in order.
:param bool|str log: *Default: False* -
log connection/handshake details? If set to True,
pysftp creates a temporary file and logs to that. If set to a valid
path and filename, pysftp logs to that. The name of the logfile can
be found at ``.logfile``
:returns: (obj) connection to the requested host
:raises ConnectionException:
:raises CredentialException:
:raises SSHException:
:raises AuthenticationException:
:raises PasswordRequiredException:
"""
def __init__(self,
host,
username=None,
private_key=None,
password=None,
port=22,
private_key_pass=None,
ciphers=None,
log=False,
):
self._sftp_live = False
self._sftp = None
if not username:
username = os.environ['LOGNAME']
self._logfile = log
if log:
if isinstance(log, bool):
# Log to a temporary file.
fhnd, self._logfile = tempfile.mkstemp('.txt', 'ssh-')
os.close(fhnd) # don't want os file descriptors open
paramiko.util.log_to_file(self._logfile)
# Begin the SSH transport.
self._transport_live = False
try:
self._transport = paramiko.Transport((host, port))
# Set security ciphers if set
if ciphers is not None:
self._transport.get_security_options().ciphers = ciphers
self._transport_live = True
except (AttributeError, socket.gaierror):
# couldn't connect
raise ConnectionException(host, port)
# Authenticate the transport. prefer password if given
if password is not None:
# Using Password.
self._transport.connect(username=username, password=password)
else:
# Use Private Key.
if not private_key:
# Try to use default key.
if os.path.exists(os.path.expanduser('~/.ssh/id_rsa')):
private_key = '~/.ssh/id_rsa'
elif os.path.exists(os.path.expanduser('~/.ssh/id_dsa')):
private_key = '~/.ssh/id_dsa'
else:
raise CredentialException("You have not specified a "\
"password or key.")
if not isinstance(private_key, AgentKey):
private_key_file = os.path.expanduser(private_key)
try: #try rsa
rsakey = paramiko.RSAKey
prv_key = rsakey.from_private_key_file(private_key_file,
private_key_pass)
except paramiko.SSHException: #if it fails, try dss
dsskey = paramiko.DSSKey
prv_key = dsskey.from_private_key_file(private_key_file,
private_key_pass)
else:
# use the paramiko agent key
prv_key = private_key
self._transport.connect(username=username, pkey=prv_key)
def _sftp_connect(self):
"""Establish the SFTP connection."""
if not self._sftp_live:
self._sftp = paramiko.SFTPClient.from_transport(self._transport)
self._sftp_live = True
@property
def pwd(self):
'''return the current working directory
:returns: (str) current working directory
'''
self._sftp_connect()
return self._sftp.normalize('.')
def get(self, remotepath, localpath=None, callback=None,
preserve_mtime=False):
"""Copies a file between the remote host and the local host.
:param str remotepath: the remote path and filename, source
:param str localpath:
the local path and filename to copy, destination. If not specified,
file is copied to local current working directory
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred.
:param bool preserve_mtime:
*Default: False* - make the modification time(st_mtime) on the
local file match the time on the remote. (st_atime can differ
because stat'ing the localfile can/does update its st_atime)
:returns: None
:raises: IOError
"""
if not localpath:
localpath = os.path.split(remotepath)[1]
self._sftp_connect()
if preserve_mtime:
sftpattrs = self._sftp.stat(remotepath)
self._sftp.get(remotepath, localpath, callback=callback)
if preserve_mtime:
os.utime(localpath, (sftpattrs.st_atime, sftpattrs.st_mtime))
def get_d(self, remotedir, localdir, preserve_mtime=False):
"""get the contents of remotedir and write to locadir. (non-recursive)
:param str remotedir: the remote directory to copy from (source)
:param str localdir: the local directory to copy to (target)
:param bool preserve_mtime: *Default: False* -
preserve modification time on files
:returns: None
:raises:
"""
self._sftp_connect()
with self.cd(remotedir):
for sattr in self._sftp.listdir_attr('.'):
if S_ISREG(sattr.st_mode):
rname = sattr.filename
self.get(rname, reparent(localdir, rname),
preserve_mtime=preserve_mtime)
def get_r(self, remotedir, localdir, preserve_mtime=False):
"""recursively copy remotedir structure to localdir
:param str remotedir: the remote directory to copy from
:param str localdir: the local directory to copy to
:param bool preserve_mtime: *Default: False* -
preserve modification time on files
:returns: None
:raises:
"""
self._sftp_connect()
wtcb = WTCallbacks()
self.walktree(remotedir, wtcb.file_cb, wtcb.dir_cb, wtcb.unk_cb)
# handle directories we recursed through
for dname in wtcb.dlist:
for subdir in path_advance(dname):
try:
os.mkdir(reparent(localdir, subdir))
wtcb.dlist.append(subdir)
except OSError: # dir exists
pass
for fname in wtcb.flist:
# they may have told us to start down farther, so we may not have
# recursed through some, ensure local dir structure matches
head, _ = os.path.split(fname)
if head not in wtcb.dlist:
for subdir in path_advance(head):
if subdir not in wtcb.dlist and subdir != '.':
os.mkdir(reparent(localdir, subdir))
wtcb.dlist.append(subdir)
self.get(fname,
reparent(localdir, fname),
preserve_mtime=preserve_mtime
)
def getfo(self, remotepath, flo, callback=None):
"""Copy a remote file (remotepath) to a file-like object, flo.
:param str remotepath: the remote path and filename, source
:param flo: open file like object to write, destination.
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred.
:returns: (int) the number of bytes written to the opened file object
:raises: Any exception raised by operations will be passed through.
"""
self._sftp_connect()
return self._sftp.getfo(remotepath, flo, callback=callback)
def put(self, localpath, remotepath=None, callback=None, confirm=True,
preserve_mtime=False):
"""Copies a file between the local host and the remote host.
:param str localpath: the local path and filename
:param str remotepath:
the remote path, else the remote :attr:`.pwd` and filename is used.
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred.
:param bool confirm:
whether to do a stat() on the file afterwards to confirm the file
size
:param bool preserve_mtime:
*Default: False* - make the modification time(st_mtime) on the
remote file match the time on the local. (st_atime can differ
because stat'ing the localfile can/does update its st_atime)
:returns:
(obj) SFTPAttributes containing attributes about the given file
:raises IOError: if remotepath doesn't exist
:raises OSError: if localpath doesn't exist
"""
if not remotepath:
remotepath = os.path.split(localpath)[1]
self._sftp_connect()
if preserve_mtime:
local_stat = os.stat(localpath)
times = (local_stat.st_atime, local_stat.st_mtime)
sftpattrs = self._sftp.put(localpath, remotepath, callback=callback,
confirm=confirm)
if preserve_mtime:
self._sftp.utime(remotepath, times)
sftpattrs = self._sftp.stat(remotepath)
return sftpattrs
def put_d(self, localpath, remotepath, confirm=True, preserve_mtime=False):
"""Copies a local directory's contents to a remotepath
:param str localpath: the local path to copy (source)
:param str remotepath:
the remote path to copy to (target)
:param bool confirm:
whether to do a stat() on the file afterwards to confirm the file
size
:param bool preserve_mtime:
*Default: False* - make the modification time(st_mtime) on the
remote file match the time on the local. (st_atime can differ
because stat'ing the localfile can/does update its st_atime)
:returns: None
:raises IOError: if remotepath doesn't exist
:raises OSError: if localpath doesn't exist
"""
self._sftp_connect()
wtcb = WTCallbacks()
cur_local_dir = os.getcwd()
os.chdir(localpath)
walktree('.', wtcb.file_cb, wtcb.dir_cb, wtcb.unk_cb,
recurse=False)
for fname in wtcb.flist:
src = os.path.join(localpath, fname)
dest = reparent(remotepath, fname)
# print('put', src, dest)
self.put(src, dest, confirm=confirm, preserve_mtime=preserve_mtime)
# restore local directory
os.chdir(cur_local_dir)
def put_r(self, localpath, remotepath, confirm=True, preserve_mtime=False):
"""Recursively copies a local directory's contents to a remotepath
:param str localpath: the local path to copy (source)
:param str remotepath:
the remote path to copy to (target)
:param bool confirm:
whether to do a stat() on the file afterwards to confirm the file
size
:param bool preserve_mtime:
*Default: False* - make the modification time(st_mtime) on the
remote file match the time on the local. (st_atime can differ
because stat'ing the localfile can/does update its st_atime)
:returns: None
:raises IOError: if remotepath doesn't exist
:raises OSError: if localpath doesn't exist
"""
self._sftp_connect()
wtcb = WTCallbacks()
cur_local_dir = os.getcwd()
os.chdir(localpath)
walktree('.', wtcb.file_cb, wtcb.dir_cb, wtcb.unk_cb)
# restore local directory
os.chdir(cur_local_dir)
for dname in wtcb.dlist:
#for subdir in path_advance(dname):
if dname != '.':
self.mkdir(reparent(remotepath, dname))
for fname in wtcb.flist:
head, _ = os.path.split(fname)
if head not in wtcb.dlist:
for subdir in path_advance(head):
if subdir not in wtcb.dlist and subdir != '.':
self.mkdir(reparent(remotepath, subdir))
wtcb.dlist.append(subdir)
src = os.path.join(localpath, fname)
dest = reparent(remotepath, fname)
# print('put', src, dest)
self.put(src, dest, confirm=confirm, preserve_mtime=preserve_mtime)
def putfo(self, flo, remotepath=None, file_size=0, callback=None,
confirm=True):
"""Copies the contents of a file like object to remotepath.
:param flo: a file-like object that supports .read()
:param str remotepath: the remote path.
:param int file_size:
the size of flo; if not given, the second param passed to the
callback function will always be 0.
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred.
:param bool confirm:
whether to do a stat() on the file afterwards to confirm the file
size
:returns:
(obj) SFTPAttributes containing attributes about the given file
:raises: TypeError, if remotepath not specified, any underlying error
"""
self._sftp_connect()
return self._sftp.putfo(flo, remotepath, file_size=file_size,
callback=callback, confirm=confirm)
def execute(self, command):
"""Execute the given commands on a remote machine. The command is
executed without regard to the remote :attr:`.pwd`.
:param str command: the command to execute.
:returns: (list of str) representing the results of the command
:raises: Any exception raised by command will be passed through.
"""
channel = self._transport.open_session()
channel.exec_command(command)
output = channel.makefile('rb', -1).readlines()
if output:
return output
else:
return channel.makefile_stderr('rb', -1).readlines()
@contextmanager
def cd(self, remotepath=None):
"""context manager that can change to a optionally specified remote
directory and restores the old pwd on exit.
:param str|None remotepath: *Default: None* -
remotepath to temporarily make the current directory
:returns: None
:raises: IOError, if remote path doesn't exist
"""
try:
original_path = self.pwd
if remotepath is not None:
self.cwd(remotepath)
yield
finally:
self.cwd(original_path)
def chdir(self, remotepath):
"""change the current working directory on the remote
:param str remotepath: the remote path to change to
:returns: None
:raises: IOError, if path does not exist
"""
self._sftp_connect()
self._sftp.chdir(remotepath)
cwd = chdir # synonym for chdir
def chmod(self, remotepath, mode=777):
"""set the mode of a remotepath to mode, where mode is an integer
representation of the octal mode to use.
:param str remotepath: the remote path/file to modify
:param int mode: *Default: 777* -
int representation of octal mode for directory
:returns: None
:raises: IOError, if the file doesn't exist
"""
self._sftp_connect()
self._sftp.chmod(remotepath, mode=int(str(mode), 8))
def chown(self, remotepath, uid=None, gid=None):
""" set uid and/or gid on a remotepath, you may specify either or both.
Unless you have **permission** to do this on the remote server, you will
raise an IOError: 13 - permission denied
:param str remotepath: the remote path/file to modify
:param int uid: the user id to set on the remotepath
:param int gid: the group id to set on the remotepath
:returns: None
:raises: IOError, if you don't have permission or the file doesn't exist
"""
self._sftp_connect()
if uid is None or gid is None:
if uid is None and gid is None: # short circuit if no change
return
rstat = self._sftp.stat(remotepath)
if uid is None:
uid = rstat.st_uid
if gid is None:
gid = rstat.st_gid
self._sftp.chown(remotepath, uid=uid, gid=gid)
def getcwd(self):
"""return the current working directory on the remote. This is a wrapper
for paramiko's method and not to be confused with the SFTP command, cwd.
:returns: (str) the current remote path. None, if not set.
"""
self._sftp_connect()
return self._sftp.getcwd()
def listdir(self, remotepath='.'):
"""return a list of files/directories for the given remote path.
Unlike paramiko, the directory listing is sorted.
:param str remotepath: path to list on the server
:returns: (list of str) directory entries, sorted
"""
self._sftp_connect()
return sorted(self._sftp.listdir(remotepath))
def listdir_attr(self, remotepath='.'):
"""return a list of SFTPAttribute objects of the files/directories for
the given remote path. The list is in arbitrary order. It does not
include the special entries '.' and '..'.
The returned SFTPAttributes objects will each have an additional field:
longname, which may contain a formatted string of the file's
attributes, in unix format. The content of this string will depend on
the SFTP server.
:param str remotepath: path to list on the server
:returns: (list of SFTPAttributes), sorted
"""
self._sftp_connect()
return sorted(self._sftp.listdir_attr(remotepath),
key=lambda attr: attr.filename)
def mkdir(self, remotepath, mode=777):
"""Create a directory named remotepath with mode. On some systems,
mode is ignored. Where it is used, the current umask value is first
masked out.
:param str remotepath: directory to create
:param int mode: *Default: 777* -
int representation of octal mode for directory
:returns: None
"""
self._sftp_connect()
self._sftp.mkdir(remotepath, mode=int(str(mode), 8))
def normalize(self, remotepath):
"""Return the expanded path, w.r.t the server, of a given path. This
can be used to resolve symlinks or determine what the server believes
to be the :attr:`.pwd`, by passing '.' as remotepath.
:param str remotepath: path to be normalized
:return: (str) normalized form of the given path
:raises: IOError, if remotepath can't be resolved
"""
self._sftp_connect()
return self._sftp.normalize(remotepath)
def isdir(self, remotepath):
"""return true, if remotepath is a directory
:param str remotepath: the path to test
:returns: (bool)
"""
self._sftp_connect()
try:
result = S_ISDIR(self._sftp.stat(remotepath).st_mode)
except IOError: # no such file
result = False
return result
def isfile(self, remotepath):
"""return true if remotepath is a file
:param str remotepath: the path to test
:returns: (bool)
"""
self._sftp_connect()
try:
result = S_ISREG(self._sftp.stat(remotepath).st_mode)
except IOError: # no such file
result = False
return result
def makedirs(self, remotedir, mode=777):
"""create all directories in remotedir as needed, setting their mode
to mode, if created.
If remotedir already exists, silently complete. If a regular file is
in the way, raise an exception.
:param str remotedir: the directory structure to create
:param int mode: *Default: 777* -
int representation of octal mode for directory
:returns: None
:raises: OSError
"""
self._sftp_connect()
if self.isdir(remotedir):
pass
elif self.isfile(remotedir):
raise OSError("a file with the same name as the remotedir, " \
"'%s', already exists." % remotedir)
else:
head, tail = os.path.split(remotedir)
if head and not self.isdir(head):
self.makedirs(head, mode)
if tail:
self.mkdir(remotedir, mode=mode)
def readlink(self, remotelink):
"""Return the target of a symlink (shortcut). The result will be
an absolute pathname.
:param str remotelink: remote path of the symlink
:return: (str) absolute path to target
"""
self._sftp_connect()
return self._sftp.normalize(self._sftp.readlink(remotelink))
def remove(self, remotefile):
"""remove the file @ remotefile, remotefile may include a path, if no
path, then :attr:`.pwd` is used. This method only works on files
:param str remotefile: the remote file to delete
:returns: None
:raises: IOError
"""
self._sftp_connect()
self._sftp.remove(remotefile)
unlink = remove # synonym for remove
def rmdir(self, remotepath):
"""remove remote directory
:param str remotepath: the remote directory to remove
:returns: None
"""
self._sftp_connect()
self._sftp.rmdir(remotepath)
def rename(self, remote_src, remote_dest):
"""rename a file or directory on the remote host.
:param str remote_src: the remote file/directory to rename
:param str remote_dest: the remote file/directory to put it
:returns: None
:raises: IOError
"""
self._sftp_connect()
self._sftp.rename(remote_src, remote_dest)
def stat(self, remotepath):
"""return information about file/directory for the given remote path
:param str remotepath: path to stat
:returns: (obj) SFTPAttributes
"""
self._sftp_connect()
return self._sftp.stat(remotepath)
def lstat(self, remotepath):
"""return information about file/directory for the given remote path,
without following symbolic links. Otherwise, the same as .stat()
:param str remotepath: path to stat
:returns: (obj) SFTPAttributes object
"""
self._sftp_connect()
return self._sftp.lstat(remotepath)
def close(self):
"""Closes the connection and cleans up."""
# Close SFTP Connection.
if self._sftp_live:
self._sftp.close()
self._sftp_live = False
# Close the SSH Transport.
if self._transport_live:
self._transport.close()
self._transport_live = False
def open(self, remote_file, mode='r', bufsize=-1):
"""Open a file on the remote server.
See http://paramiko-docs.readthedocs.org/en/latest/api/sftp.html?highlight=open#paramiko.sftp_client.SFTPClient.open for details.
:param str remote_file: name of the file to open.
:param str mode:
mode (Python-style) to open file (always assumed binary)
:param int bufsize: *Default: -1* - desired buffering
:returns: (obj) SFTPFile, a handle to the remote open file
:raises: IOError, if the file could not be opened.
"""
self._sftp_connect()
return self._sftp.open(remote_file, mode=mode, bufsize=bufsize)
def exists(self, remotepath):
"""Test whether a remotepath exists.
:param str remotepath: the remote path to verify
:returns: (bool) True, if remotepath exists, else False
"""
self._sftp_connect()
try:
self._sftp.stat(remotepath)
except IOError:
return False
return True
def lexists(self, remotepath):
"""Test whether a remotepath exists. Returns True for broken symbolic
links
:param str remotepath: the remote path to verify
:returns: (bool), True, if lexists, else False
"""
self._sftp_connect()
try:
self._sftp.lstat(remotepath)
except IOError:
return False
return True
def symlink(self, remote_src, remote_dest):
'''create a symlink for a remote file on the server
:param str remote_src: path of original file
:param str remote_dest: path of the created symlink
:returns: None
:raises:
any underlying error, IOError if something already exists at
remote_dest
'''
self._sftp_connect()
self._sftp.symlink(remote_src, remote_dest)
def truncate(self, remotepath, size):
"""Change the size of the file specified by path. Used to modify the
size of the file, just like the truncate method on Python file objects.
The new file size is confirmed and returned.
:param str remotepath: remote file path to modify
:param int|long size: the new file size
:returns: (int) new size of file
:raises: IOError, if file does not exist
"""
self._sftp_connect()
self._sftp.truncate(remotepath, size)
return self._sftp.stat(remotepath).st_size
def walktree(self, remotepath, fcallback, dcallback, ucallback, recurse=True):
'''recursively descend, depth first, the directory tree rooted at
remotepath, calling discrete callback functions for each regular file,
directory and unknown file type.
:param str remotepath:
root of remote directory to descend, use '.' to start at
:attr:`.pwd`
:param callable fcallback:
callback function to invoke for a regular file.
(form: ``func(str)``)
:param callable dcallback:
callback function to invoke for a directory. (form: ``func(str)``)
:param callable ucallback:
callback function to invoke for an unknown file type.
(form: ``func(str)``)
:param bool recurse: *Default: True* - should it recurse
:returns: None
:raises:
'''
self._sftp_connect()
for entry in self._sftp.listdir(remotepath):
pathname = os.path.join(remotepath, entry)
mode = self._sftp.stat(pathname).st_mode
if S_ISDIR(mode):
# It's a directory, call the dcallback function
dcallback(pathname)
if recurse:
# now, recurse into it
self.walktree(pathname, fcallback, dcallback, ucallback)
elif S_ISREG(mode):
# It's a file, call the fcallback function
fcallback(pathname)
else:
# Unknown file type
ucallback(pathname)
@property
def sftp_client(self):
"""give access to the underlying, connected paramiko SFTPClient object
see http://paramiko-docs.readthedocs.org/en/latest/api/sftp.html?highlight=sftpclient
:params: None
:returns: (obj) the active SFTPClient object
"""
self._sftp_connect()
return self._sftp
@property
def active_ciphers(self):
"""Get tuple of currently used local and remote ciphers.
:returns:
(tuple of str) currently used ciphers (local_cipher, remote_cipher)
"""
return self._transport.local_cipher, self._transport.remote_cipher
@property
def security_options(self):
"""return the available security options recognized by paramiko.
:returns:
(obj) security preferences of the ssh transport. These are tuples
of acceptable `.ciphers`, `.digests`, `.key_types`, and key exchange
algorithms `.kex`, listed in order of preference.
"""
return self._transport.get_security_options()
@property
def logfile(self):
'''return the name of the file used for logging, or False if not logging
:returns: (str)logfile or (bool) False
'''
return self._logfile
@property
def timeout(self):
''' (float|None) *Default: None* -
get or set the underlying socket timeout for pending read/write
ops.
:returns:
(float|None) seconds to wait for a pending read/write operation
before raising socket.timeout, or None for no timeout
'''
self._sftp_connect()
channel = self._sftp.get_channel()
return channel.gettimeout()
@timeout.setter
def timeout(self, val):
'''setter for timeout'''
self._sftp_connect()
channel = self._sftp.get_channel()
channel.settimeout(val)
def __del__(self):
"""Attempt to clean up if not explicitly closed."""
self.close()
def __enter__(self):
return self
def __exit__(self, etype, value, traceback):
self.close()
def path_advance(thepath, sep=os.sep):
'''generator to iterate over a file path forwards
:param str thepath: the path to navigate forwards
:param str sep: *Default: os.sep* - the path separator to use
:returns: (iter)able of strings
'''
# handle a direct path
pre = ''
if thepath[0] == sep:
pre = sep
curpath = ''
parts = thepath.split(sep)
if pre:
if parts[0]:
parts[0] = pre + parts[0]
else:
parts[1] = pre + parts[1]
for part in parts:
curpath = os.path.join(curpath, part)
if curpath:
yield curpath
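# A minimal usage sketch for path_advance (illustrative, assuming a POSIX
# separator):
#
#   >>> list(path_advance('/tmp/some/dir'))
#   ['/tmp', '/tmp/some', '/tmp/some/dir']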
def path_retreat(thepath, sep=os.sep):
'''generator to iterate over a file path in reverse
:param str thepath: the path to retreat over
:param str sep: *Default: os.sep* - the path separator to use
:returns: (iter)able of strings
'''
pre = ''
if thepath[0] == sep:
pre = sep
parts = thepath.split(sep)
while parts:
if os.path.join(*parts):
yield '%s%s' % (pre, os.path.join(*parts))
parts = parts[:-1]
def reparent(newparent, oldpath):
'''when copying or moving a directory structure, you need to re-parent the
oldpath. When using os.path.join to calculate this new path, the
appearance of a / root path at the beginning of oldpath, supplants the
newparent and we don't want this to happen, so we need to make the oldpath
root appear as a child of the newparent.
:param: str newparent: the new parent location for oldpath (target)
:param str oldpath: the path being adopted by newparent (source)
:returns: (str) resulting adoptive path
'''
if oldpath[0] == os.sep:
oldpath = '.' + oldpath
return os.path.join(newparent, oldpath)
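# A minimal sketch of reparent (illustrative, POSIX separators assumed): an
# absolute source path is re-rooted under the new parent instead of
# replacing it.
#
#   >>> reparent('/backup', '/var/log')
#   '/backup/./var/log'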
def walktree(localpath, fcallback, dcallback, ucallback, recurse=True):
'''on the local file system, recursively descend, depth first, the
    directory tree rooted at localpath, calling discrete callback functions
for each regular file, directory and unknown file type.
:param str localpath:
        root of local directory to descend, use '.' to start at
        the current working directory
:param callable fcallback:
callback function to invoke for a regular file.
(form: ``func(str)``)
:param callable dcallback:
callback function to invoke for a directory. (form: ``func(str)``)
:param callable ucallback:
callback function to invoke for an unknown file type.
(form: ``func(str)``)
:param bool recurse: *Default: True* - should it recurse
:returns: None
:raises: OSError, if localpath doesn't exist
'''
for entry in os.listdir(localpath):
pathname = os.path.join(localpath, entry)
mode = os.stat(pathname).st_mode
if S_ISDIR(mode):
# It's a directory, call the dcallback function
dcallback(pathname)
if recurse:
# now, recurse into it
walktree(pathname, fcallback, dcallback, ucallback)
elif S_ISREG(mode):
# It's a file, call the fcallback function
fcallback(pathname)
else:
# Unknown file type
ucallback(pathname)
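# A minimal sketch of walking a local tree with simple callbacks
# (illustrative; assumes Python 3's print function and that '/tmp' exists):
#
#   >>> walktree('/tmp',
#   ...          fcallback=lambda p: print('file', p),
#   ...          dcallback=lambda p: print('dir ', p),
#   ...          ucallback=lambda p: print('?   ', p))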
@contextmanager
def cd(localpath=None):
"""context manager that can change to a optionally specified local
directory and restores the old pwd on exit.
:param str|None localpath: *Default: None* -
local path to temporarily make the current directory
:returns: None
:raises: OSError, if local path doesn't exist
"""
try:
original_path = os.getcwd()
if localpath is not None:
os.chdir(localpath)
yield
finally:
os.chdir(original_path)
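# A minimal usage sketch for cd (illustrative): the original working
# directory is restored even if the body raises.
#
#   >>> with cd('/tmp'):
#   ...     print(os.getcwd())      # '/tmp' (or the platform equivalent)
#   >>> # back in the original directory here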
|
|
import calendar
from collections import OrderedDict
import datetime
import importlib
from itertools import chain
from operator import itemgetter
import os
import re
import time
from django.apps import apps
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.http import QueryDict
from django.urls import resolve
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.encoding import force_text
def flatten(*xs):
return tuple(chain.from_iterable(xs))
def sort_dict(unsorted_dict):
"""
    Return an OrderedDict ordered by key names from ``unsorted_dict``
"""
sorted_dict = OrderedDict()
# sort items before inserting them into a dict
for key, value in sorted(unsorted_dict.items(), key=itemgetter(0)):
sorted_dict[key] = value
return sorted_dict
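# A minimal sketch of sort_dict (illustrative):
#
#   >>> sort_dict({'b': 2, 'a': 1})
#   OrderedDict([('a', 1), ('b', 2)])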
def format_time_and_value_to_segment_list(time_and_value_list, segments_count, start_timestamp,
end_timestamp, average=False):
"""
Format time_and_value_list to time segments
Parameters
^^^^^^^^^^
time_and_value_list: list of tuples
Have to be sorted by time
Example: [(time, value), (time, value) ...]
segments_count: integer
How many segments will be in result
Returns
^^^^^^^
List of dictionaries
Example:
[{'from': time1, 'to': time2, 'value': sum_of_values_from_time1_to_time2}, ...]
"""
segment_list = []
time_step = (end_timestamp - start_timestamp) / segments_count
for i in range(segments_count):
segment_start_timestamp = start_timestamp + time_step * i
segment_end_timestamp = segment_start_timestamp + time_step
value_list = [
value for time, value in time_and_value_list
if time >= segment_start_timestamp and time < segment_end_timestamp]
segment_value = sum(value_list)
if average and len(value_list) != 0:
segment_value /= len(value_list)
segment_list.append({
'from': segment_start_timestamp,
'to': segment_end_timestamp,
'value': segment_value,
})
return segment_list
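# A minimal sketch of segment formatting (illustrative): with two segments
# over [0, 100), the points at times 10 and 20 land in the first segment and
# their values are summed there, while the point at 60 lands in the second.
#
#   >>> format_time_and_value_to_segment_list(
#   ...     [(10, 1), (20, 2), (60, 3)], segments_count=2,
#   ...     start_timestamp=0, end_timestamp=100)
#   # -> [{'from': 0, 'to': 50, 'value': 3}, {'from': 50, 'to': 100, 'value': 3}]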
def datetime_to_timestamp(datetime):
return int(time.mktime(datetime.timetuple()))
def timestamp_to_datetime(timestamp, replace_tz=True):
dt = datetime.datetime.fromtimestamp(int(timestamp))
if replace_tz:
dt = dt.replace(tzinfo=timezone.get_current_timezone())
return dt
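# A minimal round-trip sketch (illustrative; exact values depend on the
# server's local timezone, since datetime_to_timestamp uses time.mktime):
#
#   >>> ts = datetime_to_timestamp(datetime.datetime(2018, 1, 1, 12, 0))
#   >>> timestamp_to_datetime(ts, replace_tz=False)
#   datetime.datetime(2018, 1, 1, 12, 0)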
def timeshift(**kwargs):
return timezone.now().replace(microsecond=0) + datetime.timedelta(**kwargs)
def hours_in_month(month=None, year=None):
now = datetime.datetime.now()
if not month:
month = now.month
if not year:
year = now.year
days_in_month = calendar.monthrange(year, month)[1]
return 24 * days_in_month
def month_start(date):
return timezone.make_aware(datetime.datetime(day=1, month=date.month, year=date.year))
def month_end(date):
days_in_month = calendar.monthrange(date.year, date.month)[1]
last_day_of_month = datetime.date(month=date.month, year=date.year, day=days_in_month)
last_second_of_month = datetime.datetime.combine(last_day_of_month, datetime.time.max)
return timezone.make_aware(last_second_of_month, timezone.get_current_timezone())
def pwgen(pw_len=16):
""" Generate a random password with the given length.
    Allowed chars do not include "I" or "O" or letters and
    digits that look similar -- just to avoid confusion.
"""
return get_random_string(pw_len, 'abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789')
def serialize_instance(instance):
""" Serialize Django model instance """
model_name = force_text(instance._meta)
return '{}:{}'.format(model_name, instance.pk)
def deserialize_instance(serialized_instance):
""" Deserialize Django model instance """
model_name, pk = serialized_instance.split(':')
model = apps.get_model(model_name)
return model._default_manager.get(pk=pk)
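# A minimal sketch of instance (de)serialization (illustrative; assumes a
# configured Django project and an existing user row):
#
#   >>> token = serialize_instance(user)    # e.g. 'core.user:1'
#   >>> deserialize_instance(token) == user
#   True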
def serialize_class(cls):
""" Serialize Python class """
return '{}:{}'.format(cls.__module__, cls.__name__)
def deserialize_class(serialized_cls):
    """ Deserialize Python class """
    module_name, cls_name = serialized_cls.split(':')
module = importlib.import_module(module_name)
return getattr(module, cls_name)
def clear_url(url):
""" Remove domain and protocol from url """
if url.startswith('http'):
return '/' + url.split('/', 3)[-1]
return url
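# A minimal sketch of clear_url (illustrative):
#
#   >>> clear_url('https://example.com/api/projects/123/')
#   '/api/projects/123/'
#   >>> clear_url('/api/projects/123/')
#   '/api/projects/123/'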
def get_model_from_resolve_match(match):
queryset = match.func.cls.queryset
if queryset is not None:
return queryset.model
else:
return match.func.cls.model
def instance_from_url(url, user=None):
""" Restore instance from URL """
    # XXX: This circular dependency will be removed when filter_queryset_for_user
    # is moved to a model manager method
from waldur_core.structure.managers import filter_queryset_for_user
url = clear_url(url)
match = resolve(url)
model = get_model_from_resolve_match(match)
queryset = model.objects.all()
if user is not None:
queryset = filter_queryset_for_user(model.objects.all(), user)
return queryset.get(**match.kwargs)
def get_detail_view_name(model):
if model is NotImplemented:
raise AttributeError('Cannot get detail view name for not implemented model')
if hasattr(model, 'get_url_name') and callable(model.get_url_name):
return '%s-detail' % model.get_url_name()
return '%s-detail' % model.__name__.lower()
def get_list_view_name(model):
if model is NotImplemented:
raise AttributeError('Cannot get list view name for not implemented model')
if hasattr(model, 'get_url_name') and callable(model.get_url_name):
return '%s-list' % model.get_url_name()
return '%s-list' % model.__name__.lower()
def get_fake_context():
user = get_user_model()()
request = type('R', (object,), {'method': 'GET', 'user': user, 'query_params': QueryDict()})
return {'request': request, 'user': user}
def camel_case_to_underscore(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
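# A minimal sketch of camel_case_to_underscore (illustrative):
#
#   >>> camel_case_to_underscore('SimpleTest')
#   'simple_test'
#   >>> camel_case_to_underscore('HTTPResponseCode')
#   'http_response_code'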
def silent_call(name, *args, **options):
call_command(name, stdout=open(os.devnull, 'w'), *args, **options)
|
|
import sys
from landsat.downloader import Downloader
from landsat.landsat import Process
from boto.s3.key import Key
from shutil import rmtree
from datetime import datetime
from boto import utils
import socket
from models import UserJob_Model, WorkerLog
from sqs import (make_SQS_connection, get_queue, get_message,
get_attributes, delete_message_from_handle)
import os
import boto
import zipfile
os.getcwd()
PATH_DOWNLOAD = os.getcwd() + '/download'
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
JOBS_QUEUE = 'snapsat_composite_queue'
REGION = 'us-west-2'
try:
INSTANCE_METADATA = utils.get_instance_metadata(timeout=0.5, num_retries=1)
INSTANCE_ID = INSTANCE_METADATA['instance-id']
except:
INSTANCE_ID = socket.gethostname()
def cleanup_downloads(folder_path):
"""Clean up download folder if process fails.
Return True if the download folder is empty.
"""
for file_object in os.listdir(folder_path):
file_object_path = os.path.join(folder_path, file_object)
if os.path.isfile(file_object_path):
os.remove(file_object_path)
else:
rmtree(file_object_path)
if not os.listdir(folder_path):
return True
else:
return False
def write_activity(statement, value, activity_type):
"""Write to activity log."""
WorkerLog.log_entry(INSTANCE_ID, statement, value, activity_type)
def checking_for_jobs():
"""Poll jobs queue for jobs."""
SQSconn = make_SQS_connection(REGION, AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY)
write_activity('SQS Connection', SQSconn.server_name(), 'success')
jobs_queue = get_queue(SQSconn, JOBS_QUEUE)
write_activity('Jobs queue', jobs_queue.name, 'success')
while True:
job_message = get_message(jobs_queue)
if job_message:
job_attributes = get_job_attributes(job_message)
delete_job_from_queue(SQSconn, job_message, jobs_queue)
# Process full res images
process_image(job_attributes)
# Begin checking for jobs
def get_job_attributes(job_message):
"""Get job attributes, log the result."""
job_attributes = None
try:
job_attributes = get_attributes(job_message[0])
write_activity('Job attributes',
str(job_attributes), 'success')
except Exception as e:
write_activity('Attribute retrieval fail because',
e.message, 'error')
return job_attributes
def delete_job_from_queue(SQSconn, job_message, jobs_queue):
"""Remove the job from the job queue."""
    del_status = False
    try:
        del_status = delete_message_from_handle(SQSconn,
                                                jobs_queue,
                                                job_message[0])
        write_activity('Delete status', unicode(del_status), 'success')
    except Exception as e:
        write_activity('Delete status', unicode(del_status), 'error')
write_activity('Delete message fail because ',
e.message, 'error')
def process_image(job_attributes):
"""Begin the image processing and log the results."""
try:
proc_status = process(job_attributes)
write_activity('Job process status',
unicode(proc_status), 'success')
except Exception as e:
proc_status = False
# If processing fails, send message to pyramid to update db
write_activity('Job process success',
unicode(proc_status), 'error')
write_activity('Job process fail because',
e.message, 'error')
cleanup_status = cleanup_downloads(PATH_DOWNLOAD)
write_activity('Cleanup downloads success',
cleanup_status, 'error')
UserJob_Model.set_job_status(job_attributes['job_id'], 10)
def download_and_set(job, PATH_DOWNLOAD):
"""Download the image file."""
UserJob_Model.set_job_status(job['job_id'], 1)
b = Downloader(verbose=False, download_dir=PATH_DOWNLOAD)
scene_id = str(job['scene_id'])
bands = [job['band_1'], job['band_2'], job['band_3']]
b.download([scene_id], bands)
input_path = os.path.join(PATH_DOWNLOAD, scene_id)
return input_path, bands, scene_id
def merge_images(job, input_path, bands, PATH_DOWNLOAD, scene_id):
"""Process images using landsat-util."""
UserJob_Model.set_job_status(job['job_id'], 2)
c = Process(input_path, bands=bands, dst_path=PATH_DOWNLOAD, verbose=False)
c.run(pansharpen=False)
band_output = ''
for band in bands:
band_output = '{}{}'.format(band_output, band)
file_name = '{}_bands_{}.TIF'.format(scene_id, band_output)
file_location = os.path.join(input_path, file_name)
return band_output, file_location
def zip_file(job, band_output, scene_id, input_path, file_location):
"""
Compress the image.
"""
print 'Zipping file'
UserJob_Model.set_job_status(job['job_id'], 3)
file_name_zip = '{}_bands_{}.zip'.format(scene_id, band_output)
file_name = '{}_bands_{}.TIF'.format(scene_id, band_output)
path_to_zip = os.path.join(input_path, file_name_zip)
with zipfile.ZipFile(path_to_zip, 'w', zipfile.ZIP_DEFLATED) as myzip:
myzip.write(file_location, arcname=file_name)
return file_name_zip
def upload_to_s3(file_location, file_name_zip, input_path, job):
"""
Upload processed images to S3.
"""
try:
print 'Uploading to S3'
UserJob_Model.set_job_status(job['job_id'], 4)
file_location = os.path.join(input_path, file_name_zip)
conne = boto.connect_s3(aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
b = conne.get_bucket('snapsatcompositesjoel')
k = Key(b)
k.key = file_name_zip
k.set_contents_from_filename(file_location)
k.get_contents_to_filename(file_location)
hello = b.get_key(file_name_zip)
# make public
hello.set_canned_acl('public-read')
out = unicode(hello.generate_url(0, query_auth=False, force_http=True))
print out
UserJob_Model.set_job_status(job['job_id'], 5, out)
except:
raise Exception('S3 Upload failed')
return file_location
def process(job):
"""
Given bands and sceneID, download, image process, zip & upload to S3.
"""
# set worker instance id for job
UserJob_Model.set_worker_instance_id(job['job_id'], INSTANCE_ID)
# download and set vars
input_path, bands, scene_id = download_and_set(job, PATH_DOWNLOAD)
# call landsat-util to merge images
band_output, file_location = merge_images(
job, input_path, bands, PATH_DOWNLOAD, scene_id
)
# zip file, maintain location
file_name_zip = zip_file(job, band_output, scene_id, input_path,
file_location)
# upload to s3
file_location = upload_to_s3(file_location, file_name_zip, input_path, job)
# delete files
try:
rmtree(input_path) # band images and composite
except OSError:
print input_path
print 'error deleting files'
return True
if __name__ == '__main__':
checking_for_jobs()
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import tempfile
import fixtures
from lxml import etree
from oslo_config import cfg
import requests
import testtools
from testtools import content as test_content
from testtools import matchers
import urllib.parse as urlparse
from os_collect_config import cfn
from os_collect_config import collect
from os_collect_config import exc
META_DATA = {u'int1': 1,
u'strfoo': u'foo',
u'map_ab': {
u'a': 'apple',
u'b': 'banana',
}}
SOFTWARE_CONFIG_DATA = {
u'old-style': u'value',
u'deployments': [
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'group': 'Heat::Ungrouped',
u'name': 'dep-name1',
u'outputs': None,
u'options': None,
u'config': {
u'config1': 'value1'
}
},
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'group': 'os-apply-config',
u'name': 'dep-name2',
u'outputs': None,
u'options': None,
u'config': {
u'config2': 'value2'
}
},
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'name': 'dep-name3',
u'outputs': None,
u'options': None,
u'config': {
u'config3': 'value3'
}
},
{
u'inputs': [],
u'group': 'ignore_me',
u'name': 'ignore_me_name',
u'outputs': None,
u'options': None,
u'config': 'ignore_me_config'
}
]
}
SOFTWARE_CONFIG_IMPOSTER_DATA = {
u'old-style': u'value',
u'deployments': {
u"not": u"a list"
}
}
class FakeResponse(dict):
def __init__(self, text):
self.text = text
def raise_for_status(self):
pass
class FakeReqSession(object):
SESSION_META_DATA = META_DATA
def __init__(self, testcase, expected_netloc):
self._test = testcase
self._expected_netloc = expected_netloc
self.verify = False
def get(self, url, params, headers, verify=None, timeout=None):
self._test.addDetail('url', test_content.text_content(url))
url = urlparse.urlparse(url)
self._test.assertEqual(self._expected_netloc, url.netloc)
self._test.assertEqual('/v1/', url.path)
self._test.assertEqual('application/json',
headers['Content-Type'])
self._test.assertIn('SignatureVersion', params)
self._test.assertEqual('2', params['SignatureVersion'])
self._test.assertIn('Signature', params)
self._test.assertIn('Action', params)
self._test.assertEqual('DescribeStackResource',
params['Action'])
self._test.assertIn('LogicalResourceId', params)
self._test.assertEqual('foo', params['LogicalResourceId'])
self._test.assertEqual(10, timeout)
root = etree.Element('DescribeStackResourceResponse')
result = etree.SubElement(root, 'DescribeStackResourceResult')
detail = etree.SubElement(result, 'StackResourceDetail')
metadata = etree.SubElement(detail, 'Metadata')
metadata.text = json.dumps(self.SESSION_META_DATA)
if verify is not None:
self.verify = True
return FakeResponse(etree.tostring(root))
class FakeRequests(object):
exceptions = requests.exceptions
def __init__(self, testcase, expected_netloc='192.0.2.1:8000'):
self._test = testcase
self._expected_netloc = expected_netloc
def Session(self):
return FakeReqSession(self._test, self._expected_netloc)
class FakeReqSessionSoftwareConfig(FakeReqSession):
SESSION_META_DATA = SOFTWARE_CONFIG_DATA
class FakeRequestsSoftwareConfig(FakeRequests):
FAKE_SESSION = FakeReqSessionSoftwareConfig
def Session(self):
return self.FAKE_SESSION(self._test, self._expected_netloc)
class FakeReqSessionConfigImposter(FakeReqSession):
SESSION_META_DATA = SOFTWARE_CONFIG_IMPOSTER_DATA
class FakeRequestsConfigImposter(FakeRequestsSoftwareConfig):
FAKE_SESSION = FakeReqSessionConfigImposter
class FakeFailRequests(object):
exceptions = requests.exceptions
class Session(object):
def get(self, url, params, headers, verify=None, timeout=None):
raise requests.exceptions.HTTPError(403, 'Forbidden')
class TestCfnBase(testtools.TestCase):
def setUp(self):
super(TestCfnBase, self).setUp()
self.log = self.useFixture(fixtures.FakeLogger())
self.useFixture(fixtures.NestedTempfile())
self.hint_file = tempfile.NamedTemporaryFile()
self.hint_file.write(u'http://192.0.2.1:8000'.encode('utf-8'))
self.hint_file.flush()
self.addCleanup(self.hint_file.close)
collect.setup_conf()
cfg.CONF.cfn.heat_metadata_hint = self.hint_file.name
cfg.CONF.cfn.metadata_url = None
cfg.CONF.cfn.path = ['foo.Metadata']
cfg.CONF.cfn.access_key_id = '0123456789ABCDEF'
cfg.CONF.cfn.secret_access_key = 'FEDCBA9876543210'
class TestCfn(TestCfnBase):
def test_collect_cfn(self):
cfn_md = cfn.Collector(requests_impl=FakeRequests(self)).collect()
self.assertThat(cfn_md, matchers.IsInstance(list))
self.assertEqual('cfn', cfn_md[0][0])
cfn_md = cfn_md[0][1]
for k in ('int1', 'strfoo', 'map_ab'):
self.assertIn(k, cfn_md)
self.assertEqual(cfn_md[k], META_DATA[k])
self.assertEqual('', self.log.output)
def test_collect_with_ca_cert(self):
cfn.CONF.cfn.ca_certificate = "foo"
collector = cfn.Collector(requests_impl=FakeRequests(self))
collector.collect()
self.assertTrue(collector._session.verify)
def test_collect_cfn_fail(self):
cfn_collect = cfn.Collector(requests_impl=FakeFailRequests)
self.assertRaises(exc.CfnMetadataNotAvailable, cfn_collect.collect)
self.assertIn('Forbidden', self.log.output)
def test_collect_cfn_no_path(self):
cfg.CONF.cfn.path = None
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('No path configured', self.log.output)
def test_collect_cfn_bad_path(self):
cfg.CONF.cfn.path = ['foo']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('Path not in format', self.log.output)
def test_collect_cfn_no_metadata_url(self):
cfg.CONF.cfn.heat_metadata_hint = None
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('No metadata_url configured', self.log.output)
def test_collect_cfn_missing_sub_path(self):
cfg.CONF.cfn.path = ['foo.Metadata.not_there']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotAvailable, cfn_collect.collect)
self.assertIn('Sub-key not_there does not exist', self.log.output)
def test_collect_cfn_sub_path(self):
cfg.CONF.cfn.path = ['foo.Metadata.map_ab']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
content = cfn_collect.collect()
self.assertThat(content, matchers.IsInstance(list))
self.assertEqual('cfn', content[0][0])
content = content[0][1]
self.assertIn(u'b', content)
self.assertEqual(u'banana', content[u'b'])
def test_collect_cfn_metadata_url_overrides_hint(self):
cfg.CONF.cfn.metadata_url = 'http://127.0.1.1:8000/v1/'
cfn_collect = cfn.Collector(
requests_impl=FakeRequests(self,
expected_netloc='127.0.1.1:8000'))
cfn_collect.collect()
class TestCfnSoftwareConfig(TestCfnBase):
def test_collect_cfn_software_config(self):
cfn_md = cfn.Collector(
requests_impl=FakeRequestsSoftwareConfig(self)).collect()
self.assertThat(cfn_md, matchers.IsInstance(list))
self.assertEqual('cfn', cfn_md[0][0])
cfn_config = cfn_md[0][1]
self.assertThat(cfn_config, matchers.IsInstance(dict))
self.assertEqual(set(['old-style', 'deployments']),
set(cfn_config.keys()))
self.assertIn('deployments', cfn_config)
self.assertThat(cfn_config['deployments'], matchers.IsInstance(list))
self.assertEqual(4, len(cfn_config['deployments']))
deployment = cfn_config['deployments'][0]
self.assertIn('inputs', deployment)
self.assertThat(deployment['inputs'], matchers.IsInstance(list))
self.assertEqual(1, len(deployment['inputs']))
self.assertEqual('dep-name1', cfn_md[1][0])
self.assertEqual('value1', cfn_md[1][1]['config1'])
self.assertEqual('dep-name2', cfn_md[2][0])
self.assertEqual('value2', cfn_md[2][1]['config2'])
def test_collect_cfn_deployments_not_list(self):
cfn_md = cfn.Collector(
requests_impl=FakeRequestsConfigImposter(self)).collect()
self.assertEqual(1, len(cfn_md))
self.assertEqual('cfn', cfn_md[0][0])
self.assertIn('not', cfn_md[0][1]['deployments'])
self.assertEqual('a list', cfn_md[0][1]['deployments']['not'])
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Conary facade module
This module provides a high-level stable API for use within rbuild
plugins. It provides public methods which are only to be accessed
via C{handle.facade.conary} which is automatically available to
all plugins through the C{handle} object.
"""
import sys
import copy
import itertools
import os
import stat
import types
import urlparse
from conary import conarycfg
from conary import conaryclient
from conary import checkin
from conary import errors as conaryerrors
from conary import state
from conary import trove
from conary import versions
from conary.build import loadrecipe
from conary.build import use
from conary.build import errors as builderrors
from conary.build import derive
from conary.cmds import clone
from conary.cmds import updatecmd
from conary.conaryclient import cmdline
from conary.deps import deps
from conary.lib import util
from rbuild import errors
class ConaryFacade(object):
"""
The rBuild Appliance Developer Process Toolkit Conary facade.
Note that the contents of objects marked as B{opaque} may vary
according to the version of Conary in use, and the contents
of such objects are not included in the stable rBuild API.
"""
def __init__(self, handle):
"""
@param handle: The handle with which this instance is associated.
"""
self._handle = handle
self._conaryCfg = None
self._initializedFlavors = False
#{ Private Methods
def _parseRBuilderConfigFile(self, cfg):
"""
Include conary configuration file provided by rBuilder
@param cfg: configuration file to add rbuilder configuration
data to.
"""
serverUrl = self._handle.getConfig().serverUrl
if serverUrl:
hostname = urlparse.urlparse(serverUrl)[1]
if hostname not in ['www.rpath.com', 'www.rpath.org']:
cfg.includeConfigFile(serverUrl + '/conaryrc')
def _initializeFlavors(self):
if not self._initializedFlavors:
self.getConaryConfig().initializeFlavors()
self._initializedFlavors = True
def _getConaryClient(self):
"""
Get a conaryclient object
"""
return conaryclient.ConaryClient(self.getConaryConfig())
def _getRepositoryClient(self):
"""
Get a repository object from a conaryclient
"""
return self._getConaryClient().getRepos()
@staticmethod
def _getVersion(version):
"""
Converts a version string into an B{opaque} Conary version object,
or returns the B{opaque} version object.
@param version: a representation of a conary version
@type version: string or B{opaque} conary.versions.Version
@return: B{opaque} Conary version object
@rtype: conary.versions.Version
"""
if isinstance(version, types.StringTypes):
return versions.VersionFromString(str(version))
return version
@staticmethod
def _getLabel(label):
"""
Converts a label string into an B{opaque} Conary label object,
or returns the B{opaque} label object.
@param label: a representation of a conary label
@type label: string or B{opaque} conary.versions.Label
@return: B{opaque} Conary label object
@rtype: conary.versions.Label
"""
if isinstance(label, types.StringTypes):
if label.startswith('/'):
version = versions.VersionFromString(label)
if isinstance(version, versions.Branch):
return version.label()
else:
return version.trailingLabel()
if label.count('/') == 1 and '/' in label:
label = label.split('/', 1)[0]
return versions.Label(str(label))
return label
@staticmethod
def _getFlavor(flavor=None, keepNone=False):
"""
Converts a version string into an B{opaque} Conary flavor object
or returns the B{opaque} flavor object.
@param flavor: conary flavor
@type flavor: string or B{opaque} conary.deps.deps.Flavor
@param keepNone: if True, leave None objects as None instead
of converting to empty flavor
@type keepNone: boolean
@return: B{opaque} Conary flavor object
@rtype: conary.deps.deps.Flavor
"""
if flavor is None:
if keepNone:
return None
else:
return(deps.Flavor())
if isinstance(flavor, types.StringTypes):
return deps.parseFlavor(str(flavor), raiseError=True)
return flavor
@classmethod
def _getBuildFlavor(cls, flavor=None):
"""
Converts a B{opaque} flavor object into an B{opaque} build
        flavor object, stripping any biarch flavor to just x86_64.
@param flavor: conary flavor
@type flavor: string or B{opaque} conary.deps.deps.Flavor
@return: B{opaque} Conary flavor object
@rtype: conary.deps.deps.Flavor
"""
flavor = cls._getFlavor(flavor=flavor)
        # Remove any x86 dependencies that are part of the flavor.
biarch = deps.parseFlavor('is: x86 x86_64')
if flavor.stronglySatisfies(biarch):
# Get a new flavor before modifying it.
flavor = flavor.copy()
# Remove the x86 deps.
flavor.removeDeps(deps.InstructionSetDependency,
[deps.Dependency('x86'), ])
return flavor
def _findTrovesFlattened(self, specList, labelPath=None,
defaultFlavor=None, allowMissing=False):
results = self._findTroves(specList, labelPath=labelPath,
defaultFlavor=defaultFlavor,
allowMissing=allowMissing)
return list(itertools.chain(*results.values()))
def _findTroves(self, specList, labelPath=None,
defaultFlavor=None, allowMissing=False):
newSpecList = []
specMap = {}
for spec in specList:
if not isinstance(spec, tuple):
newSpec = cmdline.parseTroveSpec(spec)
else:
newSpec = spec
newSpecList.append(newSpec)
specMap[newSpec] = spec
repos = self._getRepositoryClient()
if isinstance(labelPath, (tuple, list)):
labelPath = [ self._getLabel(x) for x in labelPath ]
elif labelPath:
labelPath = self._getLabel(labelPath)
defaultFlavor = self._getFlavor(defaultFlavor, keepNone=True)
results = repos.findTroves(labelPath, newSpecList,
defaultFlavor = defaultFlavor,
allowMissing=allowMissing)
return dict((specMap[x[0]], x[1]) for x in results.items())
def _findTrove(self, name, version, flavor=None, labelPath=None,
defaultFlavor = None, allowMissing=False):
#pylint: disable-msg=R0913
# findTrove really needs all these arguments to pass through
"""
Gets a reference to a trove in the repository.
@param name: package to find
@type name: string
@param version: version of package to find
@type version: string or C{conary.versions.Version} B{opaque}
@param flavor: flavor of package to find (optional)
@type flavor: string or C{deps.Flavor} B{opaque}
@param labelPath: label(s) to find package on
@type labelPath: None, conary.versions.Label, or list of
conary.versions.Label
@param defaultFlavor: Flavor to use for those troves specifying None
for their flavor.
@type defaultFlavor: str or None
@param allowMissing: if True, allow None as a return value if
the package was not found.
@return: C{(name, version, flavor)} tuple.
Note that C{version} and C{flavor} objects are B{opaque}.
        @rtype: (string, conary.versions.Version, conary.deps.deps.Flavor)
"""
repos = self._getRepositoryClient()
flavor = self._getFlavor(flavor)
defaultFlavor = self._getFlavor(defaultFlavor)
try:
results = repos.findTroves(labelPath, [(name, version, flavor)],
defaultFlavor=defaultFlavor,
allowMissing=allowMissing)
except conaryerrors.LabelPathNeeded:
errstr = "%s is not a label. Please specify a label where a " \
"product definition can be found, or specify a product " \
"short name and version." % str(version)
raise errors.RbuildError(errstr)
if not results:
return None
troveTup, = results[name, version, flavor]
return troveTup
@staticmethod
def _versionToString(version):
"""
Takes either a string or an B{opaque} C{version.Version}
object and returns a string. The inverse of C{_getVersion}
@param version: trove version
@type version: string or B{opaque} C{conary.versions.Version}
@return: version
@rtype: string
"""
if type(version) is versions.Version:
version = version.asString()
return version
@staticmethod
def _flavorToString(flavor):
"""
Takes either a string or an B{opaque} C{conary.deps.deps.Flavor}
object and returns a string. The inverse of C{_getFlavor}
@param flavor: trove flavor
@type flavor: None, string, or B{opaque} C{conary.deps.deps.Flavor}
@return: flavor
@rtype: string
"""
if flavor is None:
return ''
if type(flavor) is deps.Flavor:
flavor = str(flavor)
return flavor
@classmethod
def _troveTupToStrings(cls, name, version, flavor=None):
"""
Turns a (name, version, flavor) tuple with strings or objects
as elements, and converts it to a (name, version, flavor)
tuple with only strings, to avoid unnecessarily exporting
conary objects into the rbuild API.
@param name: trove name
@type name: string
@param version: trove version
@type version: string or B{opaque} C{conary.versions.Version}
@param flavor: trove flavor
@type flavor: None, string, or B{opaque} C{conary.deps.deps.Flavor}
@return: (name, version, flavor) tuple
@rtype: (string, string, string)
"""
version = cls._versionToString(version)
flavor = cls._flavorToString(flavor)
return (name, version, flavor)
#}
def getConaryConfig(self, useCache=True):
"""
Fetches a (possibly cached) B{opaque} conary config object with all
appropriate data inherited from the associated rbuild config
object.
@param useCache: if True (default), uses a cached version of the
conary configuration file if available.
@type useCache: bool
@return: C{conarycfg.ConaryConfiguration} B{opaque} object
"""
if self._conaryCfg and useCache:
return self._conaryCfg
cfg = conarycfg.ConaryConfiguration(False)
rbuildCfg = self._handle.getConfig()
self._parseRBuilderConfigFile(cfg)
#pylint: disable-msg=E1101
# pylint does not understand config objects very well
cfg.repositoryMap.update(rbuildCfg.repositoryMap)
cfg.user.append(('*',) + rbuildCfg.user)
cfg.name = rbuildCfg.name
cfg.contact = rbuildCfg.contact
cfg.signatureKey = rbuildCfg.signatureKey
cfg.signatureKeyMap = rbuildCfg.signatureKeyMap
if useCache:
self._conaryCfg = cfg
return cfg
def _getBaseConaryConfig(self, readConfigFiles=True):
"""
Fetches an B{opaque} conary config object with no rBuild
configuration data included.
@param readConfigFiles: initialize contents of config object
from normal configuration files (default: True)
@type readConfigFiles: bool
@return: C{conarycfg.ConaryConfiguration} B{opaque} object
"""
return conarycfg.ConaryConfiguration(readConfigFiles = readConfigFiles,
ignoreErrors = True,
readProxyValuesFirst = True)
@staticmethod
def setFactoryFlag(factoryName, targetDir=None):
"""
Sets the factory type for a checkout.
@param factoryName: name of factory or empty string to reset
@type factoryName: string
@param targetDir: directory containing package; default (C{None})
is the current directory
@type targetDir: string
"""
if not factoryName:
# convert from None to checkin's accepted ''
factoryName = ''
checkin.factory(factoryName, targetDir=targetDir)
def commit(self, targetDir, message):
cfg = self.getConaryConfig()
self._initializeFlavors()
use.setBuildFlagsFromFlavor(None, cfg.buildFlavor, False)
cwd = os.getcwd()
try:
os.chdir(targetDir)
checkin.commit(self._getRepositoryClient(), cfg, message=message)
except conaryerrors.CvcError, e:
tb = sys.exc_info()[2]
raise errors.RbuildError, str(e), tb
finally:
os.chdir(cwd)
return True
def checkout(self, package, label, targetDir=None):
"""
Create a subdirectory containing a checkout of a conary
source package. Similar to the C{cvc checkout} command.
@param package: name of package
@type package: string
@param label: label to find package on
@type label: string
@param targetDir: subdirectory into which to check out the package,
This is the final directory into which the checked-out files
will be placed, not the parent directory in which a subdirectory
will be created.
defaults to C{package}
@type targetDir: string
"""
cfg = self.getConaryConfig()
checkin.checkout(self._getRepositoryClient(), cfg,
targetDir, ['%s=%s' % (package, label)])
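    # A minimal usage sketch (illustrative; the package name and label are
    # hypothetical): from within an rbuild plugin this facade is reached
    # through the handle object, e.g.
    #
    #   conary = handle.facade.conary
    #   conary.checkout('mypkg', 'example.rpath.org@ns:devel',
    #                   targetDir='mypkg-checkout')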
def refresh(self, targetDir=None):
"""
Refresh the checked-out sources for a conary source package.
@param targetDir: checkout directory to refresh
@type targetDir: string
"""
cfg = self.getConaryConfig()
self._initializeFlavors()
use.setBuildFlagsFromFlavor(None, cfg.buildFlavor, False)
return checkin.refresh(self._getRepositoryClient(), cfg,
dirName=targetDir)
def updateCheckout(self, targetDir):
"""
Update a subdirectory containing a checkout of a conary
source package. Similar to the C{cvc update} command.
@param targetDir: subdirectory containing package to update
@type targetDir: string
@return: Status
@rtype: bool
"""
# Conary likes absolute paths RBLD-137
targetDir = os.path.abspath(targetDir)
try:
return checkin.nologUpdateSrc(self._getRepositoryClient(),
[targetDir])
except (builderrors.UpToDate, builderrors.NotCheckedInError):
# The end result is an up to date checkout, so ignore the exception
return True
except builderrors.CheckinError, e:
# All other exceptions are deemed failures
raise errors.RbuildError(str(e))
except AttributeError:
return checkin.updateSrc(self._getRepositoryClient(), [targetDir])
def getCheckoutStatus(self, targetDir):
"""
Create summary of changes regarding all files in a directory
as a list of C{(I{status}, I{filename})} tuples where
C{I{status}} is a single character describing the status
of the file C{I{filename}}:
- C{?}: File not managed by Conary
- C{A}: File added since last commit (or since package created if no commit)
- C{M}: File modified since last commit
- C{R}: File removed since last commit
@param targetDir: name of directory for which to fetch status.
@type targetDir: string
@return: lines of text describing differences
@rtype: list
"""
return checkin.generateStatus(self._getRepositoryClient(),
dirName=targetDir)
def getCheckoutLog(self, targetDir, newerOnly=False, versionList=None):
"""
Returns list of lines of log messages relative to the specified
targetDirectory.
@param targetDir: name of directory for which to fetch status.
@type targetDir: string
@param newerOnly: (C{False}) whether to return only log messages
newer than the current contents of the checkout.
@type newerOnly: bool
@param versionList: (C{None}) if set, a list of versions for
which to return log messages. If C{None}, return all log
messages.
@type versionList: list of strings or (opaque) conary version objects
@return: list of strings
"""
repos, sourceState = self._getRepositoryStateFromDirectory(targetDir)
troveName = sourceState.getName()
if versionList is None:
if newerOnly:
versionList = self._getNewerRepositoryVersions(targetDir)
else:
versionList = self._getRepositoryVersions(targetDir)
else:
versionList = [self._getVersion(x) for x in versionList]
emptyFlavor = deps.Flavor()
nvfList = [(troveName, v, emptyFlavor) for v in versionList]
troves = repos.getTroves(nvfList)
return [message for message in checkin.iterLogMessages(troves)]
def iterRepositoryDiff(self, targetDir, lastver=None):
"""
Yields lines of repository diff output relative to the
specified targetDirectory.
@param targetDir: name of directory for which to fetch status.
@type targetDir: string
@param lastver: (C{None}) None for diff between directory and
latest version in repository, otherwise a string or (opaque)
conary version object specifying the repository version against
        which to generate the diff.
@return: yields strings
"""
repos, sourceState = self._getRepositoryStateFromDirectory(targetDir)
troveName = sourceState.getName()
ver = sourceState.getVersion()
label = ver.branch().label()
if lastver is None:
lastver = self._getNewerRepositoryVersions(targetDir)[-1]
else:
lastver = self._getVersion(lastver)
for line in checkin._getIterRdiff(repos, label, troveName,
ver.asString(), lastver.asString()):
yield line
def iterCheckoutDiff(self, targetDir):
"""
Yields lines of checkout diff output relative to the
specified targetDirectory.
@param targetDir: name of directory for which to fetch status.
@type targetDir: string
@return: yields strings
"""
repos, sourceState = self._getRepositoryStateFromDirectory(targetDir)
ver = sourceState.getVersion()
i = checkin._getIterDiff(repos, ver.asString(),
pathList=None, logErrors=False, dirName=targetDir)
if i not in (0, 2):
# not an "error" case, so i really is an iterator
for line in i:
yield line
def _getNewerRepositoryVersionStrings(self, targetDir):
'''
Returns list of versions from the repository that are newer than the checkout
@param targetDir: directory containing Conary checkout
@return: list of version strings
'''
return [self._versionToString(x)
for x in self._getNewerRepositoryVersions(targetDir)]
def _getNewerRepositoryVersions(self, targetDir):
'''
Returns list of versions from the repository that are newer than the checkout
@param targetDir: directory containing Conary checkout
@return: list of C{conary.versions.Version}
'''
_, sourceState = self._getRepositoryStateFromDirectory(targetDir)
troveVersion = sourceState.getVersion()
#pylint: disable-msg=E1103
# we know that ver does have an isAfter method
return [ver for ver in self._getRepositoryVersions(targetDir)
if ver.isAfter(troveVersion)]
def _getRepositoryVersions(self, targetDir):
'''
        List of versions of this package checked into the repository
@param targetDir: directory containing Conary checkout
@return: list of C{conary.versions.Version}
'''
repos, sourceState = self._getRepositoryStateFromDirectory(targetDir)
branch = sourceState.getBranch()
troveName = sourceState.getName()
verList = repos.getTroveVersionsByBranch({troveName: {branch: None}})
if verList:
verList = verList[troveName].keys()
verList.sort()
verList.reverse()
else:
verList = []
return verList
def _getRepositoryStateFromDirectory(self, targetDir):
'''
Create repository and state objects for working with a checkout
@param targetDir: directory containing Conary checkout
'''
repos = self._getRepositoryClient()
conaryState = state.ConaryStateFromFile(targetDir + '/CONARY', repos)
sourceState = conaryState.getSourceState()
return repos, sourceState
@staticmethod
def isConaryCheckoutDirectory(targetDir):
'''
Return whether a directory contains a CONARY file
@param targetDir: directory containing Conary checkout
'''
return os.path.exists(os.sep.join((targetDir, 'CONARY')))
def createNewPackage(self, package, label, targetDir=None, template=None,
factory=None):
"""
Create a subdirectory containing files to initialize a new
conary source package. Similar to the C{cvc newpkg} command.
@param package: name of package
@type package: string
@param label: label to create package on
@type label: string
@param targetDir: directory to create new package in (default
is current working directory)
@type targetDir: string
@param template: name of Conary template to use
@type template: string
@param factory: name of Conary factory to use, or True to create a factory
@type factory: string, NoneType, or bool
"""
# Normalize factory settings
if factory is True:
factory = 'factory'
if factory is False:
factory = None
checkin.newTrove(self._getRepositoryClient(), self.getConaryConfig(),
'%s=%s' % (package, label), dir=targetDir,
template=template, factory=factory)
def shadowSource(self, name, version, targetLabel):
"""
Create a shadow of a conary source package. Similar to the
C{cvc shadow} command.
@param name: package to shadow
@type name: string
@param version: version of package to shadow
@type version: string or B{opaque} C{conary.versions.Version}
@param targetLabel: label on which to create shadow
@type targetLabel: string or B{opaque} conary.versions.Label
@return: C{(name, version, flavor)} tuple specifying the newly-created
or pre-existing shadow.
@rtype: (string, string, string)
"""
version = self._getVersion(version)
flavor = self._getFlavor()
targetLabel = self._getLabel(targetLabel)
results = self._getConaryClient().createShadowChangeSet(
str(targetLabel),
[(name, version, flavor)])
if not results:
return False
return self._commitShadowChangeSet(results[0], results[1])[0]
def shadowSourceForBinary(self, name, version, flavor, targetLabel):
version = self._getVersion(version)
flavor = self._getFlavor(flavor)
targetLabel = self._getLabel(targetLabel)
client = self._getConaryClient()
results = client.createShadowChangeSet(
str(targetLabel),
[(name, version, flavor)],
branchType=client.BRANCH_SOURCE)
if not results:
return False
return self._commitShadowChangeSet(results[0], results[1])[0]
def derive(self, troveToDerive, targetLabel, targetDir):
repos = self._getRepositoryClient()
cfg = self.getConaryConfig()
derive.derive(repos,cfg, targetLabel, troveToDerive, targetDir,
extract = True)
def _commitShadowChangeSet(self, existingShadow, cs):
if cs and not cs.isEmpty():
self._getRepositoryClient().commitChangeSet(cs)
allTroves = []
if existingShadow:
allTroves.extend(self._troveTupToStrings(*x)
for x in existingShadow)
if cs:
allTroves.extend(
self._troveTupToStrings(*x.getNewNameVersionFlavor())
for x in cs.iterNewTroveList())
return allTroves
#pylint: disable-msg=R0913
# too many args, but still better than self, troveTup, targetDir
def checkoutBinaryPackage(self, name, version, flavor, targetDir,
quiet=True, tagScript=None):
"""
Check out the contents of a binary package into a directory
with a minimal derived recipe written and a binary checkout
in the C{_ROOT_} directory to make modifying the derived
package easier. Does not commit the derived package.
@param name: package to check out
@type name: string
@param version: version of package to check out
@type version: string or B{opaque} C{conary.versions.Version}
@param flavor: conary flavor
@type flavor: string or B{opaque} conary.deps.deps.Flavor
@param targetDir: subdirectory into which to check out the package,
defaults to C{package}
@type targetDir: string
@param quiet: (C{True}) determines whether to print update status
during the operation.
@type quiet: bool
@param tagScript: If not C{None}, write tag scripts to this file
instead of running them in-place.
@type tagScript: str
"""
version = self._versionToString(version)
flavor = self._flavorToString(flavor)
cfg = self.getConaryConfig()
if quiet:
callback = _QuietUpdateCallback()
else:
callback = None
cfg = copy.deepcopy(cfg)
cfg.root = targetDir
updatecmd.doUpdate(cfg, '%s=%s[%s]' % (name, version, flavor),
callback=callback, depCheck=False, tagScript=tagScript)
def _buildTroveSpec(self, searchPath, packageNames):
flv = self._getFlavor(searchPath[2], keepNone = True)
if searchPath[0] is None:
# the search path was a label. We look for exactly this package
return [ (packageName, str(searchPath[1]), flv)
for packageName in packageNames ]
return [ (str(searchPath[0]), str(searchPath[1]), flv) ]
def _findPackageInSearchPaths(self, searchPaths, packageName):
return self._findPackagesInSearchPaths(searchPaths, [ packageName ])[0]
def _findPackagesInSearchPaths(self, searchPaths, packageNames):
repos = self._getRepositoryClient()
# Compose a list of all search paths. If a label is presented,
# add all package names to it.
extTroveSpecs = [ self._buildTroveSpec(x, packageNames)
for x in searchPaths ]
troveSpecs = list(itertools.chain(*extTroveSpecs))
results = repos.findTroves(None, troveSpecs, allowMissing = True)
troveSpecResultsByPkgName = {}
for packageName in packageNames:
troveSpecResults = [ [] for x in troveSpecs ]
troveSpecResultsByPkgName[packageName] = troveSpecResults
groupTroveList = []
# Map back into groupTroveList
groupIndexMap = {}
# it's important that we go through this list in order so
# that you'll find matches earlier on the searchPath first.
for idx, (searchPath, troveSpecs) in enumerate(zip(searchPaths, extTroveSpecs)):
for packageName, troveSpec in zip(packageNames, troveSpecs):
troveList = results.get(troveSpec)
if not troveList:
continue
# we may have multiple flavors here. We only want those
# flavors of these troves that have been built most recently
# to be taken into account
maxVersion = sorted(troveList, key=lambda x:x[1])[-1][1]
troveTups = [ x for x in troveList if x[1] == maxVersion ]
if searchPath[0] is not None:
# This is a group
assert len(troveSpecs) == 1
groupTroveList.extend(troveTups)
# Add indices back, so we know where to put the results
for troveTup in troveTups:
groupIndexMap.setdefault(troveTup, []).append(idx)
continue # outer for loop too, we only have one entry in troveSpecs
# Not a group; trove(s) found on this label
troveSpecResultsByPkgName[packageName][idx] = troveTups
if groupTroveList:
groupTroves = repos.getTroves(groupTroveList, withFiles=False)
for trv, troveSpec in zip(groupTroves, groupTroveList):
idxList = groupIndexMap[troveSpec]
for troveTup in trv.iterTroveList(weakRefs=True,
strongRefs=True):
for packageName in packageNames:
if troveTup[0] != packageName:
continue
troveSpecResults = troveSpecResultsByPkgName[packageName]
for idx in idxList:
troveSpecResults[idx].append(troveTup)
ret = []
for packageName in packageNames:
troveSpecResults = troveSpecResultsByPkgName[packageName]
matchingTroveList = list(itertools.chain(*troveSpecResults))
ret.append(matchingTroveList)
return ret
def _overrideFlavors(self, baseFlavor, flavorList):
baseFlavor = self._getFlavor(baseFlavor)
return [ str(deps.overrideFlavor(baseFlavor, self._getFlavor(x)))
for x in flavorList ]
def _getFlavorArch(self, flavor):
flavor = self._getFlavor(flavor)
return deps.getMajorArch(flavor)
def _getShortFlavorDescriptors(self, flavorList):
if not flavorList:
return {}
descriptions = deps.getShortFlavorDescriptors(
[ self._getFlavor(x) for x in flavorList ])
return dict((str(x[0]), x[1]) for x in descriptions.items())
def _loadRecipeClassFromCheckout(self, recipePath):
directory = os.path.dirname(recipePath)
repos, sourceState = self._getRepositoryStateFromDirectory(directory)
cfg = self.getConaryConfig()
self._initializeFlavors()
loader = loadrecipe.RecipeLoader(recipePath, cfg=cfg,
repos=repos,
branch=sourceState.getBranch(),
buildFlavor=cfg.buildFlavor)
return loader.getRecipe()
def _removeNonRecipeFilesFromCheckout(self, recipePath):
recipeDir = os.path.dirname(recipePath)
recipeName = os.path.basename(recipePath)
repos = self._getRepositoryClient()
statePath = os.path.join(recipeDir, 'CONARY')
conaryState = state.ConaryStateFromFile(statePath, repos)
sourceState = conaryState.getSourceState()
for (pathId, path, _, _) in list(sourceState.iterFileList()):
if path == recipeName:
continue
path = os.path.join(recipeDir, path)
sourceState.removeFile(pathId)
if util.exists(path):
statInfo = os.lstat(path)
try:
if statInfo.st_mode & stat.S_IFDIR:
os.rmdir(path)
else:
os.unlink(path)
except OSError, e:
self._handle.ui.warning(
"cannot remove %s: %s", path, e.strerror)
conaryState.write(statePath)
@staticmethod
def getNameForCheckout(checkoutDir):
conaryState = state.ConaryStateFromFile(checkoutDir + '/CONARY')
return conaryState.getSourceState().getName().split(':', 1)[0]
@staticmethod
def isGroupName(packageName):
return trove.troveIsGroup(packageName)
def getAllLabelsFromTroves(self, troveSpecs):
"""
Return the set of labels referenced by a number of troves.
        @param troveSpecs: List of trovespec tuples to inspect
        @type troveSpecs: C{[troveSpecTuple]}
@rtype: C{set}
"""
repos = self._getRepositoryClient()
fetchTups = self._findTrovesFlattened(troveSpecs)
labels = set()
        for trv in repos.getTroves(fetchTups, withFiles=False):
            labels.add(trv.getVersion().trailingLabel())
            for subTroveTup in trv.iterTroveList(strongRefs=True, weakRefs=True):
                labels.add(subTroveTup[1].trailingLabel())
return set(x.asString() for x in labels)
def promoteGroups(self, groupList, fromTo, infoOnly=False):
"""
Promote the troves in C{groupList} using the promote map in
C{fromTo}. The former should be a list of trove tuples, and the
latter a dictionary mapping of labels (C{from: to}).
@param groupList: List of group trove tuples to promote
@type groupList: [(name, version, flavor)]
@param fromTo: Mapping of labels to execute promote on
@type fromTo: {from: to}
@param infoOnly: If C{True}, return without committing anything
@type infoOnly: C{bool}
"""
def getLabelOrBranch(label):
if isinstance(label, types.StringTypes):
if label.startswith('/'):
return self._getVersion(label)
else:
return self._getLabel(label)
return label
promoteMap = dict((self._getLabel(fromLabel), getLabelOrBranch(toLabel))
for (fromLabel, toLabel) in fromTo.iteritems())
client = self._getConaryClient()
success, cs = client.createSiblingCloneChangeSet(promoteMap,
groupList, cloneSources=True)
if not success:
raise errors.RbuildError('Promote failed.')
else:
packageList = [ x.getNewNameVersionFlavor()
for x in cs.iterNewTroveList() ]
packageList = [ (str(x[0]), str(x[1]), str(x[2]))
for x in packageList ]
if not infoOnly:
self._getRepositoryClient().commitChangeSet(cs)
return packageList
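    # A minimal sketch of a promote call (illustrative; the group tuple and
    # labels are hypothetical):
    #
    #   promoted = handle.facade.conary.promoteGroups(
    #       [('group-example', '/example.com@ns:devel/1-1-1', '')],
    #       {'example.com@ns:devel': 'example.com@ns:qa'},
    #       infoOnly=True)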
def detachPackage(self, troveSpec, targetLabel, message=None):
cfg = self.getConaryConfig()
if not message:
message = 'Automatic promote by rBuild.'
return clone.CloneTrove(cfg, targetLabel,
[troveSpec[0]+'='+troveSpec[1].asString()],
message=message)
def getLatestPackagesOnLabel(self, label, keepComponents=False,
keepGroups=False):
client = self._getConaryClient()
label = self._getLabel(label)
results = client.getRepos().getTroveLatestByLabel(
{None: {label: [None]}})
packages = []
for name, versiondict in results.iteritems():
if ':' in name and not keepComponents:
continue
elif trove.troveIsGroup(name) and not keepGroups:
continue
for version, flavors in versiondict.iteritems():
for flavor in flavors:
packages.append((name, version, flavor))
return packages
@staticmethod
def parseTroveSpec(troveSpec):
return cmdline.parseTroveSpec(troveSpec)
#pylint: disable-msg=C0103,R0901,W0221,R0904
# "The creature can't help its ancestry"
class _QuietUpdateCallback(checkin.CheckinCallback):
"""
Make checkout a bit quieter
"""
#pylint: disable-msg=W0613
# unused arguments
# implements an interface that may pass arguments that need to be ignored
def setUpdateJob(self, *args, **kw):
#pylint: disable-msg=C0999
# arguments not documented: implements interface, ignores parameters
'stifle update announcement for extract'
return
|
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import textwrap
import unittest
from gae_ext_runtime import testutil
RUNTIME_DEF_ROOT = os.path.dirname(os.path.dirname(__file__))
class RuntimeTestCase(testutil.TestBase):
"""Tests for the PHP external runtime fingerprinter."""
def license(self):
return textwrap.dedent('''\
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''')
def preamble(self):
return textwrap.dedent('''\
# Dockerfile extending the generic PHP image with application files for a
# single application.
FROM gcr.io/google-appengine/php:latest
# The Docker image will configure the document root according to this
# environment variable.
''')
def setUp(self):
self.runtime_def_root = RUNTIME_DEF_ROOT
super(RuntimeTestCase, self).setUp()
def file_contents(self, filename):
with open(self.full_path(filename)) as f:
return f.read()
def test_generate_without_php_files(self):
self.write_file('index.html', 'index')
self.assertFalse(self.generate_configs())
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
self.assertFalse(os.path.exists(self.full_path('Dockerfile')))
self.assertFalse(os.path.exists(self.full_path('.dockerignore')))
def test_generate_with_php_files(self):
self.write_file('index.php', 'index')
self.generate_configs()
app_yaml = self.file_contents('app.yaml')
self.assertIn('runtime: php\n', app_yaml)
self.assertIn('env: flex\n', app_yaml)
self.assertIn('runtime_config:\n document_root: .\n', app_yaml)
self.assertFalse(os.path.exists(self.full_path('Dockerfile')))
self.assertFalse(os.path.exists(self.full_path('.dockerignore')))
def test_generate_with_php_files_no_write(self):
"""Test generate_config_data with a .php file.
Checks app.yaml contents, app.yaml is written to disk, and
Dockerfile and .dockerignore not in the directory.
"""
self.write_file('index.php', 'index')
self.generate_config_data()
app_yaml = self.file_contents('app.yaml')
self.assertIn('runtime: php\n', app_yaml)
self.assertIn('env: flex\n', app_yaml)
self.assertIn('runtime_config:\n document_root: .\n', app_yaml)
self.assertFalse(os.path.exists(self.full_path('Dockerfile')))
self.assertFalse(os.path.exists(self.full_path('.dockerignore')))
def test_generate_custom_runtime(self):
self.write_file('index.php', 'index')
self.generate_configs(custom=True)
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(dockerfile, self.preamble() + textwrap.dedent('''\
ENV DOCUMENT_ROOT /app
'''))
self.assert_file_exists_with_contents(
'.dockerignore',
self.license() + textwrap.dedent('''\
.dockerignore
Dockerfile
.git
.hg
.svn
'''))
def test_generate_custom_runtime_no_write(self):
"""Tests generate_config_data with custom runtime."""
self.write_file('index.php', 'index')
cfg_files = self.generate_config_data(custom=True)
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
self.preamble() + textwrap.dedent('''\
ENV DOCUMENT_ROOT /app
'''))
self.assert_genfile_exists_with_contents(
cfg_files,
'.dockerignore',
self.license() + textwrap.dedent('''\
.dockerignore
Dockerfile
.git
.hg
.svn
'''))
def test_generate_with_deploy(self):
self.write_file('index.php', 'index')
self.generate_configs(deploy=True)
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(dockerfile, textwrap.dedent('''\
# Dockerfile extending the generic PHP image with application files for a
# single application.
FROM gcr.io/google-appengine/php:latest
# The Docker image will configure the document root according to this
# environment variable.
ENV DOCUMENT_ROOT /app
'''))
dockerignore = self.file_contents('.dockerignore')
self.assertEqual(dockerignore, self.license() + textwrap.dedent('''\
.dockerignore
Dockerfile
.git
.hg
.svn
'''))
def test_generate_with_deploy_no_write(self):
"""Tests generate_config_data with deploy=True."""
self.write_file('index.php', 'index')
cfg_files = self.generate_config_data(deploy=True)
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
self.preamble() + textwrap.dedent('''\
ENV DOCUMENT_ROOT /app
'''))
self.assert_genfile_exists_with_contents(
cfg_files,
'.dockerignore',
self.license() + textwrap.dedent('''\
.dockerignore
Dockerfile
.git
.hg
.svn
'''))
def test_generate_with_existing_appinfo(self):
self.write_file('index.php', 'index')
appinfo = testutil.AppInfoFake(
runtime_config={'document_root': 'wordpress'},
entrypoint='["/bin/bash", "my-cmd.sh"]')
self.generate_configs(deploy=True, appinfo=appinfo)
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(dockerfile, self.preamble() + textwrap.dedent('''\
ENV DOCUMENT_ROOT /app/wordpress
# Allow custom CMD
CMD ["/bin/bash", "my-cmd.sh"]
'''))
dockerignore = self.file_contents('.dockerignore')
self.assertEqual(dockerignore, self.license() + textwrap.dedent('''\
.dockerignore
Dockerfile
.git
.hg
.svn
'''))
def test_generate_with_existing_appinfo_no_write(self):
"""Tests generate_config_data with fake appinfo."""
self.write_file('index.php', 'index')
appinfo = testutil.AppInfoFake(
runtime_config={'document_root': 'wordpress'},
entrypoint='["/bin/bash", "my-cmd.sh"]')
cfg_files = self.generate_config_data(deploy=True, appinfo=appinfo)
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
self.preamble() + textwrap.dedent('''\
ENV DOCUMENT_ROOT /app/wordpress
# Allow custom CMD
CMD ["/bin/bash", "my-cmd.sh"]
'''))
self.assert_genfile_exists_with_contents(
cfg_files,
'.dockerignore',
self.license() + textwrap.dedent('''\
.dockerignore
Dockerfile
.git
.hg
.svn
'''))
def test_generate_with_array_entrypoint(self):
self.write_file('index.php', 'index')
appinfo = testutil.AppInfoFake(
runtime_config={'document_root': 'wordpress'},
entrypoint=['/bin/bash', 'my-cmd.sh'])
self.generate_configs(deploy=True, appinfo=appinfo)
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(dockerfile, self.preamble() + textwrap.dedent('''\
ENV DOCUMENT_ROOT /app/wordpress
# Allow custom CMD
CMD ["/bin/bash", "my-cmd.sh"]
'''))
def test_generate_with_array_entrypoint_no_write(self):
"""Tests generate_config_data with an array entrypoint."""
self.write_file('index.php', 'index')
appinfo = testutil.AppInfoFake(
runtime_config={'document_root': 'wordpress'},
entrypoint=["/bin/bash", "my-cmd.sh"])
cfg_files = self.generate_config_data(deploy=True, appinfo=appinfo)
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
self.preamble() + textwrap.dedent('''\
ENV DOCUMENT_ROOT /app/wordpress
# Allow custom CMD
CMD ["/bin/bash", "my-cmd.sh"]
'''))
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import division
from pdb import set_trace
from os import environ, getcwd, walk
from os.path import expanduser
import random  # needed by random.seed() in the __main__ block below
import sys
# Update PYTHONPATH
HOME = expanduser('~')
axe = HOME + '/git/axe/axe/' # AXE
pystat = HOME + '/git/pystats/' # PySTAT
cwd = getcwd() # Current Directory
sys.path.extend([axe, pystat, cwd])
from scipy.stats.mstats import mode
from scipy.spatial.distance import euclidean
from numpy import mean
from random import choice, uniform as rand
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from smote import *
import pandas as pd
from tools.axe.abcd import _Abcd
from methods1 import *
from tools.sk import rdivDemo
def formatData(tbl):
Rows = [i.cells for i in tbl._rows]
headers = [i.name for i in tbl.headers]
return pd.DataFrame(Rows, columns=headers)
def Bugs(tbl):
cells = [i.cells[-2] for i in tbl._rows]
return cells
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PREDICTION SYSTEMS:
# ```````````````````
# 1. WHERE2 2. RANDOM FORESTS, 3. DECISION TREES, 4. ADABOOST,
# 5. LOGISTIC REGRESSION
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def where2prd(train, test, tunings=[None, None], smoteit=False, thresh=1):
"WHERE2"
def flatten(x):
"""
Takes an N times nested list of list like [[a,b],[c, [d, e]],[f]]
and returns a single list [a,b,c,d,e,f]
"""
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def leaves(node):
"""
Returns all terminal nodes.
"""
L = []
if len(node.kids) > 1:
for l in node.kids:
L.extend(leaves(l))
return L
elif len(node.kids) == 1:
return [node.kids]
else:
return [node]
train_DF = createTbl(
train,
settings=tunings[0],
_smote=False,
isBin=True,
bugThres=2)
test_df = createTbl(test)
t = discreteNums(train_DF, map(lambda x: x.cells, train_DF._rows))
myTree = tdiv(t, opt=tunings[1])
testCase = test_df._rows
rows, preds = [], []
for tC in testCase:
newRow = tC
loc = drop(tC, myTree) # Drop a test case in the tree & see where it lands
leafNodes = flatten(leaves(loc))
# set_trace()
rows = [leaf.rows for leaf in leafNodes][0]
vals = [r.cells[-2] for r in rows]
preds.append(0 if mean(vals) < thresh else 1)
# if median(vals) > 0 else preds.extend([0])
return preds
def _where2pred():
"Test where2"
dir = '../Data'
one, two = explore(dir)
# set_trace()
# Training data
train = one[0][:-1]
# Test data
test = [one[0][-1]]
actual = Bugs(createTbl(test, isBin=True))
preds = where2prd(train, test)
# for a, b in zip(actual, preds): print a, b
# set_trace()
return _Abcd(before=actual, after=preds, show=False)[-1]
def rforest(train, test, tunings=None, smoteit=True, duplicate=True):
"RF "
# Apply random forest Classifier to predict the number of bugs.
if smoteit:
train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
if not tunings:
clf = RandomForestClassifier(n_estimators=100, random_state=1)
else:
clf = RandomForestClassifier(n_estimators=int(tunings[0]),
max_features=tunings[1] / 100,
min_samples_leaf=int(tunings[2]),
min_samples_split=int(tunings[3])
)
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]])
return preds
def rforest2(train, test, tunings=None, smoteit=True, duplicate=True):
"RF "
# Apply random forest Classifier to predict the number of bugs.
if smoteit:
train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
if not tunings:
clf = RandomForestRegressor(n_estimators=100, random_state=1)
else:
clf = RandomForestRegressor(n_estimators=int(tunings[0]),
max_features=tunings[1] / 100,
min_samples_leaf=int(tunings[2]),
min_samples_split=int(tunings[3])
)
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]])
return preds
def _RF():
"Test RF"
dir = '../Data'
one, two = explore(dir)
# Training data
train_DF = createTbl([one[0][0]])
# Test data
test_df = createTbl([one[0][1]])
actual = Bugs(test_df)
# rforest() takes an optional `tunings` list rather than the stale
# mss/msl/max_feat/n_est keywords; fall back to its defaults here.
preds = rforest(train_DF, test_df, smoteit=False)
print _Abcd(before=actual, after=preds, show=False)[-1]
def CART(train, test, tunings=None, smoteit=True, duplicate=True):
" CART"
# Apply random forest Classifier to predict the number of bugs.
if smoteit:
train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
if not tunings:
clf = DecisionTreeClassifier()
else:
clf = DecisionTreeClassifier(max_depth=int(tunings[0]),
min_samples_split=int(tunings[1]),
min_samples_leaf=int(tunings[2]),
max_features=float(tunings[3] / 100),
max_leaf_nodes=int(tunings[4]),
criterion='entropy')
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features].astype('float32'), klass.astype('float32'))
preds = clf.predict(test_DF[test_DF.columns[:-2]].astype('float32')).tolist()
return preds
def _CART():
"Test CART"
dir = './Data'
one, two = explore(dir)
# Training data
train_DF = createTbl(one[0])
# Test data
test_df = createTbl(two[0])
actual = Bugs(test_df)
preds = CART(train_DF, test_df)
set_trace()
_Abcd(before=actual, after=preds, show=True)
def adaboost(train, test, smoteit=True):
"ADABOOST"
if smoteit:
train = SMOTE(train)
clf = AdaBoostClassifier()
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]]).tolist()
return preds
def _adaboost():
"Test AdaBoost"
dir = './Data'
one, two = explore(dir)
# Training data
train_DF = createTbl(one[0])
# Test data
test_df = createTbl(two[0])
actual = Bugs(test_df)
preds = adaboost(train_DF, test_df)
set_trace()
_Abcd(before=actual, after=preds, show=True)
def logit(train, test, smoteit=True):
"Logistic Regression"
if smoteit:
train = SMOTE(train)
clf = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0,
fit_intercept=True, intercept_scaling=1,
class_weight=None, random_state=None)
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]]).tolist()
return preds
def _logit():
"Test LOGIT"
dir = './Data'
one, two = explore(dir)
# Training data
train_DF = createTbl(one[0])
# Test data
test_df = createTbl(two[0])
actual = Bugs(test_df)
preds = logit(train_DF, test_df)
set_trace()
_Abcd(before=actual, after=preds, show=True)
def knn(train, test, smoteit=True):
"kNN"
if smoteit:
train = SMOTE(train)
neigh = KNeighborsClassifier()
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
neigh.fit(train_DF[features], klass)
preds = neigh.predict(test_DF[test_DF.columns[:-2]]).tolist()
return preds
if __name__ == '__main__':
random.seed(0)
Dat = []
for _ in xrange(10):
print(_where2pred())
# Dat.insert(0, 'Where2 untuned')
# rdivDemo([Dat])
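# -------------------------------------------------------------------------
# Hedged, self-contained sketch (not part of the original experiment): the
# fit/predict pattern that rforest() above applies to formatted defect
# tables, shown on a tiny synthetic DataFrame so it runs without the local
# `methods1`/`smote`/`tools.axe` helpers. The column layout mirrors
# formatData(): every column except the last two is a feature and the
# second-to-last column ('bugs' here, an invented name) is the class label.
if __name__ == '__main__':
  import pandas as _pd
  from sklearn.ensemble import RandomForestClassifier as _RFC
  _train = _pd.DataFrame(
      [[10, 1, 0, 'a'], [200, 9, 1, 'b'], [35, 2, 0, 'c'], [400, 12, 1, 'd']],
      columns=['loc', 'cc', 'bugs', 'name'])
  _test = _pd.DataFrame(
      [[15, 1, 0, 'e'], [350, 10, 1, 'f']],
      columns=['loc', 'cc', 'bugs', 'name'])
  _clf = _RFC(n_estimators=10, random_state=1)
  _clf.fit(_train[_train.columns[:-2]], _train[_train.columns[-2]])
  print(_clf.predict(_test[_test.columns[:-2]]))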
|
|
"""Useful utility decorators. """
from __future__ import print_function, division
import sys
import types
import inspect
from sympy.core.decorators import wraps
from sympy.core.compatibility import class_types, get_function_globals, get_function_name, iterable
from sympy.utilities.runtests import DependencyError, SymPyDocTests, PyTestReporter
def threaded_factory(func, use_add):
"""A factory for ``threaded`` decorators. """
from sympy.core import sympify
from sympy.matrices import MatrixBase
@wraps(func)
def threaded_func(expr, *args, **kwargs):
if isinstance(expr, MatrixBase):
return expr.applyfunc(lambda f: func(f, *args, **kwargs))
elif iterable(expr):
try:
return expr.__class__([func(f, *args, **kwargs) for f in expr])
except TypeError:
return expr
else:
expr = sympify(expr)
if use_add and expr.is_Add:
return expr.__class__(*[ func(f, *args, **kwargs) for f in expr.args ])
elif expr.is_Relational:
return expr.__class__(func(expr.lhs, *args, **kwargs),
func(expr.rhs, *args, **kwargs))
else:
return func(expr, *args, **kwargs)
return threaded_func
def threaded(func):
"""Apply ``func`` to sub--elements of an object, including :class:`~.Add`.
This decorator is intended to make it uniformly possible to apply a
function to all elements of composite objects, e.g. matrices, lists, tuples
and other iterable containers, or just expressions.
This version of :func:`threaded` decorator allows threading over
elements of :class:`~.Add` class. If this behavior is not desirable
use :func:`xthreaded` decorator.
Functions using this decorator must have the following signature::
@threaded
def function(expr, *args, **kwargs):
"""
return threaded_factory(func, True)
def xthreaded(func):
"""Apply ``func`` to sub--elements of an object, excluding :class:`~.Add`.
This decorator is intended to make it uniformly possible to apply a
function to all elements of composite objects, e.g. matrices, lists, tuples
and other iterable containers, or just expressions.
This version of :func:`threaded` decorator disallows threading over
elements of :class:`~.Add` class. If this behavior is not desirable
use :func:`threaded` decorator.
Functions using this decorator must have the following signature::
@xthreaded
def function(expr, *args, **kwargs):
"""
return threaded_factory(func, False)
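# Hedged usage sketch (not from the original module): how the ``threaded``
# decorator defined above distributes a helper over Add terms and matrix
# entries. ``_double`` is an invented example function; a working SymPy
# installation is assumed.
if __name__ == "__main__":  # pragma: no cover
    from sympy import Matrix, symbols

    x, y = symbols("x y")

    @threaded
    def _double(expr):
        # Called on each non-Add sub-expression.
        return 2*expr

    print(_double(x + y))             # threads over the Add: 2*x + 2*y
    print(_double(Matrix([[x, y]])))  # threads element-wise over the matrix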
def conserve_mpmath_dps(func):
"""After the function finishes, resets the value of mpmath.mp.dps to
the value it had before the function was run."""
import functools
import mpmath
def func_wrapper(*args, **kwargs):
dps = mpmath.mp.dps
try:
return func(*args, **kwargs)
finally:
mpmath.mp.dps = dps
func_wrapper = functools.update_wrapper(func_wrapper, func)
return func_wrapper
class no_attrs_in_subclass(object):
"""Don't 'inherit' certain attributes from a base class
>>> from sympy.utilities.decorator import no_attrs_in_subclass
>>> class A(object):
... x = 'test'
>>> A.x = no_attrs_in_subclass(A, A.x)
>>> class B(A):
... pass
>>> hasattr(A, 'x')
True
>>> hasattr(B, 'x')
False
"""
def __init__(self, cls, f):
self.cls = cls
self.f = f
def __get__(self, instance, owner=None):
if owner == self.cls:
if hasattr(self.f, '__get__'):
return self.f.__get__(instance, owner)
return self.f
raise AttributeError
def doctest_depends_on(exe=None, modules=None, disable_viewers=None, python_version=None):
"""
Adds metadata about the dependencies which need to be met for doctesting
the docstrings of the decorated objects.
exe should be a list of executables
modules should be a list of modules
disable_viewers should be a list of viewers for preview() to disable
python_version should be the minimum Python version required, as a tuple
(like (3, 0))
"""
dependencies = {}
if exe is not None:
dependencies['executables'] = exe
if modules is not None:
dependencies['modules'] = modules
if disable_viewers is not None:
dependencies['disable_viewers'] = disable_viewers
if python_version is not None:
dependencies['python_version'] = python_version
def skiptests():
r = PyTestReporter()
t = SymPyDocTests(r, None)
try:
t._check_dependencies(**dependencies)
except DependencyError:
return True # Skip doctests
else:
return False # Run doctests
def depends_on_deco(fn):
fn._doctest_depends_on = dependencies
fn.__doctest_skip__ = skiptests
if inspect.isclass(fn):
fn._doctest_depends_on = no_attrs_in_subclass(
fn, fn._doctest_depends_on)
fn.__doctest_skip__ = no_attrs_in_subclass(
fn, fn.__doctest_skip__)
return fn
return depends_on_deco
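# Hedged usage note (not from the original module): the decorator is meant to
# wrap objects whose doctests need optional dependencies, e.g.
#
#     @doctest_depends_on(modules=('numpy',))
#     def plot_points(...):   # hypothetical function name
#         ...
#
# so the SymPy doctest machinery can skip those doctests when the recorded
# requirements are not met.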
def public(obj):
"""
Append ``obj``'s name to global ``__all__`` variable (call site).
By using this decorator on functions or classes you achieve the same goal
as by filling ``__all__`` variables manually, you just don't have to repeat
yourself (object's name). You also know if object is public at definition
site, not at some random location (where ``__all__`` was set).
Note that in multiple decorator setup (in almost all cases) ``@public``
decorator must be applied before any other decorators, because it relies
on the pointer to object's global namespace. If you apply other decorators
first, ``@public`` may end up modifying the wrong namespace.
Examples
========
>>> from sympy.utilities.decorator import public
>>> __all__
Traceback (most recent call last):
...
NameError: name '__all__' is not defined
>>> @public
... def some_function():
... pass
>>> __all__
['some_function']
"""
if isinstance(obj, types.FunctionType):
ns = get_function_globals(obj)
name = get_function_name(obj)
elif isinstance(obj, (type(type), class_types)):
ns = sys.modules[obj.__module__].__dict__
name = obj.__name__
else:
raise TypeError("expected a function or a class, got %s" % obj)
if "__all__" not in ns:
ns["__all__"] = [name]
else:
ns["__all__"].append(name)
return obj
def memoize_property(propfunc):
"""Property decorator that caches the value of potentially expensive
`propfunc` after the first evaluation. The cached value is stored in
the corresponding property name with an attached underscore."""
attrname = '_' + propfunc.__name__
sentinel = object()
@wraps(propfunc)
def accessor(self):
val = getattr(self, attrname, sentinel)
if val is sentinel:
val = propfunc(self)
setattr(self, attrname, val)
return val
return property(accessor)
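# Hedged sketch (not from the original module): memoize_property caches the
# first computed value on the instance under '_<property name>'. ``_Demo`` is
# an invented example class.
if __name__ == "__main__":  # pragma: no cover
    class _Demo(object):
        calls = 0

        @memoize_property
        def expensive(self):
            _Demo.calls += 1
            return 42

    d = _Demo()
    print("%s %s calls=%s" % (d.expensive, d.expensive, _Demo.calls))  # 42 42 calls=1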
|
|
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
from __future__ import absolute_import
import logging
from collections import OrderedDict
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal import pep425tags
from pip._internal.exceptions import InstallationError
from pip._internal.models.wheel import Wheel
from pip._internal.utils.logging import indent_log
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, Iterable, List, Optional, Tuple
from pip._internal.req.req_install import InstallRequirement
logger = logging.getLogger(__name__)
class RequirementSet(object):
def __init__(self, check_supported_wheels=True):
# type: (bool) -> None
"""Create a RequirementSet.
"""
self.requirements = OrderedDict() # type: Dict[str, InstallRequirement] # noqa: E501
self.check_supported_wheels = check_supported_wheels
self.unnamed_requirements = [] # type: List[InstallRequirement]
self.successfully_downloaded = [] # type: List[InstallRequirement]
self.reqs_to_cleanup = [] # type: List[InstallRequirement]
def __str__(self):
# type: () -> str
requirements = sorted(
(req for req in self.requirements.values() if not req.comes_from),
key=lambda req: canonicalize_name(req.name),
)
return ' '.join(str(req.req) for req in requirements)
def __repr__(self):
# type: () -> str
requirements = sorted(
self.requirements.values(),
key=lambda req: canonicalize_name(req.name),
)
format_string = '<{classname} object; {count} requirement(s): {reqs}>'
return format_string.format(
classname=self.__class__.__name__,
count=len(requirements),
reqs=', '.join(str(req.req) for req in requirements),
)
def add_unnamed_requirement(self, install_req):
# type: (InstallRequirement) -> None
assert not install_req.name
self.unnamed_requirements.append(install_req)
def add_named_requirement(self, install_req):
# type: (InstallRequirement) -> None
assert install_req.name
project_name = canonicalize_name(install_req.name)
self.requirements[project_name] = install_req
def add_requirement(
self,
install_req, # type: InstallRequirement
parent_req_name=None, # type: Optional[str]
extras_requested=None # type: Optional[Iterable[str]]
):
# type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]] # noqa: E501
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:param extras_requested: an iterable of extras used to evaluate the
environment markers.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
# If the markers do not match, ignore this requirement.
if not install_req.match_markers(extras_requested):
logger.info(
"Ignoring %s: markers '%s' don't match your environment",
install_req.name, install_req.markers,
)
return [], None
# If the wheel is not supported, raise an error.
# Should check this after filtering out based on environment markers to
# allow specifying different wheels based on the environment/OS, in a
# single requirements file.
if install_req.link and install_req.link.is_wheel:
wheel = Wheel(install_req.link.filename)
tags = pep425tags.get_supported()
if (self.check_supported_wheels and not wheel.supported(tags)):
raise InstallationError(
"%s is not a supported wheel on this platform." %
wheel.filename
)
# This next bit is really a sanity check.
assert install_req.is_direct == (parent_req_name is None), (
"a direct req shouldn't have a parent and also, "
"a non direct req should have a parent"
)
# Unnamed requirements are scanned again and the requirement won't be
# added as a dependency until after scanning.
if not install_req.name:
self.add_unnamed_requirement(install_req)
return [install_req], None
try:
existing_req = self.get_requirement(install_req.name)
except KeyError:
existing_req = None
has_conflicting_requirement = (
parent_req_name is None and
existing_req and
not existing_req.constraint and
existing_req.extras == install_req.extras and
existing_req.req.specifier != install_req.req.specifier
)
if has_conflicting_requirement:
raise InstallationError(
"Double requirement given: %s (already in %s, name=%r)"
% (install_req, existing_req, install_req.name)
)
# When no existing requirement exists, add the requirement as a
# dependency and it will be scanned again after.
if not existing_req:
self.add_named_requirement(install_req)
# We'd want to rescan this requirement later
return [install_req], install_req
# Assume there's no need to scan, and that we've already
# encountered this for scanning.
if install_req.constraint or not existing_req.constraint:
return [], existing_req
does_not_satisfy_constraint = (
install_req.link and
not (
existing_req.link and
install_req.link.path == existing_req.link.path
)
)
if does_not_satisfy_constraint:
self.reqs_to_cleanup.append(install_req)
raise InstallationError(
"Could not satisfy constraints for '%s': "
"installation from path or url cannot be "
"constrained to a version" % install_req.name,
)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
existing_req.extras = tuple(sorted(
set(existing_req.extras) | set(install_req.extras)
))
logger.debug(
"Setting %s extras to: %s",
existing_req, existing_req.extras,
)
# Return the existing requirement for addition to the parent and
# scanning again.
return [existing_req], existing_req
def has_requirement(self, name):
# type: (str) -> bool
project_name = canonicalize_name(name)
return (
project_name in self.requirements and
not self.requirements[project_name].constraint
)
def get_requirement(self, name):
# type: (str) -> InstallRequirement
project_name = canonicalize_name(name)
if project_name in self.requirements:
return self.requirements[project_name]
raise KeyError("No project with the name %r" % name)
def cleanup_files(self):
# type: () -> None
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
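# Hedged sketch (not part of pip): exercising the name bookkeeping above with
# a minimal stand-in object instead of a real InstallRequirement, so it runs
# without driving the resolver. ``_FakeReq`` is invented and only provides the
# attributes used by add_named_requirement() / has_requirement().
if __name__ == "__main__":
    class _FakeReq(object):
        def __init__(self, name):
            self.name = name
            self.constraint = False

        def __repr__(self):
            return "<_FakeReq %s>" % self.name

    req_set = RequirementSet()
    req_set.add_named_requirement(_FakeReq("Requests"))
    print(req_set.has_requirement("requests"))   # True: names are canonicalized
    print(req_set.get_requirement("REQUESTS"))   # <_FakeReq Requests>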
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Data structures for message catalogs."""
from cgi import parse_header
from datetime import datetime, time as time_
from difflib import get_close_matches
from email import message_from_string
from copy import copy
import re
import time
from babel import __version__ as VERSION
from babel.core import Locale
from babel.dates import format_datetime
from babel.messages.plurals import get_plural
from babel.util import odict, distinct, LOCALTZ, FixedOffsetTimezone
from babel._compat import string_types, number_types
__all__ = ['Message', 'Catalog', 'TranslationError']
PYTHON_FORMAT = re.compile(r'''(?x)
\%
(?:\(([\w]*)\))?
(
[-#0\ +]?(?:\*|[\d]+)?
(?:\.(?:\*|[\d]+))?
[hlL]?
)
([diouxXeEfFgGcrs%])
''')
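# PYTHON_FORMAT matches printf-style placeholders such as '%s', '%d',
# '%(name)s' and '%.2f'; Message.python_format below uses it to decide
# whether the 'python-format' flag should be attached to a message.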
class Message(object):
"""Representation of a single message in a catalog."""
def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(),
user_comments=(), previous_id=(), lineno=None, context=None):
"""Create the message object.
:param id: the message ID, or a ``(singular, plural)`` tuple for
pluralizable messages
:param string: the translated message string, or a
``(singular, plural)`` tuple for pluralizable messages
:param locations: a sequence of ``(filename, lineno)`` tuples
:param flags: a set or sequence of flags
:param auto_comments: a sequence of automatic comments for the message
:param user_comments: a sequence of user comments for the message
:param previous_id: the previous message ID, or a ``(singular, plural)``
tuple for pluralizable messages
:param lineno: the line number on which the msgid line was found in the
PO file, if any
:param context: the message context
"""
self.id = id #: The message ID
if not string and self.pluralizable:
string = (u'', u'')
self.string = string #: The message translation
self.locations = list(distinct(locations))
self.flags = set(flags)
if id and self.python_format:
self.flags.add('python-format')
else:
self.flags.discard('python-format')
self.auto_comments = list(distinct(auto_comments))
self.user_comments = list(distinct(user_comments))
if isinstance(previous_id, string_types):
self.previous_id = [previous_id]
else:
self.previous_id = list(previous_id)
self.lineno = lineno
self.context = context
def __repr__(self):
return '<%s %r (flags: %r)>' % (type(self).__name__, self.id,
list(self.flags))
def __cmp__(self, obj):
"""Compare Messages, taking into account plural ids"""
def values_to_compare():
if isinstance(obj, Message):
plural = self.pluralizable
obj_plural = obj.pluralizable
if plural and obj_plural:
return self.id[0], obj.id[0]
elif plural:
return self.id[0], obj.id
elif obj_plural:
return self.id, obj.id[0]
return self.id, obj.id
this, other = values_to_compare()
return cmp(this, other)
def __gt__(self, other):
return self.__cmp__(other) > 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ne__(self, other):
return self.__cmp__(other) != 0
def clone(self):
return Message(*map(copy, (self.id, self.string, self.locations,
self.flags, self.auto_comments,
self.user_comments, self.previous_id,
self.lineno, self.context)))
def check(self, catalog=None):
"""Run various validation checks on the message. Some validations
are only performed if the catalog is provided. This method returns
a sequence of `TranslationError` objects.
:rtype: ``iterator``
:param catalog: A catalog instance that is passed to the checkers
:see: `Catalog.check` for a way to perform checks for all messages
in a catalog.
"""
from babel.messages.checkers import checkers
errors = []
for checker in checkers:
try:
checker(catalog, self)
except TranslationError as e:
errors.append(e)
return errors
@property
def fuzzy(self):
"""Whether the translation is fuzzy.
>>> Message('foo').fuzzy
False
>>> msg = Message('foo', 'foo', flags=['fuzzy'])
>>> msg.fuzzy
True
>>> msg
<Message 'foo' (flags: ['fuzzy'])>
:type: `bool`"""
return 'fuzzy' in self.flags
@property
def pluralizable(self):
"""Whether the message is plurizable.
>>> Message('foo').pluralizable
False
>>> Message(('foo', 'bar')).pluralizable
True
:type: `bool`"""
return isinstance(self.id, (list, tuple))
@property
def python_format(self):
"""Whether the message contains Python-style parameters.
>>> Message('foo %(name)s bar').python_format
True
>>> Message(('foo %(name)s', 'foo %(name)s')).python_format
True
:type: `bool`"""
ids = self.id
if not isinstance(ids, (list, tuple)):
ids = [ids]
return bool(filter(None, [PYTHON_FORMAT.search(id) for id in ids]))
class TranslationError(Exception):
"""Exception thrown by translation checkers when invalid message
translations are encountered."""
DEFAULT_HEADER = u"""\
# Translations template for PROJECT.
# Copyright (C) YEAR ORGANIZATION
# This file is distributed under the same license as the PROJECT project.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#"""
class Catalog(object):
"""Representation of a message catalog."""
def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER,
project=None, version=None, copyright_holder=None,
msgid_bugs_address=None, creation_date=None,
revision_date=None, last_translator=None, language_team=None,
charset='utf-8', fuzzy=True):
"""Initialize the catalog object.
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param header_comment: the header comment as string, or `None` for the
default header
:param project: the project's name
:param version: the project's version
:param copyright_holder: the copyright holder of the catalog
:param msgid_bugs_address: the email address or URL to submit bug
reports to
:param creation_date: the date the catalog was created
:param revision_date: the date the catalog was revised
:param last_translator: the name and email of the last translator
:param language_team: the name and email of the language team
:param charset: the encoding to use in the output
:param fuzzy: the fuzzy bit on the catalog header
"""
self.domain = domain #: The message domain
if locale:
locale = Locale.parse(locale)
self.locale = locale #: The locale or `None`
self._header_comment = header_comment
self._messages = odict()
self.project = project or 'PROJECT' #: The project name
self.version = version or 'VERSION' #: The project version
self.copyright_holder = copyright_holder or 'ORGANIZATION'
self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'
self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'
"""Name and email address of the last translator."""
self.language_team = language_team or 'LANGUAGE <LL@li.org>'
"""Name and email address of the language team."""
self.charset = charset or 'utf-8'
if creation_date is None:
creation_date = datetime.now(LOCALTZ)
elif isinstance(creation_date, datetime) and not creation_date.tzinfo:
creation_date = creation_date.replace(tzinfo=LOCALTZ)
self.creation_date = creation_date #: Creation date of the template
if revision_date is None:
revision_date = 'YEAR-MO-DA HO:MI+ZONE'
elif isinstance(revision_date, datetime) and not revision_date.tzinfo:
revision_date = revision_date.replace(tzinfo=LOCALTZ)
self.revision_date = revision_date #: Last revision date of the catalog
self.fuzzy = fuzzy #: Catalog header fuzzy bit (`True` or `False`)
self.obsolete = odict() #: Dictionary of obsolete messages
self._num_plurals = None
self._plural_expr = None
def _get_header_comment(self):
comment = self._header_comment
year = datetime.now(LOCALTZ).strftime('%Y')
if hasattr(self.revision_date, 'strftime'):
year = self.revision_date.strftime('%Y')
comment = comment.replace('PROJECT', self.project) \
.replace('VERSION', self.version) \
.replace('YEAR', year) \
.replace('ORGANIZATION', self.copyright_holder)
if self.locale:
comment = comment.replace('Translations template', '%s translations'
% self.locale.english_name)
return comment
def _set_header_comment(self, string):
self._header_comment = string
header_comment = property(_get_header_comment, _set_header_comment, doc="""\
The header comment for the catalog.
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> print catalog.header_comment #doctest: +ELLIPSIS
# Translations template for Foobar.
# Copyright (C) ... Foo Company
# This file is distributed under the same license as the Foobar project.
# FIRST AUTHOR <EMAIL@ADDRESS>, ....
#
The header can also be set from a string. Any known upper-case variables
will be replaced when the header is retrieved again:
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> catalog.header_comment = '''\\
... # The POT for my really cool PROJECT project.
... # Copyright (C) 1990-2003 ORGANIZATION
... # This file is distributed under the same license as the PROJECT
... # project.
... #'''
>>> print catalog.header_comment
# The POT for my really cool Foobar project.
# Copyright (C) 1990-2003 Foo Company
# This file is distributed under the same license as the Foobar
# project.
#
:type: `unicode`
""")
def _get_mime_headers(self):
headers = []
headers.append(('Project-Id-Version',
'%s %s' % (self.project, self.version)))
headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address))
headers.append(('POT-Creation-Date',
format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ',
locale='en')))
if isinstance(self.revision_date, (datetime, time_) + number_types):
headers.append(('PO-Revision-Date',
format_datetime(self.revision_date,
'yyyy-MM-dd HH:mmZ', locale='en')))
else:
headers.append(('PO-Revision-Date', self.revision_date))
headers.append(('Last-Translator', self.last_translator))
if (self.locale is not None) and ('LANGUAGE' in self.language_team):
headers.append(('Language-Team',
self.language_team.replace('LANGUAGE',
str(self.locale))))
else:
headers.append(('Language-Team', self.language_team))
if self.locale is not None:
headers.append(('Plural-Forms', self.plural_forms))
headers.append(('MIME-Version', '1.0'))
headers.append(('Content-Type',
'text/plain; charset=%s' % self.charset))
headers.append(('Content-Transfer-Encoding', '8bit'))
headers.append(('Generated-By', 'Babel %s\n' % VERSION))
return headers
def _set_mime_headers(self, headers):
for name, value in headers:
name = name.lower()
if name == 'project-id-version':
parts = value.split(' ')
self.project = u' '.join(parts[:-1])
self.version = parts[-1]
elif name == 'report-msgid-bugs-to':
self.msgid_bugs_address = value
elif name == 'last-translator':
self.last_translator = value
elif name == 'language-team':
self.language_team = value
elif name == 'content-type':
mimetype, params = parse_header(value)
if 'charset' in params:
self.charset = params['charset'].lower()
elif name == 'plural-forms':
_, params = parse_header(' ;' + value)
self._num_plurals = int(params.get('nplurals', 2))
self._plural_expr = params.get('plural', '(n != 1)')
elif name == 'pot-creation-date':
# FIXME: this should use dates.parse_datetime as soon as that
# is ready
value, tzoffset, _ = re.split(r'([+-]\d{4})$', value, 1)
tt = time.strptime(value, '%Y-%m-%d %H:%M')
ts = time.mktime(tt)
# Separate the offset into a sign component, hours, and minutes
plus_minus_s, rest = tzoffset[0], tzoffset[1:]
hours_offset_s, mins_offset_s = rest[:2], rest[2:]
# Make them all integers
plus_minus = int(plus_minus_s + '1')
hours_offset = int(hours_offset_s)
mins_offset = int(mins_offset_s)
# Calculate net offset
net_mins_offset = hours_offset * 60
net_mins_offset += mins_offset
net_mins_offset *= plus_minus
# Create an offset object
tzoffset = FixedOffsetTimezone(net_mins_offset)
# Store the offset in a datetime object
dt = datetime.fromtimestamp(ts)
self.creation_date = dt.replace(tzinfo=tzoffset)
elif name == 'po-revision-date':
# Keep the value if it's not the default one
if 'YEAR' not in value:
# FIXME: this should use dates.parse_datetime as soon as
# that is ready
value, tzoffset, _ = re.split(r'([+-]\d{4})$', value, 1)
tt = time.strptime(value, '%Y-%m-%d %H:%M')
ts = time.mktime(tt)
# Separate the offset into a sign component, hours, and
# minutes
plus_minus_s, rest = tzoffset[0], tzoffset[1:]
hours_offset_s, mins_offset_s = rest[:2], rest[2:]
# Make them all integers
plus_minus = int(plus_minus_s + '1')
hours_offset = int(hours_offset_s)
mins_offset = int(mins_offset_s)
# Calculate net offset
net_mins_offset = hours_offset * 60
net_mins_offset += mins_offset
net_mins_offset *= plus_minus
# Create an offset object
tzoffset = FixedOffsetTimezone(net_mins_offset)
# Store the offset in a datetime object
dt = datetime.fromtimestamp(ts)
self.revision_date = dt.replace(tzinfo=tzoffset)
mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
The MIME headers of the catalog, used for the special ``msgid ""`` entry.
The behavior of this property changes slightly depending on whether a locale
is set or not, the latter indicating that the catalog is actually a template
for actual translations.
Here's an example of the output for such a catalog template:
>>> from babel.dates import UTC
>>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)
>>> catalog = Catalog(project='Foobar', version='1.0',
... creation_date=created)
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
Last-Translator: FULL NAME <EMAIL@ADDRESS>
Language-Team: LANGUAGE <LL@li.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
And here's an example of the output when the locale is set:
>>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC)
>>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0',
... creation_date=created, revision_date=revised,
... last_translator='John Doe <jd@example.com>',
... language_team='de_DE <de@example.com>')
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: 1990-08-03 12:00+0000
Last-Translator: John Doe <jd@example.com>
Language-Team: de_DE <de@example.com>
Plural-Forms: nplurals=2; plural=(n != 1)
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
:type: `list`
""")
@property
def num_plurals(self):
"""The number of plurals used by the catalog or locale.
>>> Catalog(locale='en').num_plurals
2
>>> Catalog(locale='ga').num_plurals
3
:type: `int`"""
if self._num_plurals is None:
num = 2
if self.locale:
num = get_plural(self.locale)[0]
self._num_plurals = num
return self._num_plurals
@property
def plural_expr(self):
"""The plural expression used by the catalog or locale.
>>> Catalog(locale='en').plural_expr
'(n != 1)'
>>> Catalog(locale='ga').plural_expr
'(n==1 ? 0 : n==2 ? 1 : 2)'
:type: `string_types`"""
if self._plural_expr is None:
expr = '(n != 1)'
if self.locale:
expr = get_plural(self.locale)[1]
self._plural_expr = expr
return self._plural_expr
@property
def plural_forms(self):
"""Return the plural forms declaration for the locale.
>>> Catalog(locale='en').plural_forms
'nplurals=2; plural=(n != 1)'
>>> Catalog(locale='pt_BR').plural_forms
'nplurals=2; plural=(n > 1)'
:type: `str`"""
return 'nplurals=%s; plural=%s' % (self.num_plurals, self.plural_expr)
def __contains__(self, id):
"""Return whether the catalog has a message with the specified ID."""
return self._key_for(id) in self._messages
def __len__(self):
"""The number of messages in the catalog.
This does not include the special ``msgid ""`` entry."""
return len(self._messages)
def __iter__(self):
"""Iterates through all the entries in the catalog, in the order they
were added, yielding a `Message` object for every entry.
:rtype: ``iterator``"""
buf = []
for name, value in self.mime_headers:
buf.append('%s: %s' % (name, value))
flags = set()
if self.fuzzy:
flags |= set(['fuzzy'])
yield Message(u'', '\n'.join(buf), flags=flags)
for key in self._messages:
yield self._messages[key]
def __repr__(self):
locale = ''
if self.locale:
locale = ' %s' % self.locale
return '<%s %r%s>' % (type(self).__name__, self.domain, locale)
def __delitem__(self, id):
"""Delete the message with the specified ID."""
self.delete(id)
def __getitem__(self, id):
"""Return the message with the specified ID.
:param id: the message ID
:return: the message with the specified ID, or `None` if no such
message is in the catalog
:rtype: `Message`
"""
return self.get(id)
def __setitem__(self, id, message):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo')
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
If a message with that ID is already in the catalog, it is updated
to include the locations and flags of the new message.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
>>> catalog[u'foo'].locations
[('main.py', 1)]
>>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
>>> catalog[u'foo'].locations
[('main.py', 1), ('utils.py', 5)]
:param id: the message ID
:param message: the `Message` object
"""
assert isinstance(message, Message), 'expected a Message object'
key = self._key_for(id, message.context)
current = self._messages.get(key)
if current:
if message.pluralizable and not current.pluralizable:
# The new message adds pluralization
current.id = message.id
current.string = message.string
current.locations = list(distinct(current.locations +
message.locations))
current.auto_comments = list(distinct(current.auto_comments +
message.auto_comments))
current.user_comments = list(distinct(current.user_comments +
message.user_comments))
current.flags |= message.flags
message = current
elif id == '':
# special treatment for the header message
def _parse_header(header_string):
# message_from_string only works for str, not for unicode
headers = message_from_string(header_string.encode('utf8'))
decoded_headers = {}
for name, value in headers.items():
name = name.decode('utf8')
value = value.decode('utf8')
decoded_headers[name] = value
return decoded_headers
self.mime_headers = _parse_header(message.string).items()
self.header_comment = '\n'.join([('# %s' % c).rstrip() for c
in message.user_comments])
self.fuzzy = message.fuzzy
else:
if isinstance(id, (list, tuple)):
assert isinstance(message.string, (list, tuple)), \
'Expected sequence but got %s' % type(message.string)
self._messages[key] = message
def add(self, id, string=None, locations=(), flags=(), auto_comments=(),
user_comments=(), previous_id=(), lineno=None, context=None):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog.add(u'foo')
<Message ...>
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
This method simply constructs a `Message` object with the given
arguments and invokes `__setitem__` with that object.
:param id: the message ID, or a ``(singular, plural)`` tuple for
pluralizable messages
:param string: the translated message string, or a
``(singular, plural)`` tuple for pluralizable messages
:param locations: a sequence of ``(filename, lineno)`` tuples
:param flags: a set or sequence of flags
:param auto_comments: a sequence of automatic comments
:param user_comments: a sequence of user comments
:param previous_id: the previous message ID, or a ``(singular, plural)``
tuple for pluralizable messages
:param lineno: the line number on which the msgid line was found in the
PO file, if any
:param context: the message context
:return: the newly added message
:rtype: `Message`
"""
message = Message(id, string, list(locations), flags, auto_comments,
user_comments, previous_id, lineno=lineno,
context=context)
self[id] = message
return message
def check(self):
"""Run various validation checks on the translations in the catalog.
For every message that fails validation, this method yields a
``(message, errors)`` tuple, where ``message`` is the `Message` object
and ``errors`` is a sequence of `TranslationError` objects.
:rtype: ``iterator``
"""
for message in self._messages.values():
errors = message.check(catalog=self)
if errors:
yield message, errors
def get(self, id, context=None):
"""Return the message with the specified ID and context.
:param id: the message ID
:param context: the message context, or ``None`` for no context
:return: the message with the specified ID, or `None` if no such
message is in the catalog
:rtype: `Message`
"""
return self._messages.get(self._key_for(id, context))
def delete(self, id, context=None):
"""Delete the message with the specified ID and context.
:param id: the message ID
:param context: the message context, or ``None`` for no context
"""
key = self._key_for(id, context)
if key in self._messages:
del self._messages[key]
def update(self, template, no_fuzzy_matching=False):
"""Update the catalog based on the given template catalog.
>>> from babel.messages import Catalog
>>> template = Catalog()
>>> template.add('green', locations=[('main.py', 99)])
<Message ...>
>>> template.add('blue', locations=[('main.py', 100)])
<Message ...>
>>> template.add(('salad', 'salads'), locations=[('util.py', 42)])
<Message ...>
>>> catalog = Catalog(locale='de_DE')
>>> catalog.add('blue', u'blau', locations=[('main.py', 98)])
<Message ...>
>>> catalog.add('head', u'Kopf', locations=[('util.py', 33)])
<Message ...>
>>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),
... locations=[('util.py', 38)])
<Message ...>
>>> catalog.update(template)
>>> len(catalog)
3
>>> msg1 = catalog['green']
>>> msg1.string
>>> msg1.locations
[('main.py', 99)]
>>> msg2 = catalog['blue']
>>> msg2.string
u'blau'
>>> msg2.locations
[('main.py', 100)]
>>> msg3 = catalog['salad']
>>> msg3.string
(u'Salat', u'Salate')
>>> msg3.locations
[('util.py', 42)]
Messages that are in the catalog but not in the template are removed
from the main collection, but can still be accessed via the `obsolete`
member:
>>> 'head' in catalog
False
>>> catalog.obsolete.values()
[<Message 'head' (flags: [])>]
:param template: the reference catalog, usually read from a POT file
:param no_fuzzy_matching: whether to use fuzzy matching of message IDs
"""
messages = self._messages
remaining = messages.copy()
self._messages = odict()
# Prepare for fuzzy matching
fuzzy_candidates = []
if not no_fuzzy_matching:
fuzzy_candidates = dict([
(self._key_for(msgid), messages[msgid].context)
for msgid in messages if msgid and messages[msgid].string
])
fuzzy_matches = set()
def _merge(message, oldkey, newkey):
message = message.clone()
fuzzy = False
if oldkey != newkey:
fuzzy = True
fuzzy_matches.add(oldkey)
oldmsg = messages.get(oldkey)
if isinstance(oldmsg.id, string_types):
message.previous_id = [oldmsg.id]
else:
message.previous_id = list(oldmsg.id)
else:
oldmsg = remaining.pop(oldkey, None)
message.string = oldmsg.string
if isinstance(message.id, (list, tuple)):
if not isinstance(message.string, (list, tuple)):
fuzzy = True
message.string = tuple(
[message.string] + ([u''] * (len(message.id) - 1))
)
elif len(message.string) != self.num_plurals:
fuzzy = True
message.string = tuple(message.string[:len(oldmsg.string)])
elif isinstance(message.string, (list, tuple)):
fuzzy = True
message.string = message.string[0]
message.flags |= oldmsg.flags
if fuzzy:
message.flags |= set([u'fuzzy'])
self[message.id] = message
for message in template:
if message.id:
key = self._key_for(message.id, message.context)
if key in messages:
_merge(message, key, key)
else:
if no_fuzzy_matching is False:
# do some fuzzy matching with difflib
if isinstance(key, tuple):
matchkey = key[0] # just the msgid, no context
else:
matchkey = key
matches = get_close_matches(matchkey.lower().strip(),
fuzzy_candidates.keys(), 1)
if matches:
newkey = matches[0]
newctxt = fuzzy_candidates[newkey]
if newctxt is not None:
newkey = newkey, newctxt
_merge(message, newkey, key)
continue
self[message.id] = message
self.obsolete = odict()
for msgid in remaining:
if no_fuzzy_matching or msgid not in fuzzy_matches:
self.obsolete[msgid] = remaining[msgid]
# Make updated catalog's POT-Creation-Date equal to the template
# used to update the catalog
self.creation_date = template.creation_date
def _key_for(self, id, context=None):
"""The key for a message is just the singular ID even for pluralizable
messages, but is a ``(msgid, msgctxt)`` tuple for context-specific
messages.
"""
key = id
if isinstance(key, (list, tuple)):
key = id[0]
if context is not None:
key = (key, context)
return key
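# Hedged usage sketch (not part of Babel): building a tiny catalog with the
# API above and listing its entries. The project name, messages and German
# translations are invented demo values.
if __name__ == '__main__':
    demo = Catalog(locale='de_DE', project='Demo', version='1.0')
    demo.add(u'hello', u'hallo', locations=[('main.py', 1)])
    demo.add((u'apple', u'apples'), (u'Apfel', u'\xc4pfel'),
             locations=[('main.py', 2)])
    for msg in demo:
        if msg.id:  # skip the synthesized header entry (msgid "")
            print('%r -> %r' % (msg.id, msg.string))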
|
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import nova.conf
from nova.objects import keypair as keypair_obj
from nova.tests.functional.api_sample_tests import api_sample_base
from nova.tests.unit import fake_crypto
CONF = nova.conf.CONF
class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
microversion = None
sample_dir = "keypairs"
expected_delete_status_code = 202
expected_post_status_code = 200
def _get_flags(self):
f = super(KeyPairsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.keypairs.Keypairs')
return f
def setUp(self):
super(KeyPairsSampleJsonTest, self).setUp()
self.api.microversion = self.microversion
# TODO(sdague): this is only needed because we randomly choose the
# uuid each time.
def generalize_subs(self, subs, vanilla_regexes):
subs['keypair_name'] = 'keypair-[0-9a-f-]+'
return subs
def test_keypairs_post(self):
return self._check_keypairs_post()
def _check_keypairs_post(self, **kwargs):
"""Get api sample of key pairs post request."""
key_name = 'keypair-' + str(uuid.uuid4())
subs = dict(keypair_name=key_name, **kwargs)
response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
subs = {'keypair_name': key_name}
self._verify_response('keypairs-post-resp', subs, response,
self.expected_post_status_code)
# NOTE(maurosr): returning the key_name is necessary because the
# verification returns the label of the last compared information in
# the response, not necessarily the key name.
return key_name
def test_keypairs_import_key_post(self):
public_key = fake_crypto.get_ssh_public_key()
self._check_keypairs_import_key_post(public_key)
def _check_keypairs_import_key_post(self, public_key, **kwargs):
# Get api sample of key pairs post to import user's key.
key_name = 'keypair-' + str(uuid.uuid4())
subs = {
'keypair_name': key_name,
}
params = subs.copy()
params['public_key'] = public_key
params.update(**kwargs)
response = self._do_post('os-keypairs', 'keypairs-import-post-req',
params)
self._verify_response('keypairs-import-post-resp', subs, response,
self.expected_post_status_code)
def test_keypairs_list(self):
# Get api sample of key pairs list request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs')
subs = {'keypair_name': key_name}
self._verify_response('keypairs-list-resp', subs, response, 200)
def test_keypairs_get(self):
# Get api sample of key pairs get request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs/%s' % key_name)
subs = {'keypair_name': key_name}
self._verify_response('keypairs-get-resp', subs, response, 200)
def test_keypairs_delete(self):
# Get api sample of key pairs delete request.
key_name = self.test_keypairs_post()
response = self._do_delete('os-keypairs/%s' % key_name)
self.assertEqual(self.expected_delete_status_code,
response.status_code)
class KeyPairsV22SampleJsonTest(KeyPairsSampleJsonTest):
microversion = '2.2'
expected_post_status_code = 201
expected_delete_status_code = 204
# NOTE(gmann): microversion tests do not need to run for v2 API
# so defining scenarios only for v2.2 which will run the original tests
# by appending '(v2_2)' in test_id.
scenarios = [('v2_2', {'api_major_version': 'v2.1'})]
def test_keypairs_post(self):
# NOTE(claudiub): overrides the method with the same name in
# KeypairsSampleJsonTest, as it is used by other tests.
return self._check_keypairs_post(
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH)
def test_keypairs_post_x509(self):
return self._check_keypairs_post(
keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
def test_keypairs_post_invalid(self):
key_name = 'keypair-' + str(uuid.uuid4())
subs = dict(keypair_name=key_name, keypair_type='fakey_type')
response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
self.assertEqual(400, response.status_code)
def test_keypairs_import_key_post(self):
# NOTE(claudiub): overrides the method with the same name in
# KeypairsSampleJsonTest, since the API sample expects a keypair_type.
public_key = fake_crypto.get_ssh_public_key()
self._check_keypairs_import_key_post(
public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_SSH)
def test_keypairs_import_key_post_x509(self):
public_key = fake_crypto.get_x509_cert_and_fingerprint()[0]
public_key = public_key.replace('\n', '\\n')
self._check_keypairs_import_key_post(
public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
def _check_keypairs_import_key_post_invalid(self, keypair_type):
key_name = 'keypair-' + str(uuid.uuid4())
subs = {
'keypair_name': key_name,
'keypair_type': keypair_type,
'public_key': fake_crypto.get_ssh_public_key()
}
response = self._do_post('os-keypairs', 'keypairs-import-post-req',
subs)
self.assertEqual(400, response.status_code)
def test_keypairs_import_key_post_invalid_type(self):
self._check_keypairs_import_key_post_invalid(
keypair_type='fakey_type')
def test_keypairs_import_key_post_invalid_combination(self):
self._check_keypairs_import_key_post_invalid(
keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
class KeyPairsV210SampleJsonTest(KeyPairsSampleJsonTest):
ADMIN_API = True
microversion = '2.10'
expected_post_status_code = 201
expected_delete_status_code = 204
scenarios = [('v2_10', {'api_major_version': 'v2.1'})]
def test_keypair_create_for_user(self):
subs = {
'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
'public_key': fake_crypto.get_ssh_public_key(),
'user_id': "fake"
}
self._check_keypairs_post(**subs)
def test_keypairs_post(self):
return self._check_keypairs_post(
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
user_id="admin")
def test_keypairs_import_key_post(self):
# NOTE(claudiub): overrides the method with the same name in
# KeypairsSampleJsonTest, since the API sample expects a keypair_type.
public_key = fake_crypto.get_ssh_public_key()
self._check_keypairs_import_key_post(
public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
user_id="fake")
def test_keypairs_delete_for_user(self):
# Delete a keypair on behalf of a user
subs = {
'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
'public_key': fake_crypto.get_ssh_public_key(),
'user_id': "fake"
}
key_name = self._check_keypairs_post(**subs)
response = self._do_delete('os-keypairs/%s?user_id=fake' % key_name)
self.assertEqual(self.expected_delete_status_code,
response.status_code)
class KeyPairsV210SampleJsonTestNotAdmin(KeyPairsV210SampleJsonTest):
ADMIN_API = False
def test_keypairs_post(self):
return self._check_keypairs_post(
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
user_id="fake")
def test_keypairs_post_for_other_user(self):
key_name = 'keypair-' + str(uuid.uuid4())
subs = dict(keypair_name=key_name,
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
user_id='fake1')
response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
self.assertEqual(403, response.status_code)
|
|
"""Tradfri lights platform tests."""
from copy import deepcopy
from unittest.mock import Mock, MagicMock, patch, PropertyMock
import pytest
from pytradfri.device import Device, LightControl, Light
from homeassistant.components import tradfri
from tests.common import MockConfigEntry
DEFAULT_TEST_FEATURES = {'can_set_dimmer': False,
'can_set_color': False,
'can_set_temp': False}
# [
# {bulb features},
# {turn_on arguments},
# {expected result}
# ]
TURN_ON_TEST_CASES = [
# Turn On
[
{},
{},
{'state': 'on'},
],
# Brightness > 0
[
{'can_set_dimmer': True},
{'brightness': 100},
{
'state': 'on',
'brightness': 100
}
],
# Brightness == 1
[
{'can_set_dimmer': True},
{'brightness': 1},
{
'brightness': 1
}
],
# Brightness > 254
[
{'can_set_dimmer': True},
{'brightness': 1000},
{
'brightness': 254
}
],
# color_temp
[
{'can_set_temp': True},
{'color_temp': 250},
{'color_temp': 250},
],
# color_temp < 250
[
{'can_set_temp': True},
{'color_temp': 1},
{'color_temp': 250},
],
# color_temp > 454
[
{'can_set_temp': True},
{'color_temp': 1000},
{'color_temp': 454},
],
# hs color
[
{'can_set_color': True},
{'hs_color': [300, 100]},
{
'state': 'on',
'hs_color': [300, 100]
}
],
# ct + brightness
[
{
'can_set_dimmer': True,
'can_set_temp': True
},
{
'color_temp': 250,
'brightness': 200
},
{
'state': 'on',
'color_temp': 250,
'brightness': 200
}
],
# ct + brightness (no temp support)
[
{
'can_set_dimmer': True,
'can_set_temp': False,
'can_set_color': True
},
{
'color_temp': 250,
'brightness': 200
},
{
'state': 'on',
'hs_color': [26.807, 34.869],
'brightness': 200
}
],
# ct + brightness (no temp or color support)
[
{
'can_set_dimmer': True,
'can_set_temp': False,
'can_set_color': False
},
{
'color_temp': 250,
'brightness': 200
},
{
'state': 'on',
'brightness': 200
}
],
# hs + brightness
[
{
'can_set_dimmer': True,
'can_set_color': True
},
{
'hs_color': [300, 100],
'brightness': 200
},
{
'state': 'on',
'hs_color': [300, 100],
'brightness': 200
}
]
]
# Result of transition is not tested, but data is passed to turn on service.
TRANSITION_CASES_FOR_TESTS = [None, 0, 1]
@pytest.fixture(autouse=True, scope='module')
def setup(request):
"""Set up patches for pytradfri methods."""
p_1 = patch('pytradfri.device.LightControl.raw',
new_callable=PropertyMock,
return_value=[{'mock': 'mock'}])
p_2 = patch('pytradfri.device.LightControl.lights')
p_1.start()
p_2.start()
def teardown():
"""Remove patches for pytradfri methods."""
p_1.stop()
p_2.stop()
request.addfinalizer(teardown)
@pytest.fixture
def mock_gateway():
"""Mock a Tradfri gateway."""
def get_devices():
"""Return mock devices."""
return gateway.mock_devices
def get_groups():
"""Return mock groups."""
return gateway.mock_groups
gateway = Mock(
get_devices=get_devices,
get_groups=get_groups,
mock_devices=[],
mock_groups=[],
mock_responses=[]
)
return gateway
@pytest.fixture
def mock_api(mock_gateway):
"""Mock api."""
async def api(command):
"""Mock api function."""
# Store the data for "real" command objects.
if hasattr(command, '_data') and not isinstance(command, Mock):
mock_gateway.mock_responses.append(command._data)
return command
return api
async def generate_psk(self, code):
"""Mock psk."""
return "mock"
async def setup_gateway(hass, mock_gateway, mock_api):
"""Load the Tradfri platform with a mock gateway."""
entry = MockConfigEntry(domain=tradfri.DOMAIN, data={
'host': 'mock-host',
'identity': 'mock-identity',
'key': 'mock-key',
'import_groups': True,
'gateway_id': 'mock-gateway-id',
})
hass.data[tradfri.KEY_GATEWAY] = {entry.entry_id: mock_gateway}
hass.data[tradfri.KEY_API] = {entry.entry_id: mock_api}
await hass.config_entries.async_forward_entry_setup(
entry, 'light'
)
def mock_light(test_features={}, test_state={}, n=0):
"""Mock a tradfri light."""
mock_light_data = Mock(
**test_state
)
mock_light = Mock(
id='mock-light-id-{}'.format(n),
reachable=True,
observe=Mock(),
device_info=MagicMock()
)
mock_light.name = 'tradfri_light_{}'.format(n)
# Set supported features for the light.
features = {**DEFAULT_TEST_FEATURES, **test_features}
lc = LightControl(mock_light)
for k, v in features.items():
setattr(lc, k, v)
# Store the initial state.
setattr(lc, 'lights', [mock_light_data])
mock_light.light_control = lc
return mock_light
async def test_light(hass, mock_gateway, mock_api):
"""Test that lights are correctly added."""
features = {
'can_set_dimmer': True,
'can_set_color': True,
'can_set_temp': True
}
state = {
'state': True,
'dimmer': 100,
'color_temp': 250,
'hsb_xy_color': (100, 100, 100, 100, 100)
}
mock_gateway.mock_devices.append(
mock_light(test_features=features, test_state=state)
)
await setup_gateway(hass, mock_gateway, mock_api)
lamp_1 = hass.states.get('light.tradfri_light_0')
assert lamp_1 is not None
assert lamp_1.state == 'on'
assert lamp_1.attributes['brightness'] == 100
assert lamp_1.attributes['hs_color'] == (0.549, 0.153)
async def test_light_observed(hass, mock_gateway, mock_api):
"""Test that lights are correctly observed."""
light = mock_light()
mock_gateway.mock_devices.append(light)
await setup_gateway(hass, mock_gateway, mock_api)
assert len(light.observe.mock_calls) > 0
async def test_light_available(hass, mock_gateway, mock_api):
"""Test light available property."""
light = mock_light({'state': True}, n=1)
light.reachable = True
light2 = mock_light({'state': True}, n=2)
light2.reachable = False
mock_gateway.mock_devices.append(light)
mock_gateway.mock_devices.append(light2)
await setup_gateway(hass, mock_gateway, mock_api)
assert (hass.states.get('light.tradfri_light_1')
.state == 'on')
assert (hass.states.get('light.tradfri_light_2')
.state == 'unavailable')
# Combine TURN_ON_TEST_CASES and TRANSITION_CASES_FOR_TESTS
ALL_TURN_ON_TEST_CASES = [
["test_features", "test_data", "expected_result", "id"],
[]
]
idx = 1
for tc in TURN_ON_TEST_CASES:
for trans in TRANSITION_CASES_FOR_TESTS:
case = deepcopy(tc)
if trans is not None:
case[1]['transition'] = trans
case.append(idx)
idx = idx + 1
ALL_TURN_ON_TEST_CASES[1].append(case)
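# Illustrative note (not in the original test): after this loop the first
# element of ALL_TURN_ON_TEST_CASES names the parametrized arguments and the
# second holds one [test_features, test_data, expected_result, id] entry per
# combination of TURN_ON_TEST_CASES and TRANSITION_CASES_FOR_TESTS, so every
# case above runs once per transition value.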
@pytest.mark.parametrize(*ALL_TURN_ON_TEST_CASES)
async def test_turn_on(hass,
mock_gateway,
mock_api,
test_features,
test_data,
expected_result,
id):
"""Test turning on a light."""
# Note pytradfri style, not hass. Values not really important.
initial_state = {
'state': False,
'dimmer': 0,
'color_temp': 250,
'hsb_xy_color': (100, 100, 100, 100, 100)
}
# Setup the gateway with a mock light.
light = mock_light(test_features=test_features,
test_state=initial_state,
n=id)
mock_gateway.mock_devices.append(light)
await setup_gateway(hass, mock_gateway, mock_api)
# Use the turn_on service call to change the light state.
await hass.services.async_call('light', 'turn_on', {
'entity_id': 'light.tradfri_light_{}'.format(id),
**test_data
}, blocking=True)
await hass.async_block_till_done()
# Check that the light is observed.
mock_func = light.observe
assert len(mock_func.mock_calls) > 0
_, callkwargs = mock_func.call_args
assert 'callback' in callkwargs
# Callback function to refresh light state.
cb = callkwargs['callback']
responses = mock_gateway.mock_responses
# State on command data.
data = {'3311': [{'5850': 1}]}
# Add data for all sent commands.
for r in responses:
data['3311'][0] = {**data['3311'][0], **r['3311'][0]}
# Use the callback function to update the light state.
dev = Device(data)
light_data = Light(dev, 0)
light.light_control.lights[0] = light_data
cb(light)
await hass.async_block_till_done()
# Check that the state is correct.
states = hass.states.get('light.tradfri_light_{}'.format(id))
for k, v in expected_result.items():
if k == 'state':
assert states.state == v
else:
# Allow some rounding error in color conversions.
assert states.attributes[k] == pytest.approx(v, abs=0.01)
async def test_turn_off(hass, mock_gateway, mock_api):
"""Test turning off a light."""
state = {
'state': True,
'dimmer': 100,
}
light = mock_light(test_state=state)
mock_gateway.mock_devices.append(light)
await setup_gateway(hass, mock_gateway, mock_api)
# Use the turn_off service call to change the light state.
await hass.services.async_call('light', 'turn_off', {
'entity_id': 'light.tradfri_light_0'}, blocking=True)
await hass.async_block_till_done()
# Check that the light is observed.
mock_func = light.observe
assert len(mock_func.mock_calls) > 0
_, callkwargs = mock_func.call_args
assert 'callback' in callkwargs
# Callback function to refresh light state.
cb = callkwargs['callback']
responses = mock_gateway.mock_responses
data = {'3311': [{}]}
# Add data for all sent commands.
for r in responses:
data['3311'][0] = {**data['3311'][0], **r['3311'][0]}
# Use the callback function to update the light state.
dev = Device(data)
light_data = Light(dev, 0)
light.light_control.lights[0] = light_data
cb(light)
await hass.async_block_till_done()
# Check that the state is correct.
states = hass.states.get('light.tradfri_light_0')
assert states.state == 'off'
def mock_group(test_state={}, n=0):
"""Mock a Tradfri group."""
default_state = {
'state': False,
'dimmer': 0,
}
state = {**default_state, **test_state}
mock_group = Mock(
member_ids=[],
observe=Mock(),
**state
)
mock_group.name = 'tradfri_group_{}'.format(n)
return mock_group
async def test_group(hass, mock_gateway, mock_api):
"""Test that groups are correctly added."""
mock_gateway.mock_groups.append(mock_group())
state = {'state': True, 'dimmer': 100}
mock_gateway.mock_groups.append(mock_group(state, 1))
await setup_gateway(hass, mock_gateway, mock_api)
group = hass.states.get('light.tradfri_group_0')
assert group is not None
assert group.state == 'off'
group = hass.states.get('light.tradfri_group_1')
assert group is not None
assert group.state == 'on'
assert group.attributes['brightness'] == 100
async def test_group_turn_on(hass, mock_gateway, mock_api):
"""Test turning on a group."""
group = mock_group()
group2 = mock_group(n=1)
group3 = mock_group(n=2)
mock_gateway.mock_groups.append(group)
mock_gateway.mock_groups.append(group2)
mock_gateway.mock_groups.append(group3)
await setup_gateway(hass, mock_gateway, mock_api)
# Use the turn_off service call to change the light state.
await hass.services.async_call('light', 'turn_on', {
'entity_id': 'light.tradfri_group_0'}, blocking=True)
await hass.services.async_call('light', 'turn_on', {
'entity_id': 'light.tradfri_group_1',
'brightness': 100}, blocking=True)
await hass.services.async_call('light', 'turn_on', {
'entity_id': 'light.tradfri_group_2',
'brightness': 100,
'transition': 1}, blocking=True)
await hass.async_block_till_done()
group.set_state.assert_called_with(1)
group2.set_dimmer.assert_called_with(100)
group3.set_dimmer.assert_called_with(100, transition_time=10)
async def test_group_turn_off(hass, mock_gateway, mock_api):
"""Test turning off a group."""
group = mock_group({'state': True})
mock_gateway.mock_groups.append(group)
await setup_gateway(hass, mock_gateway, mock_api)
# Use the turn_off service call to change the light state.
await hass.services.async_call('light', 'turn_off', {
'entity_id': 'light.tradfri_group_0'}, blocking=True)
await hass.async_block_till_done()
group.set_state.assert_called_with(0)
|
|
from __future__ import division, absolute_import, print_function
import functools
import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, zeros, outer, concatenate, array, asanyarray
)
from numpy.core.fromnumeric import product, reshape, transpose
from numpy.core.multiarray import normalize_axis_index
from numpy.core import overrides
from numpy.core import vstack, atleast_3d
from numpy.core.shape_base import _arrays_for_stack_dispatcher
from numpy.lib.index_tricks import ndindex
from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells
__all__ = [
'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis',
'put_along_axis'
]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _make_along_axis_idx(arr_shape, indices, axis):
# compute dimensions to iterate over
if not _nx.issubdtype(indices.dtype, _nx.integer):
raise IndexError('`indices` must be an integer array')
if len(arr_shape) != indices.ndim:
raise ValueError(
"`indices` and `arr` must have the same number of dimensions")
shape_ones = (1,) * indices.ndim
dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
# build a fancy index, consisting of orthogonal aranges, with the
# requested index inserted at the right location
fancy_index = []
for dim, n in zip(dest_dims, arr_shape):
if dim is None:
fancy_index.append(indices)
else:
ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
fancy_index.append(_nx.arange(n).reshape(ind_shape))
return tuple(fancy_index)
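# Illustrative note: for arr_shape == (2, 3), indices of shape (2, 1) and
# axis == 1, the tuple built above is (arange(2).reshape(2, 1), indices), i.e.
# an orthogonal arange for every axis except `axis`, with `indices` slotted in
# at the position of `axis`.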
def _take_along_axis_dispatcher(arr, indices, axis):
return (arr, indices)
@array_function_dispatch(_take_along_axis_dispatcher)
def take_along_axis(arr, indices, axis):
"""
Take values from the input array by matching 1d index and data slices.
This iterates over matching 1d slices oriented along the specified axis in
the index and data arrays, and uses the former to look up values in the
latter. These slices can be different lengths.
Functions returning an index along an axis, like `argsort` and
`argpartition`, produce suitable indices for this function.
.. versionadded:: 1.15.0
Parameters
----------
arr: ndarray (Ni..., M, Nk...)
Source array
indices: ndarray (Ni..., J, Nk...)
Indices to take along each 1d slice of `arr`. This must match the
dimension of arr, but dimensions Ni and Nk only need to broadcast
against `arr`.
axis: int
The axis to take 1d slices along. If axis is None, the input array is
treated as if it had first been flattened to 1d, for consistency with
`sort` and `argsort`.
Returns
-------
out: ndarray (Ni..., J, Nk...)
The indexed result.
Notes
-----
This is equivalent to (but faster than) the following use of `ndindex` and
`s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
J = indices.shape[axis] # Need not equal M
out = np.empty(Ni + (J,) + Nk)
for ii in ndindex(Ni):
for kk in ndindex(Nk):
a_1d = a [ii + s_[:,] + kk]
indices_1d = indices[ii + s_[:,] + kk]
out_1d = out [ii + s_[:,] + kk]
for j in range(J):
out_1d[j] = a_1d[indices_1d[j]]
Equivalently, eliminating the inner loop, the last two lines would be::
out_1d[:] = a_1d[indices_1d]
See Also
--------
take : Take along an axis, using the same indices for every 1d slice
put_along_axis :
Put values into the destination array by matching 1d index and data slices
Examples
--------
For this sample array
>>> a = np.array([[10, 30, 20], [60, 40, 50]])
We can sort either by using sort directly, or argsort and this function
>>> np.sort(a, axis=1)
array([[10, 20, 30],
[40, 50, 60]])
>>> ai = np.argsort(a, axis=1); ai
array([[0, 2, 1],
[1, 2, 0]])
>>> np.take_along_axis(a, ai, axis=1)
array([[10, 20, 30],
[40, 50, 60]])
The same works for max and min, if you expand the dimensions:
>>> np.expand_dims(np.max(a, axis=1), axis=1)
array([[30],
[60]])
>>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
>>> ai
array([[1],
[0]])
>>> np.take_along_axis(a, ai, axis=1)
array([[30],
[60]])
If we want to get the max and min at the same time, we can stack the
indices first
>>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1)
>>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1)
>>> ai = np.concatenate([ai_min, ai_max], axis=1)
>>> ai
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(a, ai, axis=1)
array([[10, 30],
[40, 60]])
"""
# normalize inputs
if axis is None:
arr = arr.flat
arr_shape = (len(arr),) # flatiter has no .shape
axis = 0
else:
axis = normalize_axis_index(axis, arr.ndim)
arr_shape = arr.shape
# use the fancy index
return arr[_make_along_axis_idx(arr_shape, indices, axis)]
def _put_along_axis_dispatcher(arr, indices, values, axis):
return (arr, indices, values)
@array_function_dispatch(_put_along_axis_dispatcher)
def put_along_axis(arr, indices, values, axis):
"""
Put values into the destination array by matching 1d index and data slices.
This iterates over matching 1d slices oriented along the specified axis in
the index and data arrays, and uses the former to place values into the
latter. These slices can be different lengths.
Functions returning an index along an axis, like `argsort` and
`argpartition`, produce suitable indices for this function.
.. versionadded:: 1.15.0
Parameters
----------
arr: ndarray (Ni..., M, Nk...)
Destination array.
indices: ndarray (Ni..., J, Nk...)
Indices to change along each 1d slice of `arr`. This must match the
dimension of arr, but dimensions in Ni and Nk may be 1 to broadcast
against `arr`.
values: array_like (Ni..., J, Nk...)
values to insert at those indices. Its shape and dimension are
broadcast to match that of `indices`.
axis: int
The axis to take 1d slices along. If axis is None, the destination
array is treated as if a flattened 1d view had been created of it.
Notes
-----
This is equivalent to (but faster than) the following use of `ndindex` and
`s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
J = indices.shape[axis] # Need not equal M
for ii in ndindex(Ni):
for kk in ndindex(Nk):
a_1d = a [ii + s_[:,] + kk]
indices_1d = indices[ii + s_[:,] + kk]
values_1d = values [ii + s_[:,] + kk]
for j in range(J):
a_1d[indices_1d[j]] = values_1d[j]
Equivalently, eliminating the inner loop, the last two lines would be::
a_1d[indices_1d] = values_1d
See Also
--------
take_along_axis :
Take values from the input array by matching 1d index and data slices
Examples
--------
For this sample array
>>> a = np.array([[10, 30, 20], [60, 40, 50]])
We can replace the maximum values with:
>>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
>>> ai
array([[1],
[0]])
>>> np.put_along_axis(a, ai, 99, axis=1)
>>> a
array([[10, 99, 20],
[99, 40, 50]])
"""
# normalize inputs
if axis is None:
arr = arr.flat
axis = 0
arr_shape = (len(arr),) # flatiter has no .shape
else:
axis = normalize_axis_index(axis, arr.ndim)
arr_shape = arr.shape
# use the fancy index
arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):
return (arr,)
@array_function_dispatch(_apply_along_axis_dispatcher)
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
is a 1-D slice of `arr` along `axis`.
This is equivalent to (but faster than) the following use of `ndindex` and
`s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
for kk in ndindex(Nk):
f = func1d(arr[ii + s_[:,] + kk])
Nj = f.shape
for jj in ndindex(Nj):
out[ii + jj + kk] = f[jj]
Equivalently, eliminating the inner loop, this can be expressed as::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
for kk in ndindex(Nk):
out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])
Parameters
----------
func1d : function (M,) -> (Nj...)
This function should accept 1-D arrays. It is applied to 1-D
slices of `arr` along the specified axis.
axis : integer
Axis along which `arr` is sliced.
arr : ndarray (Ni..., M, Nk...)
Input array.
args : any
Additional arguments to `func1d`.
kwargs : any
Additional named arguments to `func1d`.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray (Ni..., Nj..., Nk...)
The output array. The shape of `out` is identical to the shape of
`arr`, except along the `axis` dimension. This axis is removed, and
replaced with new dimensions equal to the shape of the return value
of `func1d`. So if `func1d` returns a scalar `out` will have one
fewer dimensions than `arr`.
See Also
--------
apply_over_axes : Apply a function repeatedly over multiple axes.
Examples
--------
>>> def my_func(a):
... \"\"\"Average first and last element of a 1-D array\"\"\"
... return (a[0] + a[-1]) * 0.5
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(my_func, 0, b)
array([4., 5., 6.])
>>> np.apply_along_axis(my_func, 1, b)
array([2., 5., 8.])
For a function that returns a 1D array, the number of dimensions in
`outarr` is the same as `arr`.
>>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
>>> np.apply_along_axis(sorted, 1, b)
array([[1, 7, 8],
[3, 4, 9],
[2, 5, 6]])
For a function that returns a higher dimensional array, those dimensions
are inserted in place of the `axis` dimension.
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(np.diag, -1, b)
array([[[1, 0, 0],
[0, 2, 0],
[0, 0, 3]],
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]],
[[7, 0, 0],
[0, 8, 0],
[0, 0, 9]]])
"""
# handle negative axes
arr = asanyarray(arr)
nd = arr.ndim
axis = normalize_axis_index(axis, nd)
# arr, with the iteration axis at the end
in_dims = list(range(nd))
inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis])
# compute indices for the iteration axes, and append a trailing ellipsis to
# prevent 0d arrays decaying to scalars, which fixes gh-8642
inds = ndindex(inarr_view.shape[:-1])
inds = (ind + (Ellipsis,) for ind in inds)
# invoke the function on the first item
try:
ind0 = next(inds)
except StopIteration:
raise ValueError('Cannot apply_along_axis when any iteration dimensions are 0')
res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
# build a buffer for storing evaluations of func1d.
# remove the requested axis, and add the new ones on the end.
# laid out so that each write is contiguous.
# for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)
# permutation of axes such that out = buff.transpose(buff_permute)
buff_dims = list(range(buff.ndim))
buff_permute = (
buff_dims[0 : axis] +
buff_dims[buff.ndim-res.ndim : buff.ndim] +
buff_dims[axis : buff.ndim-res.ndim]
)
# matrices have a nasty __array_prepare__ and __array_wrap__
if not isinstance(res, matrix):
buff = res.__array_prepare__(buff)
# save the first result, then compute and save all remaining results
buff[ind0] = res
for ind in inds:
buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))
if not isinstance(res, matrix):
# wrap the array, to preserve subclasses
buff = res.__array_wrap__(buff)
# finally, rotate the inserted axes back to where they belong
return transpose(buff, buff_permute)
else:
# matrices have to be transposed first, because they collapse dimensions!
out_arr = transpose(buff, buff_permute)
return res.__array_wrap__(out_arr)
def _apply_over_axes_dispatcher(func, a, axes):
return (a,)
@array_function_dispatch(_apply_over_axes_dispatcher)
def apply_over_axes(func, a, axes):
"""
Apply a function repeatedly over multiple axes.
`func` is called as `res = func(a, axis)`, where `axis` is the first
element of `axes`. The result `res` of the function call must have
either the same dimensions as `a` or one less dimension. If `res`
has one less dimension than `a`, a dimension is inserted before
`axis`. The call to `func` is then repeated for each axis in `axes`,
with `res` as the first argument.
Parameters
----------
func : function
This function must take two arguments, `func(a, axis)`.
a : array_like
Input array.
axes : array_like
Axes over which `func` is applied; the elements must be integers.
Returns
-------
apply_over_axis : ndarray
The output array. The number of dimensions is the same as `a`,
but the shape can be different. This depends on whether `func`
changes the shape of its output with respect to its input.
See Also
--------
apply_along_axis :
Apply a function to 1-D slices of an array along the given axis.
Notes
------
This function is equivalent to tuple axis arguments to reorderable ufuncs
with keepdims=True. Tuple axis arguments to ufuncs have been available since
version 1.7.0.
Examples
--------
>>> a = np.arange(24).reshape(2,3,4)
>>> a
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
Sum over axes 0 and 2. The result has same number of dimensions
as the original array:
>>> np.apply_over_axes(np.sum, a, [0,2])
array([[[ 60],
[ 92],
[124]]])
Tuple axis arguments to ufuncs are equivalent:
>>> np.sum(a, axis=(0,2), keepdims=True)
array([[[ 60],
[ 92],
[124]]])
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
def _expand_dims_dispatcher(a, axis):
return (a,)
@array_function_dispatch(_expand_dims_dispatcher)
def expand_dims(a, axis):
"""
Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the expanded
array shape.
.. note:: Previous to NumPy 1.13.0, neither ``axis < -a.ndim - 1`` nor
``axis > a.ndim`` raised errors or put the new axis where documented.
Those axis values are now deprecated and will raise an AxisError in the
future.
Parameters
----------
a : array_like
Input array.
axis : int
Position in the expanded axes where the new axis is placed.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
See Also
--------
squeeze : The inverse operation, removing singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
doc.indexing, atleast_1d, atleast_2d, atleast_3d
Examples
--------
>>> x = np.array([1,2])
>>> x.shape
(2,)
The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:
>>> y = np.expand_dims(x, axis=0)
>>> y
array([[1, 2]])
>>> y.shape
(1, 2)
>>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]
>>> y
array([[1],
[2]])
>>> y.shape
(2, 1)
Note that some examples may use ``None`` instead of ``np.newaxis``. These
are the same objects:
>>> np.newaxis is None
True
"""
if isinstance(a, matrix):
a = asarray(a)
else:
a = asanyarray(a)
shape = a.shape
if axis > a.ndim or axis < -a.ndim - 1:
# 2017-05-17, 1.13.0
warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are "
"deprecated and will raise an AxisError in the future.",
DeprecationWarning, stacklevel=2)
# When the deprecation period expires, delete this if block,
if axis < 0:
axis = axis + a.ndim + 1
# and uncomment the following line.
# axis = normalize_axis_index(axis, a.ndim + 1)
return a.reshape(shape[:axis] + (1,) + shape[axis:])
row_stack = vstack
def _column_stack_dispatcher(tup):
return _arrays_for_stack_dispatcher(tup)
@array_function_dispatch(_column_stack_dispatcher)
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
-------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
stack, hstack, vstack, concatenate
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrays = []
for v in tup:
arr = array(v, copy=False, subok=True)
if arr.ndim < 2:
arr = array(arr, copy=False, subok=True, ndmin=2).T
arrays.append(arr)
return _nx.concatenate(arrays, 1)
def _dstack_dispatcher(tup):
return _arrays_for_stack_dispatcher(tup)
@array_function_dispatch(_dstack_dispatcher)
def dstack(tup):
"""
Stack arrays in sequence depth wise (along third axis).
This is equivalent to concatenation along the third axis after 2-D arrays
of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
`(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
`dsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of arrays
The arrays must have the same shape along all but the third axis.
1-D or 2-D arrays must have the same shape.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 3-D.
See Also
--------
stack : Join a sequence of arrays along a new axis.
vstack : Stack along first axis.
hstack : Stack along second axis.
concatenate : Join a sequence of arrays along an existing axis.
dsplit : Split array along third axis.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if _nx.ndim(sub_arys[i]) == 0:
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
return sub_arys
def _array_split_dispatcher(ary, indices_or_sections, axis=None):
return (ary, indices_or_sections)
@array_function_dispatch(_array_split_dispatcher)
def array_split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
Please refer to the ``split`` documentation. The only difference
between these functions is that ``array_split`` allows
`indices_or_sections` to be an integer that does *not* equally
divide the axis. For an array of length l that should be split
into n sections, it returns l % n sub-arrays of size l//n + 1
and the rest of size l//n.
See Also
--------
split : Split array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
>>> x = np.arange(7.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]
"""
try:
Ntotal = ary.shape[axis]
except AttributeError:
Ntotal = len(ary)
try:
# handle array case.
Nsections = len(indices_or_sections) + 1
div_points = [0] + list(indices_or_sections) + [Ntotal]
except TypeError:
# indices_or_sections is a scalar, not an array.
Nsections = int(indices_or_sections)
if Nsections <= 0:
raise ValueError('number sections must be larger than 0.')
Neach_section, extras = divmod(Ntotal, Nsections)
section_sizes = ([0] +
extras * [Neach_section+1] +
(Nsections-extras) * [Neach_section])
div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()
sub_arys = []
sary = _nx.swapaxes(ary, axis, 0)
for i in range(Nsections):
st = div_points[i]
end = div_points[i + 1]
sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
return sub_arys
def _split_dispatcher(ary, indices_or_sections, axis=None):
return (ary, indices_or_sections)
@array_function_dispatch(_split_dispatcher)
def split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D array
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division.
See Also
--------
array_split : Split an array into multiple sub-arrays of equal or
near-equal size. Does not raise an exception if
an equal division cannot be made.
hsplit : Split array into multiple sub-arrays horizontally (column-wise).
vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
Examples
--------
>>> x = np.arange(9.0)
>>> np.split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
>>> x = np.arange(8.0)
>>> np.split(x, [3, 5, 6, 10])
[array([0., 1., 2.]),
array([3., 4.]),
array([5.]),
array([6., 7.]),
array([], dtype=float64)]
"""
try:
len(indices_or_sections)
except TypeError:
sections = indices_or_sections
N = ary.shape[axis]
if N % sections:
raise ValueError(
'array split does not result in an equal division')
res = array_split(ary, indices_or_sections, axis)
return res
def _hvdsplit_dispatcher(ary, indices_or_sections):
return (ary, indices_or_sections)
@array_function_dispatch(_hvdsplit_dispatcher)
def hsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays horizontally (column-wise).
Please refer to the `split` documentation. `hsplit` is equivalent
to `split` with ``axis=1``, the array is always split along the second
axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]])]
>>> np.hsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[12., 13., 14.]]),
array([[ 3.],
[ 7.],
[11.],
[15.]]),
array([], shape=(4, 0), dtype=float64)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[0., 1.]],
[[4., 5.]]]),
array([[[2., 3.]],
[[6., 7.]]])]
"""
if _nx.ndim(ary) == 0:
raise ValueError('hsplit only works on arrays of 1 or more dimensions')
if ary.ndim > 1:
return split(ary, indices_or_sections, 1)
else:
return split(ary, indices_or_sections, 0)
@array_function_dispatch(_hvdsplit_dispatcher)
def vsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays vertically (row-wise).
Please refer to the ``split`` documentation. ``vsplit`` is equivalent
to ``split`` with `axis=0` (default), the array is always split along the
first axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[0., 1., 2., 3.],
[4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])]
>>> np.vsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)]
With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[0., 1.],
[2., 3.]]]), array([[[4., 5.],
[6., 7.]]])]
"""
if _nx.ndim(ary) < 2:
raise ValueError('vsplit only works on arrays of 2 or more dimensions')
return split(ary, indices_or_sections, 0)
@array_function_dispatch(_hvdsplit_dispatcher)
def dsplit(ary, indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[12., 13.]]]), array([[[ 2., 3.],
[ 6., 7.]],
[[10., 11.],
[14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[11.],
[15.]]]),
array([], shape=(2, 2, 0), dtype=float64)]
"""
if _nx.ndim(ary) < 3:
raise ValueError('dsplit only works on arrays of 3 or more dimensions')
return split(ary, indices_or_sections, 2)
def get_array_prepare(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None
"""
wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
x.__array_prepare__) for i, x in enumerate(args)
if hasattr(x, '__array_prepare__'))
if wrappers:
return wrappers[-1][-1]
return None
def get_array_wrap(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None
"""
wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
x.__array_wrap__) for i, x in enumerate(args)
if hasattr(x, '__array_wrap__'))
if wrappers:
return wrappers[-1][-1]
return None
def _kron_dispatcher(a, b):
return (a, b)
@array_function_dispatch(_kron_dispatcher)
def kron(a, b):
"""
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : array_like
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, ..., 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, ..., 7, 70, 700])
>>> np.kron(np.eye(2), np.ones((2,2)))
array([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]])
>>> a = np.arange(100).reshape((2,5,2,5))
>>> b = np.arange(24).reshape((2,3,4))
>>> c = np.kron(a,b)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1,3,0,2)
>>> J = (0,2,1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
>>> c[K] == a[I]*b[J]
True
"""
b = asanyarray(b)
a = array(a, copy=False, subok=True, ndmin=b.ndim)
ndb, nda = b.ndim, a.ndim
if (nda == 0 or ndb == 0):
return _nx.multiply(a, b)
as_ = a.shape
bs = b.shape
if not a.flags.contiguous:
a = reshape(a, as_)
if not b.flags.contiguous:
b = reshape(b, bs)
nd = ndb
if (ndb != nda):
if (ndb > nda):
as_ = (1,)*(ndb-nda) + as_
else:
bs = (1,)*(nda-ndb) + bs
nd = nda
result = outer(a, b).reshape(as_+bs)
axis = nd-1
for _ in range(nd):
result = concatenate(result, axis=axis)
wrapper = get_array_prepare(a, b)
if wrapper is not None:
result = wrapper(result)
wrapper = get_array_wrap(a, b)
if wrapper is not None:
result = wrapper(result)
return result
def _tile_dispatcher(A, reps):
return (A, reps)
@array_function_dispatch(_tile_dispatcher)
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Note : Although tile may be used for broadcasting, it is strongly
recommended to use numpy's broadcasting operations and functions.
Parameters
----------
A : array_like
The input array.
reps : array_like
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
See Also
--------
repeat : Repeat elements of an array.
broadcast_to : Broadcast an array to a new shape
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0, 1, 2, 0, 1, 2])
>>> np.tile(a, (2, 2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> np.tile(b, (2, 1))
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> c = np.array([1,2,3,4])
>>> np.tile(c,(4,1))
array([[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]])
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
# Fixes the problem that the function does not make a copy if A is a
# numpy array and the repetitions are 1 in all dimensions
return _nx.array(A, copy=True, subok=True, ndmin=d)
else:
# Note that no copy of zero-sized arrays is made. However since they
# have no data there is no risk of an inadvertent overwrite.
c = _nx.array(A, copy=False, subok=True, ndmin=d)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
shape_out = tuple(s*t for s, t in zip(c.shape, tup))
n = c.size
if n > 0:
for dim_in, nrep in zip(c.shape, tup):
if nrep != 1:
c = c.reshape(-1, n).repeat(nrep, 0)
n //= dim_in
return c.reshape(shape_out)
|
|
#!/usr/bin/env python3
#
# Contains common functions necessary for various python testcase scripts.
#
import os, re, csv, datetime, subprocess, glob, sys, time, shutil
def is_generated_file(fullfilepath):
"""
Determines if the first line of a file contains the autogenerated signature string.
"""
with open(fullfilepath, 'r') as f:
firstline = f.readline()
if firstline.strip() == get_engine_signature():
return True
return False
def find_files_in_dir(directory, regex, silent=True):
"""
Finds files (non-directories) that match a regex in a certain directory. (recursively, case-insensitive)
Can pass an optional argument of silent=False to print filenames that did not match the regex.
"""
matching_files = []
for root, dirs, files in os.walk(directory):
for name in files:
result = re.search(regex, name, re.IGNORECASE)
if result != None:
matching_files.append(os.path.realpath(os.path.join(root, name)))
else:
if not silent:
print("Skipped file (did not match regex): ", name)
return matching_files
def find_directories_in_dir(directory, regex, silent=True):
"""
Finds directories that match a regex in a certain directory (recursively, case-insensitive)
Can pass an optional argument of silent=False to print filenames that did not match the regex.
"""
matching_directories = []
for root, dirs, files in os.walk(directory):
for name in dirs:
result = re.search(regex, name, re.IGNORECASE)
if result != None:
matching_directories.append(os.path.realpath(os.path.join(root, name)))
else:
if not silent:
print("Skipped dir (did not match regex): ", name)
return matching_directories
def find_all_files_in_dir_nr(directory):
"""
Finds all files (non-directories) in a directory. This function is not recursive.
"""
files = os.listdir(directory)
# append base dir
files = list(map(lambda x: os.path.join(directory, x), files))
# make sure each path is a file (and not a directory)
files = list(filter(lambda x: os.path.isfile(x), files))
return files
def find_testcase_functional_variants_in_dir(dir):
"""
Finds all functional variants in a directory. This was originally created when
we decided to split the test cases into separate directories by functional
variant.
"""
func_vars = []
# filter the list of test cases to the baseline test cases so that we can
# iterate over this list without worrying about duplicate functional variants
baseline_testcases = find_files_in_dir(dir, get_baseline_functional_variant_regex())
for btc in baseline_testcases:
btc_file_name = os.path.basename(btc)
result = re.search(get_testcase_filename_regex(), btc_file_name, re.IGNORECASE)
if result != None:
func_vars.append(result.group('functional_variant_name'))
else:
print_with_timestamp('Could not determine the functional variant in ' + btc_file_name)
exit(1)
return func_vars
def open_file_and_get_contents(file):
"""
Returns the entire contents of a file as one large string.
"""
with open(file, 'r') as f:
try:
content = f.read()
return content
except UnicodeDecodeError as error:
print("\n\n")
print(error)
print("Weird char in ", file)
print("\n")
return None
def open_file_and_get_lines(file):
"""
Returns the file as a list of lines
"""
with open(file, 'r') as f:
try:
lines = f.readlines()
return lines
except UnicodeDecodeError as error:
print("\n\n")
print(error)
print("Weird char in ", file)
print("\n")
return None
def write_file(filename, contents):
"""
Write contents to file.
"""
with open(filename, 'w') as f:
f.write(contents)
def read_csv(filename):
"""
Reads a csv.
"""
raw_records = []
with open(filename, 'r') as f:
reader = csv.reader(f, dialect='excel')
for row in reader:
raw_records.append(row)
return raw_records
def read_csv_with_header(filename):
"""
Reads a csv and returns the header along with the records.
"""
raw_records = read_csv(filename)
header = raw_records.pop(0)
return header, raw_records
def write_csv(filename, records):
"""
Writes a list to a csv.
"""
with open(filename, 'w', newline='') as f:
writer = csv.writer(f, dialect='excel')
for r in records:
writer.writerow(r)
def transform_csv(input_file, output_file, header_fx=None, row_fx=None):
"""
Transforms a csv using streaming technique. Calls a header function that
allows the caller to modify the header; also calls a row function that
allows the caller to modify each row in csv.
Allows the caller to pass arbitrary arguments between the header fx
and row fx.
The header function should look like (at a minimum):
def header_fx(header):
data = "data to share with row_fx"
return header, data
The row function declaration should look like (at a minimum):
def row_fx(orig_header, new_header, row, data):
return row
"""
with open(input_file, 'r', newline='') as fi:
reader = csv.reader(fi, dialect='excel')
orig_header = next(reader)
if header_fx == None:
new_header, data = orig_header, None
else:
new_header, data = header_fx(orig_header)
with open(output_file, 'w', newline='') as fo:
writer = csv.writer(fo, dialect='excel')
writer.writerow(new_header)
for row in reader:
if row_fx == None:
pass
else:
row = row_fx(orig_header, new_header, row, data)
writer.writerow(row)
return output_file
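# Illustrative sketch (not part of the original script): one way to satisfy the
# header_fx/row_fx contract documented in transform_csv() above. The column
# name "tool_name" and the value "ExampleTool" are hypothetical placeholders.
def example_transform_csv_usage(input_file, output_file):
    """Append a constant 'tool_name' column to a csv using transform_csv()."""
    def header_fx(header):
        data = "ExampleTool"  # shared with row_fx via the data argument
        return header + ["tool_name"], data
    def row_fx(orig_header, new_header, row, data):
        return row + [data]
    return transform_csv(input_file, output_file, header_fx=header_fx, row_fx=row_fx)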
def get_c_good_fx_counting_regex():
"""
This is not used to figure out ALL C good functions. This regex is a way of counting
how many non-flawed constructs we have per testcase.
"""
return "good(\d+|G2B|B2G|G2B\d+|B2G\d+)"
def get_java_good_fx_counting_regex():
"""
This is not used to figure out ALL Java good functions. This regex is a way of counting
how many non-flawed constructs we have per testcase.
"""
return "good(\d+|G2B|B2G|G2B\d+|B2G\d+)"
def get_testcase_filename_regex():
"""
This regex matches primary and secondary test case files.
Matches must be performed case-insensitive. (re.IGNORECASE)
If you change this regex, update the C# common library regex.
If you change this regex, update the primary testcase filename regex.
"""
return "^cwe" + \
"(?P<cwe_number>\d+)" + \
"_" + \
"(?P<cwe_name>.*)" + \
"__" + \
"(?P<functional_variant_name>.*)" + \
"_" + \
"(?P<flow_variant_id>\d+)" + \
"_?" + \
"(?P<subfile_id>[a-z]{1}|(bad)|(good(\d)+)|(base)|(goodB2G)|(goodG2B))?" + \
"\." + \
"(?P<extension>c|cpp|java|h)$"
def get_primary_testcase_filename_regex():
"""
This regex matches only primary test case files.
Matches must be performed case-insensitive. (re.IGNORECASE)
The "(?!8[12]_bad)" is a "negative lookahead" so that we don't
get the 81_bad or 82_bad file as the primary since those flow
variants also have an "a" file (which is the primary file)
The "(?!CWE580.*01_bad.java)" prevents getting the _bad file for
CWE580 since it also has an "a" file.
"""
return "^(?!CWE580.*01_bad.java)" + \
"cwe" + \
"(?P<cwe_number>\d+)" + \
"_" + \
"(?P<cwe_name>.*)" + \
"__" + \
"(?P<functional_variant_name>.*)" + \
"_" + \
"(?!8[12]_bad)" + \
"(?P<flow_variant_id>\d+)" + \
"_?" + \
"(?P<subfile_id>a|(_bad))?" + \
"\." + \
"(?P<extension>c|cpp|java)$"
def get_baseline_functional_variant_regex():
"""
This regex matches only baseline test case files
and can be used to calculate the number of functional variants.
Matches must be performed case-insensitive. (re.IGNORECASE)
The "(?!CWE580.*01_bad.java)" prevents getting the _bad file for
CWE580 since it also has an "a" file.
"""
return "^(?!CWE580.*01_bad.java)CWE\d+.*_01((a)|(_?bad)|)\.(c|cpp|java)?$"
def get_functionname_c_regex():
"""
Used to get the "simple" function name for c functions.
"""
return "^(CWE|cwe)(?P<cwe_number>\d+)_(?P<cwe_name>.*)__(?P<function_variant>.*)_(?P<flow_variant>\d+)(?P<subfile_id>[a-z]*)_(?P<function_name>[^.]*)$"
def get_cwe_id_regex():
"""
Used to get the CWE ID from a test case file or path name
"""
return "(CWE\d+)_"
def get_java_testcase_lib():
"""
Used to get the path to the Java test case lib directory
"""
return "..\\..\\..\\lib"
def get_java_testcase_lib_split():
"""
Used to get the path to the Java test case lib directory from a split directory
"""
return "..\\" + get_java_testcase_lib()
def get_c_and_cpp_testcasesupport_dir():
"""
Used to get the path to the C/C++ test case support directory
"""
return "..\\..\\testcasesupport"
def get_c_and_cpp_testcasesupport_dir_split():
"""
Used to get the path to the C/C++ test case support directory from a split directory
"""
return "..\\" + get_c_and_cpp_testcasesupport_dir()
def get_testcase_subdirectory_regex():
"""
Used to get the regex that will match the split test case CWE ID
Starting in 2012 the CWE ID will be of the form: CWEXYZ_s01, CWEXYZ_s02, etc.
"""
return "CWE.*_s\d{2,}$"
def get_timestamp():
"""
Returns a timestamp of the form YYYY-MM-DD.
"""
date = datetime.date.today()
return str(date)
def get_engine_signature():
"""
This is the first line in a test case that has been auto-generated
by the Test Case Engine. We use this to identify auto-generated vs.
manually-generated test cases.
"""
return "/* TEMPLATE GENERATED TESTCASE FILE"
def get_java_main_comment():
"""
This is the comment that appears on the line directly above the main() method in the
Java test cases.
"""
return "Below is the main()"
def get_c_cpp_main_comment():
"""
This is the comment that appears on the line directly above the main() function in the
C/C++ test cases.
"""
return "Below is the main()"
def get_tool_study_max_java_heap_size():
"""
Some of the tools allow you to specify the java heap size. We want to ensure all of the tools
use the same heap size (if they allow it to be specified), so the run_analysis scripts
should use this method to retrieve the size
"""
return "4096m"
def map_weakness_classes(file):
"""
Reads the weakness class csv file. Allows a cwe to be part of multiple weakness classes.
"""
header, records = read_csv_with_header(file)
dict = {}
for record in records:
cwe = record[header.index("CWEID")]
wclass = record[header.index("Weakness Class")]
if cwe in dict.keys():
dict[cwe].append(wclass)
# may want to error here instead
print_with_timestamp("WARNING: CWE \"" + cwe + "\" is assigned to more than 1 weakness class.")
else:
dict[cwe] = [wclass]
return dict
def print_with_timestamp(contents):
"""
Print a string with the timestamp at the beginning of the line.
"""
print("[" + time.ctime(None) + "] " + contents)
def run_commands(commands, use_shell=False):
"""
Runs a command as if it were run in the command prompt. If you need to use commands such as
"cd, dir, etc", set use_shell to True.
"""
command = " && ".join(commands)
# Not using print_with_timestamp() here since we want to capture the time for the time diff
time_started = time.time()
print("[" + time.ctime(time_started) + "] Started command: \"" + command + "\"")
sys.stdout.flush()
subprocess.check_call(command, shell=use_shell, stderr=sys.stderr, stdout=sys.stdout)
# Not using print_with_timestamp() here since we want to capture the time for the time diff
time_ended = time.time()
print("[" + time.ctime(time_ended) + "] Finished command: \"" + command + "\"")
elapsed_seconds = time_ended-time_started
print_with_timestamp("Command \"" + command + "\" took " + str(elapsed_seconds) + " seconds to complete.")
def run_analysis(test_case_path, build_file_regex, run_analysis_fx):
"""
Helper method to run an analysis using a tool.
Takes a test case path, build file regex and a function pointer.
"""
time_started = time.time()
# find all the files
files = find_files_in_dir(test_case_path, build_file_regex)
# run all the files using the function pointer
for file in files:
# change into directory with the file
dir = os.path.dirname(file)
os.chdir(dir)
# run the the file
file = os.path.basename(file)
run_analysis_fx(file)
# return to original working directory
os.chdir(sys.path[0])
time_ended = time.time()
print_with_timestamp("Started: " + time.ctime(time_started))
print_with_timestamp("Ended: " + time.ctime(time_ended))
elapsed_seconds = time_ended-time_started
print_with_timestamp("Elapsed time: " + convertSecondsToDHMS(elapsed_seconds))
def break_up_filename(file_name):
"""
Looks for various parts of the filename to place into the new columns.
"""
cwe_num = ''
cwe_name = ''
fx_var = ''
flow_var = ''
subfile = ''
lang = ''
result = re.search(get_testcase_filename_regex(), file_name, re.IGNORECASE)
if result == None:
# use blank values
print_with_timestamp("WARNING: file \"" + file_name + "\" is not going to be parsed into parts! (blank values will be used)")
else:
# its a normal testcase file
cwe_num = result.group('cwe_number')
cwe_name = result.group('cwe_name')
fx_var = result.group('functional_variant_name')
flow_var = result.group('flow_variant_id')
subfile = result.group('subfile_id')
lang = result.group('extension')
parts = {}
parts["testcase_cwe_number"] = cwe_num
parts["testcase_cwe_name"] = cwe_name
parts["testcase_function_variant"] = fx_var
parts["testcase_flow_variant"] = flow_var
parts["testcase_subfile_id"] = subfile
parts["testcase_language"] = lang
return parts
def break_up_cpp_function_name(function_name):
"""
Looks for various parts of the function name to place into the simplified function name
"""
result = re.search(get_functionname_c_regex(), function_name, re.IGNORECASE)
if result == None:
# Just use the original
return function_name
else:
# Use the "simplified" portion
return result.group("function_name")
def concatenate_csvs(input_directory, output_file):
"""
Combines multiple CSV files into a single CSV file.
"""
with open(output_file, 'w', newline='') as f:
writer = csv.writer(f, dialect='excel')
need_header = True
for file in find_files_in_dir(input_directory, ".*?\.csv$"):
header, records = read_csv_with_header(file)
if need_header:
writer.writerow(header)
need_header = False
for record in records:
writer.writerow(record)
def generate_unique_finding_ids(input_csv, output_csv):
"""
Modifies CSV so that each number in the finding_id column is unique
"""
with open(input_csv, 'r', newline='') as fi:
reader = csv.reader(fi, dialect='excel')
header = next(reader)
if 'finding_id' in header:
finding_id_index = header.index('finding_id')
else:
print_with_timestamp('finding_id does not exist in CSV header')
exit()
with open(output_csv, 'w', newline='') as fo:
writer = csv.writer(fo, dialect='excel')
writer.writerow(header)
unique_id = 1
for row in reader:
row[finding_id_index] = unique_id
writer.writerow(row)
unique_id = unique_id + 1
unique_id_count = 1
def add_unique_finding_ids(orig_header, new_header, row, data):
"""
Modifies CSV row so that each number in the finding_id column is unique
Call this from transform_csv
    For example: transform_csv(input_file, output_file, header_fx=None, row_fx=add_unique_finding_ids)
"""
global unique_id_count
finding_id_index = orig_header.index('finding_id')
row[finding_id_index] = unique_id_count
unique_id_count += 1
return row
def encode_language(input_lang):
"""
Checks the input language to ensure invalid file name/path characters do
not exist in the language as it is often used to generate output file names
in our scripts.
We currently only analyze C, C++, Java, C#, and .NET code, so if a new
language other than those listed is added we may need to review this helper
function.
"""
encoded_lang = input_lang.replace("+", "p") # for C++
encoded_lang = encoded_lang.replace("/", "_") # for C/C++
encoded_lang = encoded_lang.replace("\\", "_") # for C\C++
encoded_lang = encoded_lang.replace("#", "sharp") # for C#
return encoded_lang
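# Examples of encode_language(), derived from the replacements above
# (not part of the original script):
#   encode_language("C++")   -> "Cpp"
#   encode_language("C/C++") -> "C_Cpp"
#   encode_language("C#")    -> "Csharp"
#   encode_language("Java")  -> "Java"   (unchanged)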
def move_testcase_to_split_directories(dir, functional_variants, testcase_files, file_count_limit):
"""
Given a directory, list of functional variants, list of testcase files, and file count limit,
this method creates subdirectories inside the provided directory. It adds all of the files for
a functional variant until the file_count_limit is reached. If this limit is reached, it begins
placing the files in another subdirectory.
NOTE: All files for a given functional variant will remain in the same directory.
"""
subdir_count = 1
number_of_files_in_subdir = 0
is_subdir_needed = True
func_var_dir = ""
for func_var in functional_variants:
# filter the list of test cases for this functional variant
        func_var_regex = "__" + func_var + r"_\d\d"
filter_regex = re.compile(func_var_regex, re.IGNORECASE)
func_var_testcase_files = [f for f in testcase_files if filter_regex.search(f)]
func_var_testcase_files_count = len(func_var_testcase_files)
if ((func_var_testcase_files_count + number_of_files_in_subdir) > file_count_limit):
is_subdir_needed = True
        if is_subdir_needed:
if subdir_count < 10:
func_var_dir = os.path.join(dir, 's' + '0' + str(subdir_count))
else:
func_var_dir = os.path.join(dir, 's' + str(subdir_count))
os.mkdir(func_var_dir)
subdir_count = subdir_count + 1
is_subdir_needed = False
number_of_files_in_subdir = func_var_testcase_files_count
else:
number_of_files_in_subdir = number_of_files_in_subdir + func_var_testcase_files_count
# Copy the files for this functional variant to the new directory
# and remove the file from the CWE root directory
print_with_timestamp("Moving the test cases for the following functional variant \"" + func_var + "\" to subdirecory \"" + func_var_dir + "\"")
for testcase_file in func_var_testcase_files:
shutil.copy(testcase_file, func_var_dir)
os.unlink(testcase_file)
def create_or_clean_directory(dir):
"""
This method attempts to create the specified directory. However, if it
already exists then it will be cleaned to ensure there are no stale files.
"""
if not os.path.exists(dir):
print_with_timestamp("The path \"" + dir + "\" does not exist")
print_with_timestamp("creating directory \"" + dir + "\"")
os.makedirs(dir)
else: #Directory exists, but we want to clean it before use
print_with_timestamp(dir + " already exists. Cleaning before use...")
shutil.rmtree(dir)
os.makedirs(dir)
def extract_cwe_id_from_path(path):
"""
This method extracts the CWE ID (and possibly sub-ID) from the path.
    It assumes that the CWE ID is contained in the path and will raise an
    error if it is not within the path.
    This is used mostly for the Java test cases when creating a project
name in our run_analysis scripts. It's easy to get the C/C++ CWE ID
since it is the beginning part of the batch filename. However, the Java
scripts run on all build.xml files and therefore we need to parse the
CWE ID from the path containing the build.xml file.
"""
cwe_id = ""
if os.path.basename(path).startswith('CWE'):
cwe_id = re.search(get_cwe_id_regex(), os.path.basename(path)).group(1)
# if the basename does not start with 'CWE' then we are in a sub-directory
# and the sub-directory name is "s" plus a number (s01, s02, etc.) so we append this string
# to the end of the CWE id to make it a unique value
else:
cwe_id = re.search(get_cwe_id_regex(), path).group(1)
sub_dir = os.path.basename(path)
cwe_id = cwe_id + '_' + sub_dir
return cwe_id
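# Illustrative sketch of extract_cwe_id_from_path(), not part of the original
# script. The capture group comes from get_cwe_id_regex(), defined elsewhere,
# so the exact return values are assumptions:
#   ".../CWE835_Infinite_Loop"      -> the CWE ID captured from the basename
#   ".../CWE835_Infinite_Loop/s01"  -> the same CWE ID with "_s01" appended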
def convertSecondsToDHMS(seconds):
"""
Converts seconds into days, hours, minutes, seconds
"""
if seconds >= 0 and seconds < 1:
seconds = round(seconds, 2)
return str(seconds)
else:
seconds = int(round(seconds))
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
formatStr = "{0} day{1}, {2} hour{3}, {4} minute{5}, {6} second{7}"
output = formatStr.format( \
days, "" if days==1 else "s", \
hours, "" if hours==1 else "s", \
minutes, "" if minutes==1 else "s", \
seconds, "" if seconds==1 else "s")
return output
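# Examples of convertSecondsToDHMS(), derived from the arithmetic above
# (not part of the original script):
#   convertSecondsToDHMS(0.5)   -> '0.5'
#   convertSecondsToDHMS(3725)  -> '0 days, 1 hour, 2 minutes, 5 seconds'
#   convertSecondsToDHMS(90061) -> '1 day, 1 hour, 1 minute, 1 second'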
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module uses functions from TLSLite (public domain)
#
# TLSLite Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
"""Pure-Python RSA implementation."""
import os
import math
import hashlib
from .pem import *
def SHA1(x):
return hashlib.sha1(x).digest()
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Check that os.urandom works
import zlib
length = len(zlib.compress(os.urandom(1000)))
assert(length > 900)
def getRandomBytes(howMany):
b = bytearray(os.urandom(howMany))
assert(len(b) == howMany)
return b
prngName = "os.urandom"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(b):
total = 0
multiplier = 1
for count in range(len(b)-1, -1, -1):
byte = b[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToByteArray(n, howManyBytes=None):
"""Convert an integer into a bytearray, zero-pad to howManyBytes.
The returned bytearray may be smaller than howManyBytes, but will
not be larger. The returned bytearray will contain a big-endian
encoding of the input integer (n).
"""
    if howManyBytes is None:
howManyBytes = numBytes(n)
b = bytearray(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
b[count] = int(n % 256)
n >>= 8
return b
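# Examples of numberToByteArray(), derived from the loop above (not part of
# the original module):
#   numberToByteArray(65537)    -> bytearray(b'\x01\x00\x01')
#   numberToByteArray(65537, 4) -> bytearray(b'\x00\x01\x00\x01')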
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
    b = bytearray(mpi)
    if (b[4] & 0x80) != 0: #Make sure this is a positive number
        raise AssertionError()
    return bytesToNumber(b[4:])
def numberToMPI(n):
b = numberToByteArray(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
b = bytearray(4+ext) + b
b[0] = (length >> 24) & 0xFF
b[1] = (length >> 16) & 0xFF
b[2] = (length >> 8) & 0xFF
b[3] = length & 0xFF
return bytes(b)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
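# Examples of numBits()/numBytes() (not part of the original module):
#   numBits(255) -> 8,  numBits(256) -> 9
#   numBytes(255) -> 1, numBytes(256) -> 2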
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
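# Examples of gcd()/lcm() (not part of the original module):
#   gcd(12, 18) -> 6
#   lcm(12, 18) -> 36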
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
q = d // c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
def powMod(base, power, modulus):
if power < 0:
result = pow(base, power*-1, modulus)
result = invMod(result, modulus)
return result
else:
return pow(base, power, modulus)
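# Examples of invMod()/powMod() (not part of the original module):
#   invMod(3, 11)     -> 4   (3 * 4 == 12 == 1 mod 11)
#   invMod(2, 4)      -> 0   (no inverse exists)
#   powMod(2, 10, 11) -> 1
#   powMod(2, -1, 11) -> 6   (negative exponent uses the modular inverse)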
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
sieve = list(range(n))
for count in range(2, int(math.sqrt(n))+1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print("*", end=' ')
s, t = n-1, 0
while s % 2 == 0:
s, t = s//2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
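# Examples of isPrime() (not part of the original module):
#   isPrime(97) -> True
#   isPrime(91) -> False   (91 == 7 * 13)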
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = ((2 ** (bits-1)) * 3) // 2
high = 2 ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print(".", end=' ')
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3//2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print(".", end=' ')
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
class RSAKey(object):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def __len__(self):
"""Return the length of this key in bits.
@rtype: int
"""
return numBits(self.n)
def hasPrivateKey(self):
return self.d != 0
def hashAndSign(self, bytes):
"""Hash and sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1-SHA1 signature on the passed-in data.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1-SHA1 signature on the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)
sigBytes = self.sign(prefixedHashBytes)
return sigBytes
def hashAndVerify(self, sigBytes, bytes):
"""Hash and verify the passed-in bytes with the signature.
This verifies a PKCS1-SHA1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1-SHA1 signature.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
# Try it with/without the embedded NULL
prefixedHashBytes1 = self._addPKCS1SHA1Prefix(hashBytes, False)
prefixedHashBytes2 = self._addPKCS1SHA1Prefix(hashBytes, True)
result1 = self.verify(sigBytes, prefixedHashBytes1)
result2 = self.verify(sigBytes, prefixedHashBytes2)
return (result1 or result2)
def sign(self, bytes):
"""Sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1 signature on the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 signature on the passed-in data.
"""
if not self.hasPrivateKey():
raise AssertionError()
paddedBytes = self._addPKCS1Padding(bytes, 1)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPrivateKeyOp(m)
sigBytes = numberToByteArray(c, numBytes(self.n))
return sigBytes
def verify(self, sigBytes, bytes):
"""Verify the passed-in bytes with the signature.
This verifies a PKCS1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1 signature.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
if len(sigBytes) != numBytes(self.n):
return False
paddedBytes = self._addPKCS1Padding(bytes, 1)
c = bytesToNumber(sigBytes)
if c >= self.n:
return False
m = self._rawPublicKeyOp(c)
checkBytes = numberToByteArray(m, numBytes(self.n))
return checkBytes == paddedBytes
def encrypt(self, bytes):
"""Encrypt the passed-in bytes.
This performs PKCS1 encryption of the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be encrypted.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 encryption of the passed-in data.
"""
paddedBytes = self._addPKCS1Padding(bytes, 2)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPublicKeyOp(m)
encBytes = numberToByteArray(c, numBytes(self.n))
return encBytes
def decrypt(self, encBytes):
"""Decrypt the passed-in bytes.
This requires the key to have a private component. It performs
PKCS1 decryption of the passed-in data.
@type encBytes: L{bytearray} of unsigned bytes
@param encBytes: The value which will be decrypted.
@rtype: L{bytearray} of unsigned bytes or None.
@return: A PKCS1 decryption of the passed-in data or None if
the data is not properly formatted.
"""
if not self.hasPrivateKey():
raise AssertionError()
if len(encBytes) != numBytes(self.n):
return None
c = bytesToNumber(encBytes)
if c >= self.n:
return None
m = self._rawPrivateKeyOp(c)
decBytes = numberToByteArray(m, numBytes(self.n))
#Check first two bytes
if decBytes[0] != 0 or decBytes[1] != 2:
return None
#Scan through for zero separator
for x in range(1, len(decBytes)-1):
if decBytes[x]== 0:
break
else:
return None
return decBytes[x+1:] #Return everything after the separator
# **************************************************************************
# Helper Functions for RSA Keys
# **************************************************************************
def _addPKCS1SHA1Prefix(self, bytes, withNULL=True):
# There is a long history of confusion over whether the SHA1
# algorithmIdentifier should be encoded with a NULL parameter or
# with the parameter omitted. While the original intention was
# apparently to omit it, many toolkits went the other way. TLS 1.2
# specifies the NULL should be included, and this behavior is also
# mandated in recent versions of PKCS #1, and is what tlslite has
# always implemented. Anyways, verification code should probably
# accept both. However, nothing uses this code yet, so this is
# all fairly moot.
if not withNULL:
prefixBytes = bytearray(\
[0x30,0x1f,0x30,0x07,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x04,0x14])
else:
prefixBytes = bytearray(\
[0x30,0x21,0x30,0x09,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x05,0x00,0x04,0x14])
prefixedBytes = prefixBytes + bytes
return prefixedBytes
def _addPKCS1Padding(self, bytes, blockType):
padLength = (numBytes(self.n) - (len(bytes)+3))
if blockType == 1: #Signature padding
pad = [0xFF] * padLength
elif blockType == 2: #Encryption padding
pad = bytearray(0)
while len(pad) < padLength:
padBytes = getRandomBytes(padLength * 2)
pad = [b for b in padBytes if b != 0]
pad = pad[:padLength]
else:
raise AssertionError()
padding = bytearray([0,blockType] + pad + [0])
paddedBytes = padding + bytes
return paddedBytes
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self):
return False
def generate(bits):
key = RSAKey()
p = getRandomPrime(bits//2, False)
q = getRandomPrime(bits//2, False)
t = lcm(p-1, q-1)
key.n = p * q
key.e = 65537
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
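# A minimal usage sketch of RSAKey, not part of the original module. Key
# generation in pure Python is slow, so a small size is used purely for
# illustration; real keys should be much larger.
#   key = RSAKey.generate(512)
#   sig = key.hashAndSign(b"hello")          # PKCS#1 v1.5 signature over SHA-1
#   assert key.hashAndVerify(sig, b"hello")
#   ct = key.encrypt(b"secret")              # PKCS#1 v1.5 (type 2) encryption
#   assert key.decrypt(ct) == bytearray(b"secret")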
|
|
#!/usr/bin/env python3
# Copyright (c) 2016-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
import random
from test_framework.blocktools import (
COINBASE_MATURITY,
NORMAL_GBT_REQUEST_PARAMS,
add_witness_commitment,
create_block,
)
from test_framework.messages import (
BlockTransactions,
BlockTransactionsRequest,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
from_hex,
HeaderAndShortIDs,
MSG_BLOCK,
MSG_CMPCT_BLOCK,
MSG_WITNESS_FLAG,
NODE_NETWORK,
P2PHeaderAndShortIDs,
PrefilledTransaction,
calculate_shortid,
msg_block,
msg_blocktxn,
msg_cmpctblock,
msg_getblocktxn,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_no_witness_block,
msg_no_witness_blocktxn,
msg_sendcmpct,
msg_sendheaders,
msg_tx,
ser_uint256,
tx_from_hex,
)
from test_framework.p2p import (
P2PInterface,
p2p_lock,
)
from test_framework.script import (
CScript,
OP_DROP,
OP_TRUE,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
softfork_active,
)
from test_framework.wallet import MiniWallet
# TestP2PConn: A peer we use to send messages to bitcoind, and store responses.
class TestP2PConn(P2PInterface):
def __init__(self, cmpct_version):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
self.cmpct_version = cmpct_version
def on_sendcmpct(self, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, message):
for x in self.last_message["inv"].inv:
if x.type == MSG_BLOCK:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold p2p_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with p2p_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
self.wait_until(self.received_block_announcement, timeout=30)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
self.wait_until(received_hash, timeout=timeout)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
self.wait_for_disconnect(timeout)
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-acceptnonstdtxn=1",
]]
self.utxos = []
def build_block_on_tip(self, node, segwit=False):
block = create_block(tmpl=node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS))
if segwit:
add_witness_commitment(block)
block.solve()
return block
    # Create 10 more anyone-can-spend UTXOs for testing.
def make_utxos(self):
block = self.build_block_on_tip(self.nodes[0])
self.segwit_node.send_and_ping(msg_no_witness_block(block))
assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256
self.generate(self.wallet, COINBASE_MATURITY)
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for _ in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.segwit_node.send_and_ping(msg_no_witness_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
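    # A hedged illustration of the request sent below (msg_sendcmpct and
    # send_and_ping are existing test-framework helpers imported above):
    #   peer.send_and_ping(msg_sendcmpct(announce=True, version=2))
    # asks the node to announce new blocks to this peer via cmpctblock messages.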
def test_sendcmpct(self, test_node, old_node=None):
preferred_version = test_node.cmpct_version
node = self.nodes[0]
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
test_node.wait_until(received_sendcmpct, timeout=30)
with p2p_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(self.generate(node, 1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert peer.block_announced
with p2p_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
test_node.send_and_ping(msg_sendcmpct(announce=True, version=preferred_version+1))
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
test_node.send_and_ping(msg_sendcmpct(announce=False, version=preferred_version))
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
test_node.send_and_ping(msg_sendcmpct(announce=True, version=preferred_version))
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after sending a version-1, announce=false message.
test_node.send_and_ping(msg_sendcmpct(announce=False, version=preferred_version-1))
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
test_node.send_and_ping(msg_sendcmpct(announce=False, version=preferred_version))
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
old_node.send_and_ping(msg_sendcmpct(announce=True, version=preferred_version-1))
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.segwit_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
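    # Per BIP 152, a shortid is a 6-byte truncated SipHash-2-4 of the txid or
    # wtxid, keyed from a hash of the block header and nonce; the imported
    # calculate_shortid(k0, k1, tx_hash) helper is used below for that check.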
def test_compactblock_construction(self, test_node, use_witness_address=True):
version = test_node.cmpct_version
node = self.nodes[0]
# Generate a bunch of transactions.
self.generate(node, COINBASE_MATURITY + 1)
num_transactions = 25
segwit_tx_generated = False
for _ in range(num_transactions):
hex_tx = self.wallet.send_self_transfer(from_node=self.nodes[0])['hex']
tx = tx_from_hex(hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert segwit_tx_generated # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(self.generate(node, 1)[0], 16)
# Store the raw block in our internal format.
block = from_hex(CBlock(), node.getblock("%064x" % block_hash, False))
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)
# Now fetch and check the compact block
header_and_shortids = None
with p2p_lock:
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
test_node.clear_block_announcement()
inv = CInv(MSG_CMPCT_BLOCK, block_hash)
test_node.send_message(msg_getdata([inv]))
test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)
# Now fetch and check the compact block
header_and_shortids = None
with p2p_lock:
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert len(header_and_shortids.prefilled_txn) >= 1
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
# This checks the non-witness parts of the tx agree
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
# And this checks the witness
wtxid = entry.tx.calc_sha256(True)
if version == 2:
assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
else:
# Shouldn't have received a witness
assert entry.tx.wit.is_null()
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
if version == 2:
tx_hash = block.vtx[index].calc_sha256(True)
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, test_node, segwit=True):
version = test_node.cmpct_version
node = self.nodes[0]
# Try announcing a block with an inv or header, expect a compactblock
# request
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node, segwit=segwit)
if announce == "inv":
test_node.send_message(msg_inv([CInv(MSG_BLOCK, block.sha256)]))
test_node.wait_until(lambda: "getheaders" in test_node.last_message, timeout=30)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
test_node.wait_for_getdata([block.sha256], timeout=30)
assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
coinbase_hash = block.vtx[0].sha256
if version == 2:
coinbase_hash = block.vtx[0].calc_sha256(True)
comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with p2p_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
if version == 2:
msg = msg_blocktxn()
else:
msg = msg_no_witness_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for _ in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
with_witness = (version == 2)
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with p2p_lock:
assert "getblocktxn" in peer.last_message
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
# that we receive getblocktxn messages back.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_no_witness_blocktxn()
if with_witness:
msg_bt = msg_blocktxn() # serialize with witnesses
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now try interspersing the prefilled transactions
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now try giving one transaction ahead of time.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(block.vtx[1]))
assert block.vtx[1].hash in node.getrawmempool()
# Prefill 4 out of the 6 transactions, and verify that only the one
# that was not in the mempool is requested.
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [5])
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now provide all transactions to the node before the block is
# announced and verify reconstruction happens immediately.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
# Clear out last request.
with p2p_lock:
test_node.last_message.pop("getblocktxn", None)
# Send compact block
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with p2p_lock:
# Shouldn't have gotten a request for any transaction
assert "getblocktxn" not in test_node.last_message
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Relay the first 5 transactions from the block in advance
for tx in block.vtx[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:6]:
assert tx.hash in mempool
# Send compact block
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with p2p_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
# Now give an incorrect response.
# Note that it's possible for bitcoind to be smart enough to know we're
# lying, since it could check to see if the shortid matches what we're
# sending, and eg disconnect us for misbehavior. If that behavior
# change was made, we could just modify this test by having a
# different peer provide the block further down, so that we're still
# verifying that the block isn't marked bad permanently. This is good
# enough for now.
msg = msg_no_witness_blocktxn()
if version == 2:
msg = msg_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
test_node.wait_for_getdata([block.sha256], timeout=10)
assert test_node.last_message["getdata"].inv[0].type == MSG_BLOCK or \
test_node.last_message["getdata"].inv[0].type == MSG_BLOCK | MSG_WITNESS_FLAG
# Deliver the block
if version == 2:
test_node.send_and_ping(msg_block(block))
else:
test_node.send_and_ping(msg_no_witness_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
# bitcoind will not send blocktxn responses for blocks whose height is
# more than 10 blocks deep.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = from_hex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
test_node.send_message(msg)
test_node.wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10)
[tx.calc_sha256() for tx in block.vtx]
with p2p_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
if version == 1:
# Witnesses should have been stripped
assert tx.wit.is_null()
else:
# Check that the witness matches
assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
test_node.last_message.pop("blocktxn", None)
current_height -= 1
# Next request should send a full block response, as we're past the
# allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
with p2p_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with p2p_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, test_node):
node = self.nodes[0]
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for _ in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(self.generate(node, 1)[0])
test_node.wait_until(test_node.received_block_announcement, timeout=30)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]))
test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)
test_node.clear_block_announcement()
self.generate(node, 1)
test_node.wait_until(test_node.received_block_announcement, timeout=30)
test_node.clear_block_announcement()
with p2p_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]))
test_node.wait_until(lambda: "block" in test_node.last_message, timeout=30)
with p2p_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert found
# Requesting this block via getblocktxn should silently fail
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with p2p_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with p2p_lock:
assert "blocktxn" not in test_node.last_message
def test_end_to_end_block_relay(self, listeners):
node = self.nodes[0]
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
# serialize without witness (this block has no witnesses anyway).
# TODO: repeat this test with witness tx's to a segwit node.
node.submitblock(block.serialize().hex())
for l in listeners:
l.wait_until(lambda: "cmpctblock" in l.last_message, timeout=30)
with p2p_lock:
for l in listeners:
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, test_node, use_segwit=True):
node = self.nodes[0]
assert len(self.utxos)
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
if use_segwit:
# If we're testing with segwit, also drop the coinbase witness,
# but include the witness commitment.
add_witness_commitment(block)
block.vtx[0].wit.vtxinwit = []
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
        assert int(node.getbestblockhash(), 16) != block.sha256
test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer):
node = self.nodes[0]
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
peer.send_and_ping(msg_sendcmpct(announce=True, version=peer.cmpct_version))
def test_compactblock_reconstruction_multiple_peers(self, stalling_peer, delivery_peer):
node = self.nodes[0]
assert len(self.utxos)
def announce_cmpct_block(node, peer):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
cmpct_block = HeaderAndShortIDs()
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
with p2p_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now test that delivering an invalid compact block won't break relay
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()]
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
cmpct_block.use_witness = True
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert int(node.getbestblockhash(), 16) != block.sha256
msg = msg_no_witness_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = block.vtx[1:]
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_highbandwidth_mode_states_via_getpeerinfo(self):
# create new p2p connection for a fresh state w/o any prior sendcmpct messages sent
hb_test_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
# assert the RPC getpeerinfo boolean fields `bip152_hb_{to, from}`
# match the given parameters for the last peer of a given node
def assert_highbandwidth_states(node, hb_to, hb_from):
peerinfo = node.getpeerinfo()[-1]
assert_equal(peerinfo['bip152_hb_to'], hb_to)
assert_equal(peerinfo['bip152_hb_from'], hb_from)
# initially, neither node has selected the other peer as high-bandwidth yet
assert_highbandwidth_states(self.nodes[0], hb_to=False, hb_from=False)
# peer requests high-bandwidth mode by sending sendcmpct(1)
hb_test_node.send_and_ping(msg_sendcmpct(announce=True, version=2))
assert_highbandwidth_states(self.nodes[0], hb_to=False, hb_from=True)
# peer generates a block and sends it to node, which should
# select the peer as high-bandwidth (up to 3 peers according to BIP 152)
block = self.build_block_on_tip(self.nodes[0])
hb_test_node.send_and_ping(msg_block(block))
assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=True)
# peer requests low-bandwidth mode by sending sendcmpct(0)
hb_test_node.send_and_ping(msg_sendcmpct(announce=False, version=2))
assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=False)
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
# Setup the p2p connections
self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=1), services=NODE_NETWORK)
self.additional_segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
assert softfork_active(self.nodes[0], "segwit")
self.log.info("Testing SENDCMPCT p2p message... ")
self.test_sendcmpct(self.segwit_node, old_node=self.old_node)
self.test_sendcmpct(self.additional_segwit_node)
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.old_node)
self.test_compactblock_construction(self.segwit_node)
self.log.info("Testing compactblock requests (segwit node)... ")
self.test_compactblock_requests(self.segwit_node)
self.log.info("Testing getblocktxn requests (segwit node)...")
self.test_getblocktxn_requests(self.segwit_node)
self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
self.test_getblocktxn_handler(self.segwit_node)
self.test_getblocktxn_handler(self.old_node)
self.log.info("Testing compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.segwit_node)
self.test_compactblocks_not_at_tip(self.old_node)
self.log.info("Testing handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.segwit_node)
self.log.info("Testing reconstructing compact blocks from all peers...")
self.test_compactblock_reconstruction_multiple_peers(self.segwit_node, self.additional_segwit_node)
# Test that if we submitblock to node1, we'll get a compact block
# announcement to all peers.
# (Post-segwit activation, blocks won't propagate from node0 to node1
# automatically, so don't bother testing a block announced to node0.)
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.old_node)
self.request_cb_announcements(self.segwit_node)
self.test_end_to_end_block_relay([self.segwit_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.segwit_node)
self.test_invalid_tx_in_compactblock(self.old_node)
self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
self.log.info("Testing high-bandwidth mode states via getpeerinfo...")
self.test_highbandwidth_mode_states_via_getpeerinfo()
if __name__ == '__main__':
CompactBlocksTest().main()
|
|
"""Support for MQTT message handling."""
import asyncio
from functools import partial, wraps
import inspect
from itertools import groupby
import json
import logging
from operator import attrgetter
import os
import socket
import ssl
import time
from typing import Any, Callable, List, Optional, Union
import attr
import requests.certs
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import websocket_api
from homeassistant.const import (
CONF_DEVICE,
CONF_NAME,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_PORT,
CONF_PROTOCOL,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import Event, ServiceCall, callback
from homeassistant.exceptions import (
HomeAssistantError,
Unauthorized,
ConfigEntryNotReady,
)
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceDataType
from homeassistant.loader import bind_hass
from homeassistant.util.async_ import run_callback_threadsafe, run_coroutine_threadsafe
from homeassistant.util.logging import catch_log_exception
# Loading the config flow file will register the flow
from . import config_flow, discovery, server # noqa pylint: disable=unused-import
from .const import (
CONF_BROKER,
CONF_DISCOVERY,
DEFAULT_DISCOVERY,
CONF_STATE_TOPIC,
ATTR_DISCOVERY_HASH,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mqtt"
DATA_MQTT = "mqtt"
DATA_MQTT_CONFIG = "mqtt_config"
DATA_MQTT_HASS_CONFIG = "mqtt_hass_config"
SERVICE_PUBLISH = "publish"
CONF_EMBEDDED = "embedded"
CONF_CLIENT_ID = "client_id"
CONF_DISCOVERY_PREFIX = "discovery_prefix"
CONF_KEEPALIVE = "keepalive"
CONF_CERTIFICATE = "certificate"
CONF_CLIENT_KEY = "client_key"
CONF_CLIENT_CERT = "client_cert"
CONF_TLS_INSECURE = "tls_insecure"
CONF_TLS_VERSION = "tls_version"
CONF_BIRTH_MESSAGE = "birth_message"
CONF_WILL_MESSAGE = "will_message"
CONF_COMMAND_TOPIC = "command_topic"
CONF_AVAILABILITY_TOPIC = "availability_topic"
CONF_PAYLOAD_AVAILABLE = "payload_available"
CONF_PAYLOAD_NOT_AVAILABLE = "payload_not_available"
CONF_JSON_ATTRS_TOPIC = "json_attributes_topic"
CONF_JSON_ATTRS_TEMPLATE = "json_attributes_template"
CONF_QOS = "qos"
CONF_RETAIN = "retain"
CONF_UNIQUE_ID = "unique_id"
CONF_IDENTIFIERS = "identifiers"
CONF_CONNECTIONS = "connections"
CONF_MANUFACTURER = "manufacturer"
CONF_MODEL = "model"
CONF_SW_VERSION = "sw_version"
CONF_VIA_DEVICE = "via_device"
CONF_DEPRECATED_VIA_HUB = "via_hub"
PROTOCOL_31 = "3.1"
PROTOCOL_311 = "3.1.1"
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_QOS = 0
DEFAULT_RETAIN = False
DEFAULT_PROTOCOL = PROTOCOL_311
DEFAULT_DISCOVERY_PREFIX = "homeassistant"
DEFAULT_TLS_PROTOCOL = "auto"
DEFAULT_PAYLOAD_AVAILABLE = "online"
DEFAULT_PAYLOAD_NOT_AVAILABLE = "offline"
ATTR_TOPIC = "topic"
ATTR_PAYLOAD = "payload"
ATTR_PAYLOAD_TEMPLATE = "payload_template"
ATTR_QOS = CONF_QOS
ATTR_RETAIN = CONF_RETAIN
MAX_RECONNECT_WAIT = 300 # seconds
CONNECTION_SUCCESS = "connection_success"
CONNECTION_FAILED = "connection_failed"
CONNECTION_FAILED_RECOVERABLE = "connection_failed_recoverable"
def valid_topic(value: Any) -> str:
"""Validate that this is a valid topic name/filter."""
value = cv.string(value)
try:
raw_value = value.encode("utf-8")
except UnicodeError:
raise vol.Invalid("MQTT topic name/filter must be valid UTF-8 string.")
if not raw_value:
raise vol.Invalid("MQTT topic name/filter must not be empty.")
if len(raw_value) > 65535:
raise vol.Invalid(
"MQTT topic name/filter must not be longer than " "65535 encoded bytes."
)
if "\0" in value:
raise vol.Invalid("MQTT topic name/filter must not contain null " "character.")
return value
def valid_subscribe_topic(value: Any) -> str:
"""Validate that we can subscribe using this MQTT topic."""
value = valid_topic(value)
for i in (i for i, c in enumerate(value) if c == "+"):
if (i > 0 and value[i - 1] != "/") or (
i < len(value) - 1 and value[i + 1] != "/"
):
raise vol.Invalid(
"Single-level wildcard must occupy an entire " "level of the filter"
)
index = value.find("#")
if index != -1:
if index != len(value) - 1:
# If there are multiple wildcards, this will also trigger
raise vol.Invalid(
"Multi-level wildcard must be the last "
"character in the topic filter."
)
if len(value) > 1 and value[index - 1] != "/":
raise vol.Invalid(
"Multi-level wildcard must be after a topic " "level separator."
)
return value
def valid_publish_topic(value: Any) -> str:
"""Validate that we can publish using this MQTT topic."""
value = valid_topic(value)
if "+" in value or "#" in value:
raise vol.Invalid("Wildcards can not be used in topic names")
return value
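# Illustrative sketch of the validators above (inputs are hypothetical):
#   valid_subscribe_topic("home/+/temperature")  -> "home/+/temperature"
#   valid_subscribe_topic("home/#")              -> "home/#"
#   valid_subscribe_topic("home/#/extra")        raises vol.Invalid
#   valid_publish_topic("home/livingroom/light") -> "home/livingroom/light"
#   valid_publish_topic("home/+/light")          raises vol.Invalid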
def validate_device_has_at_least_one_identifier(value: ConfigType) -> ConfigType:
"""Validate that a device info entry has at least one identifying value."""
if not value.get(CONF_IDENTIFIERS) and not value.get(CONF_CONNECTIONS):
raise vol.Invalid(
"Device must have at least one identifying value in "
"'identifiers' and/or 'connections'"
)
return value
_VALID_QOS_SCHEMA = vol.All(vol.Coerce(int), vol.In([0, 1, 2]))
CLIENT_KEY_AUTH_MSG = (
"client_key and client_cert must both be present in "
"the MQTT broker configuration"
)
MQTT_WILL_BIRTH_SCHEMA = vol.Schema(
{
vol.Required(ATTR_TOPIC): valid_publish_topic,
vol.Required(ATTR_PAYLOAD, CONF_PAYLOAD): cv.string,
vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
},
required=True,
)
def embedded_broker_deprecated(value):
"""Warn user that embedded MQTT broker is deprecated."""
_LOGGER.warning(
"The embedded MQTT broker has been deprecated and will stop working"
"after June 5th, 2019. Use an external broker instead. For"
"instructions, see https://www.home-assistant.io/docs/mqtt/broker"
)
return value
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_CLIENT_ID): cv.string,
vol.Optional(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE): vol.All(
vol.Coerce(int), vol.Range(min=15)
),
vol.Optional(CONF_BROKER): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_CERTIFICATE): vol.Any("auto", cv.isfile),
vol.Inclusive(
CONF_CLIENT_KEY, "client_key_auth", msg=CLIENT_KEY_AUTH_MSG
): cv.isfile,
vol.Inclusive(
CONF_CLIENT_CERT, "client_key_auth", msg=CLIENT_KEY_AUTH_MSG
): cv.isfile,
vol.Optional(CONF_TLS_INSECURE): cv.boolean,
vol.Optional(CONF_TLS_VERSION, default=DEFAULT_TLS_PROTOCOL): vol.Any(
"auto", "1.0", "1.1", "1.2"
),
vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): vol.All(
cv.string, vol.In([PROTOCOL_31, PROTOCOL_311])
),
vol.Optional(CONF_EMBEDDED): vol.All(
server.HBMQTT_CONFIG_SCHEMA, embedded_broker_deprecated
),
vol.Optional(CONF_WILL_MESSAGE): MQTT_WILL_BIRTH_SCHEMA,
vol.Optional(CONF_BIRTH_MESSAGE): MQTT_WILL_BIRTH_SCHEMA,
vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
# discovery_prefix must be a valid publish topic because if no
# state topic is specified, it will be created with the given prefix.
vol.Optional(
CONF_DISCOVERY_PREFIX, default=DEFAULT_DISCOVERY_PREFIX
): valid_publish_topic,
}
)
},
extra=vol.ALLOW_EXTRA,
)
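# A minimal configuration.yaml sketch accepted by the schema above (all
# values are hypothetical placeholders, not defaults from this module):
#
#   mqtt:
#     broker: 192.168.1.10
#     port: 1883
#     username: ha_user
#     password: my_password
#     discovery: true
#     discovery_prefix: homeassistant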
SCHEMA_BASE = {vol.Optional(CONF_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA}
MQTT_AVAILABILITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_AVAILABILITY_TOPIC): valid_subscribe_topic,
vol.Optional(
CONF_PAYLOAD_AVAILABLE, default=DEFAULT_PAYLOAD_AVAILABLE
): cv.string,
vol.Optional(
CONF_PAYLOAD_NOT_AVAILABLE, default=DEFAULT_PAYLOAD_NOT_AVAILABLE
): cv.string,
}
)
MQTT_ENTITY_DEVICE_INFO_SCHEMA = vol.All(
cv.deprecated(CONF_DEPRECATED_VIA_HUB, CONF_VIA_DEVICE),
vol.Schema(
{
vol.Optional(CONF_IDENTIFIERS, default=list): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_CONNECTIONS, default=list): vol.All(
cv.ensure_list, [vol.All(vol.Length(2), [cv.string])]
),
vol.Optional(CONF_MANUFACTURER): cv.string,
vol.Optional(CONF_MODEL): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_SW_VERSION): cv.string,
vol.Optional(CONF_VIA_DEVICE): cv.string,
}
),
validate_device_has_at_least_one_identifier,
)
MQTT_JSON_ATTRS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_JSON_ATTRS_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_JSON_ATTRS_TEMPLATE): cv.template,
}
)
MQTT_BASE_PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(SCHEMA_BASE)
# Sensor type platforms subscribe to MQTT events
MQTT_RO_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
# Switch type platforms publish to MQTT and may subscribe
MQTT_RW_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
vol.Optional(CONF_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
# Service call validation schema
MQTT_PUBLISH_SCHEMA = vol.Schema(
{
vol.Required(ATTR_TOPIC): valid_publish_topic,
vol.Exclusive(ATTR_PAYLOAD, CONF_PAYLOAD): object,
vol.Exclusive(ATTR_PAYLOAD_TEMPLATE, CONF_PAYLOAD): cv.string,
vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
},
required=True,
)
# pylint: disable=invalid-name
PublishPayloadType = Union[str, bytes, int, float, None]
SubscribePayloadType = Union[str, bytes] # Only bytes if encoding is None
@attr.s(slots=True, frozen=True)
class Message:
"""MQTT Message."""
topic = attr.ib(type=str)
payload = attr.ib(type=PublishPayloadType)
qos = attr.ib(type=int)
retain = attr.ib(type=bool)
MessageCallbackType = Callable[[Message], None]
def _build_publish_data(topic: Any, qos: int, retain: bool) -> ServiceDataType:
"""Build the arguments for the publish service without the payload."""
data = {ATTR_TOPIC: topic}
if qos is not None:
data[ATTR_QOS] = qos
if retain is not None:
data[ATTR_RETAIN] = retain
return data
@bind_hass
def publish(hass: HomeAssistantType, topic, payload, qos=None, retain=None) -> None:
"""Publish message to an MQTT topic."""
hass.add_job(async_publish, hass, topic, payload, qos, retain)
@callback
@bind_hass
def async_publish(
hass: HomeAssistantType, topic: Any, payload, qos=None, retain=None
) -> None:
"""Publish message to an MQTT topic."""
data = _build_publish_data(topic, qos, retain)
data[ATTR_PAYLOAD] = payload
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_PUBLISH, data))
@bind_hass
def publish_template(
hass: HomeAssistantType, topic, payload_template, qos=None, retain=None
) -> None:
"""Publish message to an MQTT topic using a template payload."""
data = _build_publish_data(topic, qos, retain)
data[ATTR_PAYLOAD_TEMPLATE] = payload_template
hass.services.call(DOMAIN, SERVICE_PUBLISH, data)
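# Illustrative usage sketch of the helpers above (topic/payload hypothetical):
#   publish(hass, "home/livingroom/light/set", "ON", qos=1, retain=True)
# builds service data
#   {"topic": "home/livingroom/light/set", "payload": "ON", "qos": 1, "retain": True}
# and schedules a call to the mqtt.publish service; publish_template() sends
# "payload_template" instead of "payload" so the service renders it first.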
def wrap_msg_callback(msg_callback: MessageCallbackType) -> MessageCallbackType:
"""Wrap an MQTT message callback to support deprecated signature."""
# Check for partials to properly determine if coroutine function
check_func = msg_callback
while isinstance(check_func, partial):
check_func = check_func.func
wrapper_func = None
if asyncio.iscoroutinefunction(check_func):
@wraps(msg_callback)
async def async_wrapper(msg: Any) -> None:
"""Catch and log exception."""
await msg_callback(msg.topic, msg.payload, msg.qos)
wrapper_func = async_wrapper
else:
@wraps(msg_callback)
def wrapper(msg: Any) -> None:
"""Catch and log exception."""
msg_callback(msg.topic, msg.payload, msg.qos)
wrapper_func = wrapper
return wrapper_func
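# Sketch of the two callback signatures handled here (names hypothetical):
#   async def new_style(msg: Message) -> None: ...         # preferred
#   async def old_style(topic, payload, qos) -> None: ...  # deprecated
# Callbacks with three non-default parameters are detected in async_subscribe()
# below and adapted through wrap_msg_callback() so they keep working.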
@bind_hass
async def async_subscribe(
hass: HomeAssistantType,
topic: str,
msg_callback: MessageCallbackType,
qos: int = DEFAULT_QOS,
encoding: Optional[str] = "utf-8",
):
"""Subscribe to an MQTT topic.
Call the return value to unsubscribe.
"""
# Count callback parameters which don't have a default value
non_default = 0
if msg_callback:
non_default = sum(
p.default == inspect.Parameter.empty
for _, p in inspect.signature(msg_callback).parameters.items()
)
wrapped_msg_callback = msg_callback
# If we have 3 parameters with no default value, wrap the callback
if non_default == 3:
_LOGGER.warning(
"Signature of MQTT msg_callback '%s.%s' is deprecated",
inspect.getmodule(msg_callback).__name__,
msg_callback.__name__,
)
wrapped_msg_callback = wrap_msg_callback(msg_callback)
async_remove = await hass.data[DATA_MQTT].async_subscribe(
topic,
catch_log_exception(
wrapped_msg_callback,
lambda msg: "Exception in {} when handling msg on '{}': '{}'".format(
msg_callback.__name__, msg.topic, msg.payload
),
),
qos,
encoding,
)
return async_remove
@bind_hass
def subscribe(
hass: HomeAssistantType,
topic: str,
msg_callback: MessageCallbackType,
qos: int = DEFAULT_QOS,
encoding: str = "utf-8",
) -> Callable[[], None]:
"""Subscribe to an MQTT topic."""
async_remove = run_coroutine_threadsafe(
async_subscribe(hass, topic, msg_callback, qos, encoding), hass.loop
).result()
def remove():
"""Remove listener convert."""
run_callback_threadsafe(hass.loop, async_remove).result()
return remove
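# Illustrative usage sketch (topic and handler are hypothetical):
#   unsub = await async_subscribe(hass, "sensors/+/state", handle_state)
#   ...
#   unsub()  # stop receiving messages
# The synchronous subscribe() wrapper provides the same from a worker thread.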
async def _async_setup_server(hass: HomeAssistantType, config: ConfigType):
"""Try to start embedded MQTT broker.
This method is a coroutine.
"""
conf: ConfigType = config.get(DOMAIN, {})
success, broker_config = await server.async_start(
hass, conf.get(CONF_PASSWORD), conf.get(CONF_EMBEDDED)
)
if not success:
return None
return broker_config
async def _async_setup_discovery(
hass: HomeAssistantType, conf: ConfigType, hass_config: ConfigType, config_entry
) -> bool:
"""Try to start the discovery of MQTT devices.
This method is a coroutine.
"""
if discovery is None:
_LOGGER.error("Unable to load MQTT discovery")
return False
success: bool = await discovery.async_start(
hass, conf[CONF_DISCOVERY_PREFIX], hass_config, config_entry
)
return success
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Start the MQTT protocol service."""
conf: Optional[ConfigType] = config.get(DOMAIN)
# We need this because discovery can cause components to be set up and
    # otherwise it will not load the user's config.
# This needs a better solution.
hass.data[DATA_MQTT_HASS_CONFIG] = config
websocket_api.async_register_command(hass, websocket_subscribe)
if conf is None:
# If we have a config entry, setup is done by that config entry.
# If there is no config entry, this should fail.
return bool(hass.config_entries.async_entries(DOMAIN))
conf = dict(conf)
if CONF_EMBEDDED in conf or CONF_BROKER not in conf:
broker_config = await _async_setup_server(hass, config)
if broker_config is None:
_LOGGER.error("Unable to start embedded MQTT broker")
return False
conf.update(
{
CONF_BROKER: broker_config[0],
CONF_PORT: broker_config[1],
CONF_USERNAME: broker_config[2],
CONF_PASSWORD: broker_config[3],
CONF_CERTIFICATE: broker_config[4],
CONF_PROTOCOL: broker_config[5],
CONF_CLIENT_KEY: None,
CONF_CLIENT_CERT: None,
CONF_TLS_INSECURE: None,
}
)
hass.data[DATA_MQTT_CONFIG] = conf
# Only import if we haven't before.
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={}
)
)
return True
async def async_setup_entry(hass, entry):
"""Load a config entry."""
conf = hass.data.get(DATA_MQTT_CONFIG)
# Config entry was created because user had configuration.yaml entry
# They removed that, so remove entry.
if conf is None and entry.source == config_entries.SOURCE_IMPORT:
hass.async_create_task(hass.config_entries.async_remove(entry.entry_id))
return False
# If user didn't have configuration.yaml config, generate defaults
if conf is None:
conf = CONFIG_SCHEMA({DOMAIN: entry.data})[DOMAIN]
elif any(key in conf for key in entry.data):
_LOGGER.warning(
"Data in your config entry is going to override your "
"configuration.yaml: %s",
entry.data,
)
conf.update(entry.data)
broker = conf[CONF_BROKER]
port = conf[CONF_PORT]
client_id = conf.get(CONF_CLIENT_ID)
keepalive = conf[CONF_KEEPALIVE]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
certificate = conf.get(CONF_CERTIFICATE)
client_key = conf.get(CONF_CLIENT_KEY)
client_cert = conf.get(CONF_CLIENT_CERT)
tls_insecure = conf.get(CONF_TLS_INSECURE)
protocol = conf[CONF_PROTOCOL]
# For cloudmqtt.com, secured connection, auto fill in certificate
if (
certificate is None
and 19999 < conf[CONF_PORT] < 30000
and broker.endswith(".cloudmqtt.com")
):
certificate = os.path.join(
os.path.dirname(__file__), "addtrustexternalcaroot.crt"
)
# When the certificate is set to auto, use bundled certs from requests
elif certificate == "auto":
certificate = requests.certs.where()
if CONF_WILL_MESSAGE in conf:
will_message = Message(**conf[CONF_WILL_MESSAGE])
else:
will_message = None
if CONF_BIRTH_MESSAGE in conf:
birth_message = Message(**conf[CONF_BIRTH_MESSAGE])
else:
birth_message = None
# Be able to override versions other than TLSv1.0 under Python3.6
conf_tls_version: str = conf.get(CONF_TLS_VERSION)
if conf_tls_version == "1.2":
tls_version = ssl.PROTOCOL_TLSv1_2
elif conf_tls_version == "1.1":
tls_version = ssl.PROTOCOL_TLSv1_1
elif conf_tls_version == "1.0":
tls_version = ssl.PROTOCOL_TLSv1
else:
import sys
# Python3.6 supports automatic negotiation of highest TLS version
if sys.hexversion >= 0x03060000:
tls_version = ssl.PROTOCOL_TLS # pylint: disable=no-member
else:
tls_version = ssl.PROTOCOL_TLSv1
hass.data[DATA_MQTT] = MQTT(
hass,
broker=broker,
port=port,
client_id=client_id,
keepalive=keepalive,
username=username,
password=password,
certificate=certificate,
client_key=client_key,
client_cert=client_cert,
tls_insecure=tls_insecure,
protocol=protocol,
will_message=will_message,
birth_message=birth_message,
tls_version=tls_version,
)
result: str = await hass.data[DATA_MQTT].async_connect()
if result == CONNECTION_FAILED:
return False
if result == CONNECTION_FAILED_RECOVERABLE:
raise ConfigEntryNotReady
async def async_stop_mqtt(event: Event):
"""Stop MQTT component."""
await hass.data[DATA_MQTT].async_disconnect()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_mqtt)
async def async_publish_service(call: ServiceCall):
"""Handle MQTT publish service calls."""
msg_topic: str = call.data[ATTR_TOPIC]
payload = call.data.get(ATTR_PAYLOAD)
payload_template = call.data.get(ATTR_PAYLOAD_TEMPLATE)
qos: int = call.data[ATTR_QOS]
retain: bool = call.data[ATTR_RETAIN]
if payload_template is not None:
try:
payload = template.Template(payload_template, hass).async_render()
except template.jinja2.TemplateError as exc:
_LOGGER.error(
"Unable to publish to %s: rendering payload template of "
"%s failed because %s",
msg_topic,
payload_template,
exc,
)
return
await hass.data[DATA_MQTT].async_publish(msg_topic, payload, qos, retain)
hass.services.async_register(
DOMAIN, SERVICE_PUBLISH, async_publish_service, schema=MQTT_PUBLISH_SCHEMA
)
if conf.get(CONF_DISCOVERY):
await _async_setup_discovery(
hass, conf, hass.data[DATA_MQTT_HASS_CONFIG], entry
)
return True
@attr.s(slots=True, frozen=True)
class Subscription:
"""Class to hold data about an active subscription."""
topic = attr.ib(type=str)
callback = attr.ib(type=MessageCallbackType)
qos = attr.ib(type=int, default=0)
encoding = attr.ib(type=str, default="utf-8")
class MQTT:
"""Home Assistant MQTT client."""
def __init__(
self,
hass: HomeAssistantType,
broker: str,
port: int,
client_id: Optional[str],
keepalive: Optional[int],
username: Optional[str],
password: Optional[str],
certificate: Optional[str],
client_key: Optional[str],
client_cert: Optional[str],
tls_insecure: Optional[bool],
protocol: Optional[str],
will_message: Optional[Message],
birth_message: Optional[Message],
tls_version: Optional[int],
) -> None:
"""Initialize Home Assistant MQTT client."""
import paho.mqtt.client as mqtt
self.hass = hass
self.broker = broker
self.port = port
self.keepalive = keepalive
self.subscriptions: List[Subscription] = []
self.birth_message = birth_message
self.connected = False
self._mqttc: mqtt.Client = None
self._paho_lock = asyncio.Lock()
if protocol == PROTOCOL_31:
proto: int = mqtt.MQTTv31
else:
proto = mqtt.MQTTv311
if client_id is None:
self._mqttc = mqtt.Client(protocol=proto)
else:
self._mqttc = mqtt.Client(client_id, protocol=proto)
if username is not None:
self._mqttc.username_pw_set(username, password)
if certificate is not None:
self._mqttc.tls_set(
certificate,
certfile=client_cert,
keyfile=client_key,
tls_version=tls_version,
)
if tls_insecure is not None:
self._mqttc.tls_insecure_set(tls_insecure)
self._mqttc.on_connect = self._mqtt_on_connect
self._mqttc.on_disconnect = self._mqtt_on_disconnect
self._mqttc.on_message = self._mqtt_on_message
if will_message is not None:
self._mqttc.will_set(*attr.astuple(will_message))
async def async_publish(
self, topic: str, payload: PublishPayloadType, qos: int, retain: bool
) -> None:
"""Publish a MQTT message.
This method must be run in the event loop and returns a coroutine.
"""
async with self._paho_lock:
_LOGGER.debug("Transmitting message on %s: %s", topic, payload)
await self.hass.async_add_job(
self._mqttc.publish, topic, payload, qos, retain
)
async def async_connect(self) -> str:
"""Connect to the host. Does process messages yet.
This method is a coroutine.
"""
result: int = None
try:
result = await self.hass.async_add_job(
self._mqttc.connect, self.broker, self.port, self.keepalive
)
except OSError as err:
_LOGGER.error("Failed to connect due to exception: %s", err)
return CONNECTION_FAILED_RECOVERABLE
if result != 0:
import paho.mqtt.client as mqtt
_LOGGER.error("Failed to connect: %s", mqtt.error_string(result))
return CONNECTION_FAILED
self._mqttc.loop_start()
return CONNECTION_SUCCESS
@callback
def async_disconnect(self):
"""Stop the MQTT client.
This method must be run in the event loop and returns a coroutine.
"""
def stop():
"""Stop the MQTT client."""
self._mqttc.disconnect()
self._mqttc.loop_stop()
return self.hass.async_add_job(stop)
async def async_subscribe(
self,
topic: str,
msg_callback: MessageCallbackType,
qos: int,
encoding: Optional[str] = None,
) -> Callable[[], None]:
"""Set up a subscription to a topic with the provided qos.
This method is a coroutine.
"""
if not isinstance(topic, str):
raise HomeAssistantError("Topic needs to be a string!")
subscription = Subscription(topic, msg_callback, qos, encoding)
self.subscriptions.append(subscription)
await self._async_perform_subscription(topic, qos)
@callback
def async_remove() -> None:
"""Remove subscription."""
if subscription not in self.subscriptions:
raise HomeAssistantError("Can't remove subscription twice")
self.subscriptions.remove(subscription)
if any(other.topic == topic for other in self.subscriptions):
# Other subscriptions on topic remaining - don't unsubscribe.
return
# Only unsubscribe if currently connected.
if self.connected:
self.hass.async_create_task(self._async_unsubscribe(topic))
return async_remove
async def _async_unsubscribe(self, topic: str) -> None:
"""Unsubscribe from a topic.
This method is a coroutine.
"""
async with self._paho_lock:
result: int = None
result, _ = await self.hass.async_add_job(self._mqttc.unsubscribe, topic)
_raise_on_error(result)
async def _async_perform_subscription(self, topic: str, qos: int) -> None:
"""Perform a paho-mqtt subscription."""
_LOGGER.debug("Subscribing to %s", topic)
async with self._paho_lock:
result: int = None
result, _ = await self.hass.async_add_job(self._mqttc.subscribe, topic, qos)
_raise_on_error(result)
def _mqtt_on_connect(self, _mqttc, _userdata, _flags, result_code: int) -> None:
"""On connect callback.
Resubscribe to all topics we were subscribed to and publish birth
message.
"""
import paho.mqtt.client as mqtt
if result_code != mqtt.CONNACK_ACCEPTED:
_LOGGER.error(
"Unable to connect to the MQTT broker: %s",
mqtt.connack_string(result_code),
)
self._mqttc.disconnect()
return
self.connected = True
# Group subscriptions to only re-subscribe once for each topic.
keyfunc = attrgetter("topic")
for topic, subs in groupby(sorted(self.subscriptions, key=keyfunc), keyfunc):
# Re-subscribe with the highest requested qos
max_qos = max(subscription.qos for subscription in subs)
self.hass.add_job(self._async_perform_subscription, topic, max_qos)
if self.birth_message:
self.hass.add_job(self.async_publish(*attr.astuple(self.birth_message)))
def _mqtt_on_message(self, _mqttc, _userdata, msg) -> None:
"""Message received callback."""
self.hass.add_job(self._mqtt_handle_message, msg)
@callback
def _mqtt_handle_message(self, msg) -> None:
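        """Dispatch a received MQTT message to matching subscriptions."""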
_LOGGER.debug(
"Received message on %s%s: %s",
msg.topic,
" (retained)" if msg.retain else "",
msg.payload,
)
for subscription in self.subscriptions:
if not _match_topic(subscription.topic, msg.topic):
continue
payload: SubscribePayloadType = msg.payload
if subscription.encoding is not None:
try:
payload = msg.payload.decode(subscription.encoding)
except (AttributeError, UnicodeDecodeError):
_LOGGER.warning(
"Can't decode payload %s on %s with encoding %s",
msg.payload,
msg.topic,
subscription.encoding,
)
continue
self.hass.async_run_job(
subscription.callback, Message(msg.topic, payload, msg.qos, msg.retain)
)
def _mqtt_on_disconnect(self, _mqttc, _userdata, result_code: int) -> None:
"""Disconnected callback."""
self.connected = False
# When disconnected because of calling disconnect()
if result_code == 0:
return
tries = 0
while True:
try:
if self._mqttc.reconnect() == 0:
self.connected = True
_LOGGER.info("Successfully reconnected to the MQTT server")
break
except socket.error:
pass
wait_time = min(2 ** tries, MAX_RECONNECT_WAIT)
_LOGGER.warning(
"Disconnected from MQTT (%s). Trying to reconnect in %s s",
result_code,
wait_time,
)
# It is ok to sleep here as we are in the MQTT thread.
time.sleep(wait_time)
tries += 1
def _raise_on_error(result_code: int) -> None:
"""Raise error if error result."""
if result_code != 0:
import paho.mqtt.client as mqtt
raise HomeAssistantError(
"Error talking to MQTT: {}".format(mqtt.error_string(result_code))
)
def _match_topic(subscription: str, topic: str) -> bool:
"""Test if topic matches subscription."""
from paho.mqtt.matcher import MQTTMatcher
matcher = MQTTMatcher()
matcher[subscription] = True
try:
next(matcher.iter_match(topic))
return True
except StopIteration:
return False
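# Illustrative matching sketch (topics are hypothetical):
#   _match_topic("home/+/temperature", "home/kitchen/temperature")  -> True
#   _match_topic("home/#", "home/kitchen/light/state")              -> True
#   _match_topic("home/+/temperature", "home/kitchen/humidity")     -> False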
class MqttAttributes(Entity):
"""Mixin used for platforms that support JSON attributes."""
def __init__(self, config: dict) -> None:
"""Initialize the JSON attributes mixin."""
self._attributes = None
self._attributes_sub_state = None
self._attributes_config = config
async def async_added_to_hass(self) -> None:
"""Subscribe MQTT events.
This method must be run in the event loop and returns a coroutine.
"""
await super().async_added_to_hass()
await self._attributes_subscribe_topics()
async def attributes_discovery_update(self, config: dict):
"""Handle updated discovery message."""
self._attributes_config = config
await self._attributes_subscribe_topics()
async def _attributes_subscribe_topics(self):
"""(Re)Subscribe to topics."""
from .subscription import async_subscribe_topics
attr_tpl = self._attributes_config.get(CONF_JSON_ATTRS_TEMPLATE)
if attr_tpl is not None:
attr_tpl.hass = self.hass
@callback
def attributes_message_received(msg: Message) -> None:
try:
payload = msg.payload
if attr_tpl is not None:
payload = attr_tpl.async_render_with_possible_json_value(payload)
json_dict = json.loads(payload)
if isinstance(json_dict, dict):
self._attributes = json_dict
self.async_write_ha_state()
else:
_LOGGER.warning("JSON result was not a dictionary")
self._attributes = None
except ValueError:
_LOGGER.warning("Erroneous JSON: %s", payload)
self._attributes = None
self._attributes_sub_state = await async_subscribe_topics(
self.hass,
self._attributes_sub_state,
{
CONF_JSON_ATTRS_TOPIC: {
"topic": self._attributes_config.get(CONF_JSON_ATTRS_TOPIC),
"msg_callback": attributes_message_received,
"qos": self._attributes_config.get(CONF_QOS),
}
},
)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
from .subscription import async_unsubscribe_topics
self._attributes_sub_state = await async_unsubscribe_topics(
self.hass, self._attributes_sub_state
)
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
class MqttAvailability(Entity):
"""Mixin used for platforms that report availability."""
def __init__(self, config: dict) -> None:
"""Initialize the availability mixin."""
self._availability_sub_state = None
self._available = False
self._avail_config = config
async def async_added_to_hass(self) -> None:
"""Subscribe MQTT events.
This method must be run in the event loop and returns a coroutine.
"""
await super().async_added_to_hass()
await self._availability_subscribe_topics()
async def availability_discovery_update(self, config: dict):
"""Handle updated discovery message."""
self._avail_config = config
await self._availability_subscribe_topics()
async def _availability_subscribe_topics(self):
"""(Re)Subscribe to topics."""
from .subscription import async_subscribe_topics
@callback
def availability_message_received(msg: Message) -> None:
"""Handle a new received MQTT availability message."""
if msg.payload == self._avail_config[CONF_PAYLOAD_AVAILABLE]:
self._available = True
elif msg.payload == self._avail_config[CONF_PAYLOAD_NOT_AVAILABLE]:
self._available = False
self.async_write_ha_state()
self._availability_sub_state = await async_subscribe_topics(
self.hass,
self._availability_sub_state,
{
"availability_topic": {
"topic": self._avail_config.get(CONF_AVAILABILITY_TOPIC),
"msg_callback": availability_message_received,
"qos": self._avail_config[CONF_QOS],
}
},
)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
from .subscription import async_unsubscribe_topics
self._availability_sub_state = await async_unsubscribe_topics(
self.hass, self._availability_sub_state
)
@property
def available(self) -> bool:
"""Return if the device is available."""
availability_topic = self._avail_config.get(CONF_AVAILABILITY_TOPIC)
return availability_topic is None or self._available
class MqttDiscoveryUpdate(Entity):
"""Mixin used to handle updated discovery message."""
def __init__(self, discovery_hash, discovery_update=None) -> None:
"""Initialize the discovery update mixin."""
self._discovery_hash = discovery_hash
self._discovery_update = discovery_update
self._remove_signal = None
async def async_added_to_hass(self) -> None:
"""Subscribe to discovery updates."""
await super().async_added_to_hass()
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .discovery import MQTT_DISCOVERY_UPDATED, clear_discovery_hash
@callback
def discovery_callback(payload):
"""Handle discovery update."""
_LOGGER.info(
"Got update for entity with hash: %s '%s'",
self._discovery_hash,
payload,
)
if not payload:
# Empty payload: Remove component
_LOGGER.info("Removing component: %s", self.entity_id)
self.hass.async_create_task(self.async_remove())
clear_discovery_hash(self.hass, self._discovery_hash)
self._remove_signal()
elif self._discovery_update:
# Non-empty payload: Notify component
_LOGGER.info("Updating component: %s", self.entity_id)
payload.pop(ATTR_DISCOVERY_HASH)
self.hass.async_create_task(self._discovery_update(payload))
if self._discovery_hash:
self._remove_signal = async_dispatcher_connect(
self.hass,
MQTT_DISCOVERY_UPDATED.format(self._discovery_hash),
discovery_callback,
)
class MqttEntityDeviceInfo(Entity):
"""Mixin used for mqtt platforms that support the device registry."""
def __init__(self, device_config: Optional[ConfigType], config_entry=None) -> None:
"""Initialize the device mixin."""
self._device_config = device_config
self._config_entry = config_entry
async def device_info_discovery_update(self, config: dict):
"""Handle updated discovery message."""
self._device_config = config.get(CONF_DEVICE)
device_registry = await self.hass.helpers.device_registry.async_get_registry()
config_entry_id = self._config_entry.entry_id
device_info = self.device_info
if config_entry_id is not None and device_info is not None:
device_info["config_entry_id"] = config_entry_id
device_registry.async_get_or_create(**device_info)
@property
def device_info(self):
"""Return a device description for device registry."""
if not self._device_config:
return None
info = {
"identifiers": {
(DOMAIN, id_) for id_ in self._device_config[CONF_IDENTIFIERS]
},
"connections": {tuple(x) for x in self._device_config[CONF_CONNECTIONS]},
}
if CONF_MANUFACTURER in self._device_config:
info["manufacturer"] = self._device_config[CONF_MANUFACTURER]
if CONF_MODEL in self._device_config:
info["model"] = self._device_config[CONF_MODEL]
if CONF_NAME in self._device_config:
info["name"] = self._device_config[CONF_NAME]
if CONF_SW_VERSION in self._device_config:
info["sw_version"] = self._device_config[CONF_SW_VERSION]
if CONF_VIA_DEVICE in self._device_config:
info["via_device"] = (DOMAIN, self._device_config[CONF_VIA_DEVICE])
return info
@websocket_api.async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "mqtt/subscribe",
vol.Required("topic"): valid_subscribe_topic,
}
)
async def websocket_subscribe(hass, connection, msg):
"""Subscribe to a MQTT topic."""
if not connection.user.is_admin:
raise Unauthorized
async def forward_messages(mqttmsg: Message):
"""Forward events to websocket."""
connection.send_message(
websocket_api.event_message(
msg["id"],
{
"topic": mqttmsg.topic,
"payload": mqttmsg.payload,
"qos": mqttmsg.qos,
"retain": mqttmsg.retain,
},
)
)
connection.subscriptions[msg["id"]] = await async_subscribe(
hass, msg["topic"], forward_messages
)
connection.send_message(websocket_api.result_message(msg["id"]))
|
|
# Copyright 2015. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from cfgm_common import exceptions as vnc_exc
from neutron.common import constants
from vnc_api import vnc_api
import contrail_res_handler as res_handler
import sg_res_handler as sg_handler
class SecurityGroupRuleMixin(object):
def _security_group_rule_vnc_to_neutron(self, sg_id, sg_rule,
sg_obj=None, fields=None):
sgr_q_dict = {}
if sg_id is None:
return sgr_q_dict
if not sg_obj:
try:
sg_obj = sg_handler.SecurityGroupHandler(
self._vnc_lib).get_sg_obj(id=sg_id)
except vnc_exc.NoIdError:
self._raise_contrail_exception(
'SecurityGroupNotFound',
id=sg_id, resource='security_group_rule')
remote_cidr = None
remote_sg_uuid = None
saddr = sg_rule.get_src_addresses()[0]
daddr = sg_rule.get_dst_addresses()[0]
if saddr.get_security_group() == 'local':
direction = 'egress'
addr = daddr
elif daddr.get_security_group() == 'local':
direction = 'ingress'
addr = saddr
else:
self._raise_contrail_exception(
'SecurityGroupRuleNotFound',
id=sg_rule.get_rule_uuid(), resource='security_group_rule')
if addr.get_subnet():
remote_cidr = '%s/%s' % (addr.get_subnet().get_ip_prefix(),
addr.get_subnet().get_ip_prefix_len())
elif addr.get_security_group():
if addr.get_security_group() != 'any' and (
addr.get_security_group() != 'local'):
remote_sg = addr.get_security_group()
try:
if remote_sg != ':'.join(sg_obj.get_fq_name()):
remote_sg_obj = sg_handler.SecurityGroupHandler(
self._vnc_lib).get_sg_obj(fq_name_str=remote_sg)
else:
remote_sg_obj = sg_obj
remote_sg_uuid = remote_sg_obj.uuid
except vnc_exc.NoIdError:
pass
sgr_q_dict['id'] = sg_rule.get_rule_uuid()
sgr_q_dict['tenant_id'] = self._project_id_vnc_to_neutron(
sg_obj.parent_uuid)
sgr_q_dict['security_group_id'] = sg_obj.uuid
if hasattr(sg_rule, 'get_ethertype'):
sgr_q_dict['ethertype'] = sg_rule.get_ethertype()
else:
sgr_q_dict['ethertype'] = 'IPv4'
sgr_q_dict['direction'] = direction
proto = sg_rule.get_protocol()
sgr_q_dict['protocol'] = None if proto == 'any' else proto
port_min = sg_rule.get_dst_ports()[0].get_start_port()
if sgr_q_dict['protocol'] in (constants.PROTO_NAME_ICMP,
str(constants.PROTO_NUM_ICMP)):
sgr_q_dict['port_range_min'] = port_min
else:
sgr_q_dict['port_range_min'] = None if port_min == 0 else port_min
port_max = (sg_rule.get_dst_ports()[0].get_end_port())
sgr_q_dict['port_range_max'] = None if port_max == 65535 else port_max
if remote_cidr == '0.0.0.0/0' or remote_cidr == '::/0':
remote_cidr = None
sgr_q_dict['remote_ip_prefix'] = remote_cidr
sgr_q_dict['remote_group_id'] = remote_sg_uuid
if fields:
sgr_q_dict = self._filter_res_dict(sgr_q_dict, fields)
return sgr_q_dict
# end _security_group_rule_vnc_to_neutron
def _security_group_rule_find(self, sgr_id, project_uuid=None):
dom_projects = []
if not project_uuid:
dom_projects = self._project_list_domain(None)
else:
dom_projects = [{'uuid': project_uuid}]
for project in dom_projects:
proj_id = project['uuid']
project_sgs = sg_handler.SecurityGroupHandler(
self._vnc_lib).resource_list_by_project(proj_id)
for sg_obj in project_sgs:
sgr_entries = sg_obj.get_security_group_entries()
if sgr_entries is None:
continue
for sg_rule in sgr_entries.get_policy_rule():
if sg_rule.get_rule_uuid() == sgr_id:
return sg_obj, sg_rule
return None, None
# end _security_group_rule_find
class SecurityGroupRuleGetHandler(res_handler.ResourceGetHandler,
SecurityGroupRuleMixin):
def resource_get(self, context, sgr_id, fields=None):
project_uuid = None
if not context['is_admin']:
project_uuid = self._project_id_neutron_to_vnc(context['tenant'])
sg_obj, sg_rule = self._security_group_rule_find(sgr_id, project_uuid)
if sg_obj and sg_rule:
return self._security_group_rule_vnc_to_neutron(sg_obj.uuid,
sg_rule, sg_obj,
fields=fields)
self._raise_contrail_exception('SecurityGroupRuleNotFound', id=sgr_id,
resource='security_group_rule')
def security_group_rules_read(self, sg_obj, fields=None, filters=None):
sgr_entries = sg_obj.get_security_group_entries()
sg_rules = []
if sgr_entries is None:
return
if filters:
            filter_ids = list(filters.get('id', []))
else:
filter_ids = None
for sg_rule in sgr_entries.get_policy_rule():
if filter_ids and sg_rule.get_rule_uuid() not in filter_ids:
continue
sg_info = self._security_group_rule_vnc_to_neutron(sg_obj.uuid,
sg_rule,
sg_obj,
fields=fields)
sg_rules.append(sg_info)
return sg_rules
# end security_group_rules_read
def resource_list(self, context, filters=None, fields=None):
ret_list = []
# collect phase
all_sgs = []
if filters and 'tenant_id' in filters:
project_ids = self._validate_project_ids(context,
filters['tenant_id'])
for p_id in project_ids:
project_sgs = sg_handler.SecurityGroupHandler(
self._vnc_lib).resource_list_by_project(p_id)
all_sgs.append(project_sgs)
else: # no filters
p_id = None
if context and not context['is_admin']:
p_id = self._project_id_neutron_to_vnc(context['tenant'])
project_sgs = sg_handler.SecurityGroupHandler(
self._vnc_lib).resource_list_by_project(p_id)
all_sgs.append(project_sgs)
# prune phase
for project_sgs in all_sgs:
for sg_obj in project_sgs:
# TODO() implement same for name specified in filter
sgr_info = self.security_group_rules_read(sg_obj,
fields=fields,
filters=filters)
if sgr_info:
ret_list.extend(sgr_info)
return ret_list
class SecurityGroupRuleDeleteHandler(res_handler.ResourceDeleteHandler,
SecurityGroupRuleMixin):
def _security_group_rule_delete(self, sg_obj, sg_rule):
rules = sg_obj.get_security_group_entries()
rules.get_policy_rule().remove(sg_rule)
sg_obj.set_security_group_entries(rules)
sg_handler.SecurityGroupHandler(
self._vnc_lib).resource_update_obj(sg_obj)
return
# end _security_group_rule_delete
def resource_delete(self, context, sgr_id):
project_uuid = None
if not context['is_admin']:
project_uuid = self._project_id_neutron_to_vnc(context['tenant'])
sg_obj, sg_rule = self._security_group_rule_find(sgr_id, project_uuid)
if sg_obj and sg_rule:
return self._security_group_rule_delete(sg_obj, sg_rule)
self._raise_contrail_exception('SecurityGroupRuleNotFound', id=sgr_id,
resource='security_group_rule')
class SecurityGroupRuleCreateHandler(res_handler.ResourceCreateHandler,
SecurityGroupRuleMixin):
resource_create_method = "security_group_rule_create"
def _convert_protocol(self, value):
IP_PROTOCOL_MAP = {constants.PROTO_NUM_TCP: constants.PROTO_NAME_TCP,
constants.PROTO_NUM_UDP: constants.PROTO_NAME_UDP,
constants.PROTO_NUM_ICMP: constants.PROTO_NAME_ICMP}
if value is None:
return
if isinstance(value, str) and value.lower() == 'any':
return 'any'
try:
val = int(value)
# TODO(ethuleau): support all protocol numbers
if val >= 0 and val <= 255:
return IP_PROTOCOL_MAP[val] if val in IP_PROTOCOL_MAP else (
str(val))
self._raise_contrail_exception(
'SecurityGroupRuleInvalidProtocol',
protocol=value, values=IP_PROTOCOL_MAP.values(),
resource='security_group_rule')
except (ValueError, TypeError):
if value.lower() in IP_PROTOCOL_MAP.values():
return value.lower()
self._raise_contrail_exception(
'SecurityGroupRuleInvalidProtocol',
protocol=value, values=IP_PROTOCOL_MAP.values(),
resource='security_group_rule')
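    # Illustrative sketch of the conversion above (protocol numbers follow the
    # neutron constants assumed by IP_PROTOCOL_MAP):
    #   _convert_protocol(6)      -> 'tcp'
    #   _convert_protocol('TCP')  -> 'tcp'
    #   _convert_protocol('any')  -> 'any'
    #   _convert_protocol(47)     -> '47'  (unnamed numbers stay numeric)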
def _validate_port_range(self, rule):
"""Check that port_range is valid."""
if (rule['port_range_min'] is None and
rule['port_range_max'] is None):
return
if not rule['protocol']:
self._raise_contrail_exception(
'SecurityGroupProtocolRequiredWithPorts',
resource='security_group_rule')
if rule['protocol'] in [constants.PROTO_NAME_TCP,
constants.PROTO_NAME_UDP]:
if (rule['port_range_min'] is not None and
rule['port_range_min'] <= rule['port_range_max']):
pass
else:
self._raise_contrail_exception(
'SecurityGroupInvalidPortRange',
resource='security_group_rule')
elif rule['protocol'] == constants.PROTO_NAME_ICMP:
for attr, field in [('port_range_min', 'type'),
('port_range_max', 'code')]:
if rule[attr] > 255:
self._raise_contrail_exception(
'SecurityGroupInvalidIcmpValue', field=field,
attr=attr, value=rule[attr],
resource='security_group_rule')
if (rule['port_range_min'] is None and
rule['port_range_max']):
self._raise_contrail_exception(
'SecurityGroupMissingIcmpType',
value=rule['port_range_max'],
resource='security_group_rule')
def _security_group_rule_neutron_to_vnc(self, sgr_q):
# default port values
if sgr_q['protocol'] in (constants.PROTO_NAME_ICMP,
str(constants.PROTO_NUM_ICMP)):
port_min = None
port_max = None
else:
port_min = 0
port_max = 65535
if sgr_q['port_range_min'] is not None:
port_min = sgr_q['port_range_min']
if sgr_q['port_range_max'] is not None:
port_max = sgr_q['port_range_max']
if sgr_q['remote_ip_prefix'] and sgr_q['remote_group_id']:
self._raise_contrail_exception("BadRequest",
msg="Can't set remote_ip_prefix with remote_group_id",
resource="security_group_rule")
endpt = [vnc_api.AddressType(security_group='any')]
if sgr_q['remote_ip_prefix']:
cidr = sgr_q['remote_ip_prefix'].split('/')
pfx = cidr[0]
pfx_len = int(cidr[1])
endpt = [vnc_api.AddressType(
subnet=vnc_api.SubnetType(pfx, pfx_len))]
elif sgr_q['remote_group_id']:
try:
sg_obj = sg_handler.SecurityGroupHandler(
self._vnc_lib).get_sg_obj(id=sgr_q['remote_group_id'])
except vnc_exc.NoIdError:
self._raise_contrail_exception('SecurityGroupNotFound',
id=sgr_q['remote_group_id'],
resource='security_group_rule')
if sgr_q.get('tenant_id') and (
sg_obj.parent_uuid != self._project_id_neutron_to_vnc(sgr_q['tenant_id'])):
self._raise_contrail_exception("NotFound")
endpt = [vnc_api.AddressType(
security_group=sg_obj.get_fq_name_str())]
if sgr_q['direction'] == 'ingress':
dir = '>'
local = endpt
remote = [vnc_api.AddressType(security_group='local')]
else:
dir = '>'
remote = endpt
local = [vnc_api.AddressType(security_group='local')]
if not sgr_q['protocol']:
sgr_q['protocol'] = 'any'
if not sgr_q['remote_ip_prefix'] and not sgr_q['remote_group_id']:
if not sgr_q['ethertype']:
sgr_q['ethertype'] = 'IPv4'
sgr_uuid = str(uuid.uuid4()) if 'id' not in sgr_q else sgr_q['id']
rule = vnc_api.PolicyRuleType(
rule_uuid=sgr_uuid, direction=dir,
protocol=sgr_q['protocol'],
src_addresses=local,
src_ports=[vnc_api.PortType(0, 65535)],
dst_addresses=remote,
dst_ports=[vnc_api.PortType(port_min, port_max)],
ethertype=sgr_q['ethertype'])
return rule
# end _security_group_rule_neutron_to_vnc
def _security_group_rule_create(self, sg_id, sg_rule, project_id):
sghandler = sg_handler.SecurityGroupHandler(self._vnc_lib)
try:
sg_vnc = sghandler.get_sg_obj(id=sg_id)
except vnc_exc.NoIdError:
self._raise_contrail_exception('SecurityGroupNotFound', id=sg_id,
resource='security_group')
if project_id and sg_vnc.parent_uuid != self._project_id_neutron_to_vnc(project_id):
self._raise_contrail_exception('NotFound')
rules = sg_vnc.get_security_group_entries()
if rules is None:
rules = vnc_api.PolicyEntriesType([sg_rule])
else:
rules.add_policy_rule(sg_rule)
sg_vnc.set_security_group_entries(rules)
try:
sghandler.resource_update_obj(sg_vnc)
except vnc_exc.PermissionDenied as e:
self._raise_contrail_exception(
'BadRequest',
resource='security_group_rule', msg=str(e))
except vnc_exc.BadRequest as e:
self._raise_contrail_exception(
'BadRequest',
resource='security_group_rule', msg=str(e.content))
except vnc_exc.RefsExistError as e:
try:
rule_uuid = str(e).split(':')[1].strip()
except IndexError:
rule_uuid = None
self._raise_contrail_exception('SecurityGroupRuleExists',
resource='security_group_rule',
id=rule_uuid)
return
# end _security_group_rule_create
def resource_create(self, context, sgr_q):
sgr_q['protocol'] = self._convert_protocol(sgr_q['protocol'])
self._validate_port_range(sgr_q)
sg_id = sgr_q['security_group_id']
sg_rule = self._security_group_rule_neutron_to_vnc(sgr_q)
self._security_group_rule_create(sg_id, sg_rule,
sgr_q.get('tenant_id', None))
ret_sg_rule_q = self._security_group_rule_vnc_to_neutron(sg_id,
sg_rule)
return ret_sg_rule_q
class SecurityGroupRuleHandler(SecurityGroupRuleGetHandler,
SecurityGroupRuleDeleteHandler,
SecurityGroupRuleCreateHandler):
pass
|
|
"""Extract and relay messages from the systemd journal
"""
from __future__ import print_function
import argparse
import errno
import datetime
import json
import os
import os.path
import re
#import select
import smtplib
import sys
from email.mime.text import MIMEText
from socket import gethostname
from systemd import journal
from systemd.id128 import get_boot
MUTEX = os.path.expanduser(os.path.join('~', '.journal-notify.lock'))
OOM_MESSAGE_START = re.compile('^.* invoked oom-killer: .*$')
OOM_MESSAGE_END = re.compile('^Killed process .*')
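# Kernel log lines of roughly this shape are what the two patterns above are
# meant to bracket (process names and numbers are hypothetical):
#   "chrome invoked oom-killer: gfp_mask=0x201da, order=0, oom_score_adj=300"
#   "Killed process 12345 (chrome) total-vm:1234567kB, anon-rss:234567kB"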
class timedelta(datetime.timedelta):
# there's a bug in systemd.journal that attempts to access this method
def totalseconds(self):
return self.total_seconds()
def _exit(msg, *args, **kwargs):
ec = kwargs.pop('ec', 1)
print(msg.format(*args, **kwargs), file=sys.stderr)
sys.exit(ec)
class LockFile(object):
def __init__(self):
self.__fname = MUTEX
self.__fd = None
def __open(self, flags, ignore=None, callback=None):
try:
return os.open(self.__fname, flags)
except (IOError, OSError) as exc:
            if exc.errno not in (ignore or ()):
_exit('Unable to acquire lock {}: {}', self.__fname, str(exc))
if callback:
return callback()
return -1
def __acquire(self, first_attempt=True):
kwargs = {}
if first_attempt:
kwargs.update({'ignore': (errno.EEXIST,), 'callback': self.__handle_stale})
self.__fd = self.__open(os.O_WRONLY | os.O_CREAT | os.O_EXCL, **kwargs)
return self.__fd
def __handle_stale(self):
fd = self.__open(os.O_RDONLY, (errno.ENOENT,), lambda: self.__acquire(False))
if self.__fd is not None:
# second self.__acquire succeeded
return self.__fd
pid = os.read(fd, 5)
try:
assert not os.read(fd, 1), 'Contains extra data'
pid = int(pid)
except Exception as exc:
_exit('Lockfile {} contents malformed: {}', self.__fname, str(exc))
try:
os.kill(pid, 0)
except OSError:
print('Removing stale lock file: {}'.format(self.__fname), file=sys.stderr)
os.unlink(self.__fname)
return self.__acquire(False)
else:
_exit('Process is still running: {}', pid)
def __enter__(self):
self.__acquire()
        os.write(self.__fd, str(os.getpid()).encode())
os.fsync(self.__fd)
os.lseek(self.__fd, 0, 0)
def __exit__(self, exc_type, exc_val, traceback):
os.close(self.__fd)
os.unlink(self.__fname)
class JournalReader(object):
def __init__(self, **kwargs):
self._state = None
self._process = kwargs.get('process', True)
self._mails = kwargs.get('mails', True)
self._multiline_match = False
self._reader = journal.Reader()
for match in kwargs.get('matches', []):
self.add_match(match)
if kwargs.get('dmesg', False):
self.dmesg()
if kwargs.get('oom', False):
self.dmesg()
self.add_multiline_match(OOM_MESSAGE_START, OOM_MESSAGE_END)
self._load_state()
@property
def _state_file(self):
return os.path.expanduser(os.path.join('~', '.journal-notify.state'))
def _load_state(self):
try:
with open(self._state_file, 'r') as fh:
self._state = json.load(fh)
except (IOError, OSError) as exc:
if exc.errno == errno.ENOENT:
self._state = {}
else:
raise
def _dump_state(self):
with open(self._state_file, 'w') as fh:
json.dump(self._state, fh)
def dmesg(self):
self._reader.this_boot()
self.add_match('_TRANSPORT=kernel')
def add_match(self, match):
self._reader.add_match(match)
def add_multiline_match(self, start_msg, end_msg):
self._multiline_match = True
self._match_start = start_msg
self._match_end = end_msg
def _next_multiline_match(self):
match = []
for event in self._reader:
if not match and re.match(self._match_start, event['MESSAGE']):
match.append(event)
elif match and re.match(self._match_end, event['MESSAGE']):
match.append(event)
yield match
match = []
elif match:
match.append(event)
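    # The generator above buffers events from the first line matching
    # self._match_start up to and including the first following line matching
    # self._match_end, yielding each completed buffer as one multi-line match
    # before scanning resumes.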
def _last_monotonic(self, boot_id=None):
if not boot_id:
boot_id = get_boot()
return timedelta(0, self._state.get(str(boot_id), 0))
def get_matching_events(self, boot_id=None):
r = []
if not boot_id:
boot_id = get_boot()
        last_monotonic = self._last_monotonic(boot_id)
self._reader.seek_monotonic(last_monotonic)
if self._multiline_match:
for match in self._next_multiline_match():
last_monotonic = match[-1]['__MONOTONIC_TIMESTAMP'][0]
r.append(match)
if self._process:
self._state[str(boot_id)] = last_monotonic.total_seconds()
self._dump_state()
return r
def output_matching_events(self, boot_id=None):
parts = []
for event in self.get_matching_events(boot_id=boot_id):
parts.append('>>> at {}:\n{}\n'.format(str(event[0]['__REALTIME_TIMESTAMP']), '\n'.join([e['MESSAGE'] for e in event])))
if not parts:
return
if self._mails:
msg = MIMEText('\n'.join(parts))
msg['Subject'] = '{}: found {} matching journal events since last run'.format(gethostname(), len(parts))
msg['To'] = ', '.join(self._mails)
msg['From'] = 'root@{}'.format(gethostname())
s = smtplib.SMTP('localhost')
s.sendmail(msg['From'], self._mails, msg.as_string())
else:
[print(part) for part in parts]
def parse_arguments():
parser = argparse.ArgumentParser(description="Watch systemd journal")
parser.add_argument('--dmesg', action='store_true', help='journalctl -k/--dmesg')
parser.add_argument('--oom', action='store_true', help='locate OOM Killer messages (implies --dmesg)')
parser.add_argument('--match', action='append', dest='matches', default=[], help='Match on FIELD=value')
parser.add_argument('--no-process', action='store_false', dest='process', default=True, help='Do not process messages and move pointer')
parser.add_argument('--mail', action='append', dest='mails', default=[], help='Send mail to address')
return parser.parse_args()
def reader_kwargs(args):
return vars(args)
def main():
args = parse_arguments()
kwargs = reader_kwargs(args)
with LockFile():
reader = JournalReader(**kwargs)
reader.output_matching_events()
if __name__ == '__main__':
main()
|
|
import pickle
from ..math.matrix import Matrix
from ..math.text2matrix import Text2Matrix
from ..nlp.segmenter import Segmenter
from ..common.global_info import GlobalInfo
from ..common.configuration import Configuration
class ChiSquareFilter:
def __init__(self, config, nodeName, loadFromFile = False):
self.curNode = config.GetChild(nodeName)
self.rate = float(self.curNode.GetChild("rate").GetValue())
self.method = self.curNode.GetChild("method").GetValue()
self.logPath = self.curNode.GetChild("log_path").GetValue()
self.modelPath = self.curNode.GetChild("model_path").GetValue()
self.idMap = None
self.trained = loadFromFile
if (loadFromFile):
f = open(self.modelPath, "r")
modelStr = pickle.load(f)
[self.idMap] = pickle.loads(modelStr)
f.close()
def SampleFilter(self, cols, vals):
if (not self.trained):
print "train filter before test"
return False
#check parameter
if (len(cols) <> len(vals)):
print "length of cols should equals length of vals"
return False
#filter sample
newCols = []
newVals = []
for c in range(0, len(cols)):
if self.idMap[cols[c]] >= 0:
newCols.append(self.idMap[cols[c]])
newVals.append(vals[c])
        return [newCols, newVals]
"""
filter given x,y by blackList
x's row should == y's row
@return newx, newy filtered
"""
def MatrixFilter(self, x, y):
if (not self.trained):
print "train filter before test"
return False
#check parameter
if (x.nRow <> len(y)):
print "ERROR!x.nRow should == len(y)"
return False
#stores new rows, cols, and vals
newRows = [0]
newCols = []
newVals = []
for r in range(x.nRow):
curRowLen = 0
#debug
#print "===new doc==="
for c in range(x.rows[r], x.rows[r + 1]):
if self.idMap[x.cols[c]] >= 0 :
newCols.append(self.idMap[x.cols[c]])
newVals.append(x.vals[c])
curRowLen += 1
newRows.append(newRows[len(newRows) - 1] + curRowLen)
return [Matrix(newRows, newCols, newVals), y]
"""
create a blackList by given x,y
@rate is a percentage of selected feature
using next formulation:
X^2(t, c) = N * (AD - CB)^2
____________________
(A+C)(B+D)(A+B)(C+D)
A,B,C,D is doc-count
A: belong to c, include t
B: Not belong to c, include t
C: belong to c, Not include t
D: Not belong to c, Not include t
B = t's doc-count - A
C = c's doc-count - A
D = N - A - B - C
and score of t can be calculated by next 2 formulations:
X^2(t) = sigma p(ci)X^2(t,ci) (avg)
i
X^2(t) = max { X^2(t,c) } (max)
@return true if succeed
"""
def TrainFilter(self, x, y):
#check parameter
if not ((self.method == "avg") or (self.method == "max")):
print "ERROR!method should be avg or max"
return False
if (x.nRow <> len(y)):
print "ERROR!x.nRow should == len(y)"
return False
#using y get set of target
yy = set(y)
yy = list(yy)
yy.sort()
#create a table stores X^2(t, c)
#create a table stores A(belong to c, and include t
chiTable = [[0 for i in range(x.nCol)] for j in range(yy[len(yy) - 1] + 1)]
aTable = [[0 for i in range(x.nCol)] for j in range(yy[len(yy) - 1] + 1)]
#calculate a-table
for row in range(x.nRow):
for col in range(x.rows[row], x.rows[row + 1]):
aTable[y[row]][x.cols[col]] += 1
#calculate chi-table
n = x.nRow
for t in range(x.nCol):
for cc in range(len(yy)):
#get a
a = aTable[yy[cc]][t]
#get b
b = GlobalInfo.idToDocCount[t] - a
#get c
c = GlobalInfo.classToDocCount[yy[cc]] - a
#get d
                d = n - a - b - c
                #get X^2(t, c)
                numerator = float(n) * (a*d - c*b) * (a*d - c*b)
                denominator = float(a+c) * (b+d) * (a+b) * (c+d)
                chiTable[yy[cc]][t] = numerator / denominator
#calculate chi-score of each t
#chiScore is [score, t's id] ...(n)
chiScore = [[0 for i in range(2)] for j in range(x.nCol)]
if (self.method == "avg"):
#calculate prior prob of each c
priorC = [0 for i in range(yy[len(yy) - 1] + 1)]
for i in range(len(yy)):
priorC[yy[i]] = float(GlobalInfo.classToDocCount[yy[i]]) / n
#calculate score of each t
for t in range(x.nCol):
chiScore[t][1] = t
for c in range(len(yy)):
chiScore[t][0] += priorC[yy[c]] * chiTable[yy[c]][t]
else:
#calculate score of each t
for t in range(x.nCol):
chiScore[t][1] = t
for c in range(len(yy)):
if (chiScore[t][0] < chiTable[yy[c]][t]):
chiScore[t][0] = chiTable[yy[c]][t]
#sort for chi-score, and make blackList
chiScore = sorted(chiScore, key = lambda chiType:chiType[0], reverse = True)
#init idmap
self.idMap = [0 for i in range(x.nCol)]
#add un-selected feature-id to idmap
for i in range(int(self.rate * len(chiScore)), len(chiScore)):
self.idMap[chiScore[i][1]] = -1
offset = 0
for i in range(x.nCol):
if (self.idMap[i] < 0):
offset += 1
else:
self.idMap[i] = i - offset
GlobalInfo.newIdToId[i - offset] = i
#output model information
if (self.modelPath <> ""):
f = open(self.modelPath, "w")
modelStr = pickle.dumps([self.idMap], 1)
pickle.dump(modelStr, f)
f.close()
#output chiSquare info
if (self.logPath <> ""):
f = open(self.logPath, "w")
f.write("chiSquare info:\n")
f.write("=======selected========\n")
for i in range(len(chiScore)):
if (i == int(self.rate * len(chiScore))):
f.write("========unselected=======\n")
term = GlobalInfo.idToTerm[chiScore[i][1]]
score = chiScore[i][0]
f.write(term.encode("utf-8") + " " + str(score) + "\n")
f.close()
self.trained = True
return True
"""
if __name__ == "__main__":
config = Configuration.FromFile("conf/test.xml")
GlobalInfo.Init(config, "__global__")
txt2mat = Text2Matrix(config, "__matrix__")
[trainx, trainy] = txt2mat.CreateTrainMatrix("data/tuangou_titles3.txt")
chiFilter = ChiSquareFilter(config, "__filter__")
chiFilter.TrainFilter(trainx, trainy)
"""
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Base types for nodes in a GRIT resource tree.
'''
import os
import sys
import types
from xml.sax import saxutils
from grit import exception
from grit import util
from grit import clique
import grit.format.interface
class Node(grit.format.interface.ItemFormatter):
'''An item in the tree that has children. Also implements the
ItemFormatter interface to allow formatting a node as a GRD document.'''
# Valid content types that can be returned by _ContentType()
_CONTENT_TYPE_NONE = 0 # No CDATA content but may have children
_CONTENT_TYPE_CDATA = 1 # Only CDATA, no children.
_CONTENT_TYPE_MIXED = 2 # CDATA and children, possibly intermingled
  # By default, nodes are not marked as whitelist-skipped.
_whitelist_marked_as_skip = False
def __init__(self):
self.children = [] # A list of child elements
self.mixed_content = [] # A list of u'' and/or child elements (this
# duplicates 'children' but
# is needed to preserve markup-type content).
self.name = u'' # The name of this element
self.attrs = {} # The set of attributes (keys to values)
self.parent = None # Our parent unless we are the root element.
self.uberclique = None # Allows overriding uberclique for parts of tree
def __iter__(self):
'''An in-order iteration through the tree that this node is the
root of.'''
return self.inorder()
def inorder(self):
'''Generator that generates first this node, then the same generator for
any child nodes.'''
yield self
for child in self.children:
for iterchild in child.inorder():
yield iterchild
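  # Illustrative sketch (hypothetical tree): for a root whose children are
  # [a, b] and where a has one child c, iterating the root yields
  # root, a, c, b in depth-first document order.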
def GetRoot(self):
'''Returns the root Node in the tree this Node belongs to.'''
curr = self
while curr.parent:
curr = curr.parent
return curr
# TODO(joi) Use this (currently untested) optimization?:
#if hasattr(self, '_root'):
# return self._root
#curr = self
#while curr.parent and not hasattr(curr, '_root'):
# curr = curr.parent
#if curr.parent:
# self._root = curr._root
#else:
# self._root = curr
#return self._root
def StartParsing(self, name, parent):
'''Called at the start of parsing.
Args:
name: u'elementname'
parent: grit.node.base.Node or subclass or None
'''
assert isinstance(name, types.StringTypes)
assert not parent or isinstance(parent, Node)
self.name = name
self.parent = parent
def AddChild(self, child):
'''Adds a child to the list of children of this node, if it is a valid
child for the node.'''
assert isinstance(child, Node)
if (not self._IsValidChild(child) or
self._ContentType() == self._CONTENT_TYPE_CDATA):
explanation = 'invalid child %s for parent %s' % (str(child), self.name)
raise exception.UnexpectedChild(explanation)
self.children.append(child)
self.mixed_content.append(child)
def RemoveChild(self, child_id):
'''Removes the first node that has a "name" attribute which
matches "child_id" in the list of immediate children of
this node.
Args:
child_id: String identifying the child to be removed
'''
index = 0
# Safe not to copy since we only remove the first element found
for child in self.children:
name_attr = child.attrs['name']
if name_attr == child_id:
self.children.pop(index)
self.mixed_content.pop(index)
break
index += 1
def AppendContent(self, content):
'''Appends a chunk of text as content of this node.
Args:
content: u'hello'
Return:
None
'''
assert isinstance(content, types.StringTypes)
if self._ContentType() != self._CONTENT_TYPE_NONE:
self.mixed_content.append(content)
elif content.strip() != '':
raise exception.UnexpectedContent()
def HandleAttribute(self, attrib, value):
'''Informs the node of an attribute that was parsed out of the GRD file
for it.
Args:
attrib: 'name'
value: 'fooblat'
Return:
None
'''
assert isinstance(attrib, types.StringTypes)
assert isinstance(value, types.StringTypes)
if self._IsValidAttribute(attrib, value):
self.attrs[attrib] = value
else:
raise exception.UnexpectedAttribute(attrib)
def EndParsing(self):
'''Called at the end of parsing.'''
# TODO(joi) Rewrite this, it's extremely ugly!
if len(self.mixed_content):
if isinstance(self.mixed_content[0], types.StringTypes):
# Remove leading and trailing chunks of pure whitespace.
while (len(self.mixed_content) and
isinstance(self.mixed_content[0], types.StringTypes) and
self.mixed_content[0].strip() == ''):
self.mixed_content = self.mixed_content[1:]
# Strip leading and trailing whitespace from mixed content chunks
# at front and back.
if (len(self.mixed_content) and
isinstance(self.mixed_content[0], types.StringTypes)):
self.mixed_content[0] = self.mixed_content[0].lstrip()
# Remove leading and trailing ''' (used to demarcate whitespace)
if (len(self.mixed_content) and
isinstance(self.mixed_content[0], types.StringTypes)):
if self.mixed_content[0].startswith("'''"):
self.mixed_content[0] = self.mixed_content[0][3:]
if len(self.mixed_content):
if isinstance(self.mixed_content[-1], types.StringTypes):
# Same stuff all over again for the tail end.
while (len(self.mixed_content) and
isinstance(self.mixed_content[-1], types.StringTypes) and
self.mixed_content[-1].strip() == ''):
self.mixed_content = self.mixed_content[:-1]
if (len(self.mixed_content) and
isinstance(self.mixed_content[-1], types.StringTypes)):
self.mixed_content[-1] = self.mixed_content[-1].rstrip()
if (len(self.mixed_content) and
isinstance(self.mixed_content[-1], types.StringTypes)):
if self.mixed_content[-1].endswith("'''"):
self.mixed_content[-1] = self.mixed_content[-1][:-3]
# Check that all mandatory attributes are there.
for node_mandatt in self.MandatoryAttributes():
mandatt_list = []
if node_mandatt.find('|') >= 0:
mandatt_list = node_mandatt.split('|')
else:
mandatt_list.append(node_mandatt)
mandatt_option_found = False
for mandatt in mandatt_list:
assert mandatt not in self.DefaultAttributes().keys()
if mandatt in self.attrs:
if not mandatt_option_found:
mandatt_option_found = True
else:
raise exception.MutuallyExclusiveMandatoryAttribute(mandatt)
if not mandatt_option_found:
raise exception.MissingMandatoryAttribute(mandatt)
# Add default attributes if not specified in input file.
for defattr in self.DefaultAttributes():
if not defattr in self.attrs:
self.attrs[defattr] = self.DefaultAttributes()[defattr]
def GetCdata(self):
'''Returns all CDATA of this element, concatenated into a single
string. Note that this ignores any elements embedded in CDATA.'''
return ''.join([c for c in self.mixed_content
if isinstance(c, types.StringTypes)])
def __unicode__(self):
'''Returns this node and all nodes below it as an XML document in a Unicode
string.'''
header = u'<?xml version="1.0" encoding="UTF-8"?>\n'
return header + self.FormatXml()
# Compliance with ItemFormatter interface.
def Format(self, item, lang_re = None):
return item.FormatXml()
def FormatXml(self, indent = u'', one_line = False):
'''Returns this node and all nodes below it as an XML
element in a Unicode string. This differs from __unicode__ in that it does
not include the <?xml> stuff at the top of the string. If one_line is true,
    children and CDATA are laid out in a way that preserves internal
whitespace.
'''
assert isinstance(indent, types.StringTypes)
content_one_line = (one_line or
self._ContentType() == self._CONTENT_TYPE_MIXED)
inside_content = self.ContentsAsXml(indent, content_one_line)
# Then the attributes for this node.
attribs = u' '
for (attrib, value) in self.attrs.iteritems():
# Only print an attribute if it is other than the default value.
if (not self.DefaultAttributes().has_key(attrib) or
value != self.DefaultAttributes()[attrib]):
attribs += u'%s=%s ' % (attrib, saxutils.quoteattr(value))
attribs = attribs.rstrip() # if no attribs, we end up with '', otherwise
# we end up with a space-prefixed string
# Finally build the XML for our node and return it
if len(inside_content) > 0:
if one_line:
return u'<%s%s>%s</%s>' % (self.name, attribs, inside_content, self.name)
elif content_one_line:
return u'%s<%s%s>\n%s %s\n%s</%s>' % (
indent, self.name, attribs,
indent, inside_content,
indent, self.name)
else:
return u'%s<%s%s>\n%s\n%s</%s>' % (
indent, self.name, attribs,
inside_content,
indent, self.name)
else:
return u'%s<%s%s />' % (indent, self.name, attribs)
def ContentsAsXml(self, indent, one_line):
'''Returns the contents of this node (CDATA and child elements) in XML
format. If 'one_line' is true, the content will be laid out on one line.'''
assert isinstance(indent, types.StringTypes)
# Build the contents of the element.
inside_parts = []
last_item = None
for mixed_item in self.mixed_content:
if isinstance(mixed_item, Node):
inside_parts.append(mixed_item.FormatXml(indent + u' ', one_line))
if not one_line:
inside_parts.append(u'\n')
else:
message = mixed_item
# If this is the first item and it starts with whitespace, we add
# the ''' delimiter.
if not last_item and message.lstrip() != message:
message = u"'''" + message
inside_parts.append(util.EncodeCdata(message))
last_item = mixed_item
# If there are only child nodes and no cdata, there will be a spurious
# trailing \n
if len(inside_parts) and inside_parts[-1] == '\n':
inside_parts = inside_parts[:-1]
# If the last item is a string (not a node) and ends with whitespace,
# we need to add the ''' delimiter.
if (isinstance(last_item, types.StringTypes) and
last_item.rstrip() != last_item):
inside_parts[-1] = inside_parts[-1] + u"'''"
return u''.join(inside_parts)
def RunGatherers(self, recursive=0, debug=False, substitute_messages=False):
'''Runs all gatherers on this object, which may add to the data stored
by the object. If 'recursive' is true, will call RunGatherers() recursively
on all child nodes first. If 'debug' is True, will print out information
    as it runs each node's gatherers.
'''
if recursive:
for child in self.children:
assert child.name != 'translations' # <grit> node overrides
child.RunGatherers(recursive=recursive, debug=debug)
def SubstituteMessages(self, substituter):
'''Applies substitutions to all messages in the tree.
Called as a final step of RunGatherers.
Args:
substituter: a grit.util.Substituter object.
'''
for child in self.children:
child.SubstituteMessages(substituter)
def ItemFormatter(self, type):
'''Returns an instance of the item formatter for this object of the
specified type, or None if not supported.
Args:
type: 'rc-header'
Return:
(object RcHeaderItemFormatter)
'''
if type == 'xml':
return self
else:
return None
def SatisfiesOutputCondition(self):
'''Returns true if this node is either not a descendant of an <if> element,
or if all conditions on its <if> element ancestors are satisfied.
Used to determine whether to return item formatters for formats that
obey conditional output of resources (e.g. the RC formatters).
'''
from grit.node import misc
if self.parent:
return self.parent.SatisfiesOutputCondition()
else:
return True
def _IsValidChild(self, child):
'''Returns true if 'child' is a valid child of this node.
Overridden by subclasses.'''
return False
def _IsValidAttribute(self, name, value):
'''Returns true if 'name' is the name of a valid attribute of this element
    and 'value' is a valid value for that attribute. Overridden by
subclasses unless they have only mandatory attributes.'''
return (name in self.MandatoryAttributes() or
name in self.DefaultAttributes())
def _ContentType(self):
'''Returns the type of content this element can have. Overridden by
subclasses. The content type can be one of the _CONTENT_TYPE_XXX constants
above.'''
return self._CONTENT_TYPE_NONE
def MandatoryAttributes(self):
'''Returns a list of attribute names that are mandatory (non-optional)
on the current element. One can specify a list of
"mutually exclusive mandatory" attributes by specifying them as one
element in the list, separated by a "|" character.
'''
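    # Example: returning ['name', 'file|path'] means a 'name' attribute is
    # required and exactly one of 'file' or 'path' must be present (see the
    # mandatory-attribute check in EndParsing above).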
return []
def DefaultAttributes(self):
'''Returns a dictionary of attribute names that have defaults, mapped to
the default value. Overridden by subclasses.'''
return {}
def GetCliques(self):
'''Returns all MessageClique objects belonging to this node. Overridden
by subclasses.
Return:
[clique1, clique2] or []
'''
return []
def ToRealPath(self, path_from_basedir):
'''Returns a real path (which can be absolute or relative to the current
working directory), given a path that is relative to the base directory
set for the GRIT input file.
Args:
path_from_basedir: '..'
Return:
'resource'
'''
return util.normpath(os.path.join(self.GetRoot().GetBaseDir(),
os.path.expandvars(path_from_basedir)))
def FilenameToOpen(self):
'''Returns a path, either absolute or relative to the current working
directory, that points to the file the node refers to. This is only valid
for nodes that have a 'file' or 'path' attribute. Note that the attribute
is a path to the file relative to the 'base-dir' of the .grd file, whereas
this function returns a path that can be used to open the file.'''
file_attribute = 'file'
if not file_attribute in self.attrs:
file_attribute = 'path'
return self.ToRealPath(self.attrs[file_attribute])
def UberClique(self):
'''Returns the uberclique that should be used for messages originating in
a given node. If the node itself has its uberclique set, that is what we
use, otherwise we search upwards until we find one. If we do not find one
even at the root node, we set the root node's uberclique to a new
uberclique instance.
'''
node = self
while not node.uberclique and node.parent:
node = node.parent
if not node.uberclique:
node.uberclique = clique.UberClique()
return node.uberclique
def IsTranslateable(self):
'''Returns false if the node has contents that should not be translated,
    otherwise returns true (even if the node has no contents).
'''
if not 'translateable' in self.attrs:
return True
else:
return self.attrs['translateable'] == 'true'
def GetNodeById(self, id):
'''Returns the node in the subtree parented by this node that has a 'name'
attribute matching 'id'. Returns None if no such node is found.
'''
for node in self:
if 'name' in node.attrs and node.attrs['name'] == id:
return node
return None
def GetChildrenOfType(self, type):
'''Returns a list of all subnodes (recursing to all leaves) of this node
that are of the indicated type.
Args:
type: A type you could use with isinstance().
Return:
A list, possibly empty.
'''
return [child for child in self if isinstance(child, type)]
def GetTextualIds(self):
'''Returns the textual ids of this node, if it has some.
Otherwise it just returns None.
'''
if 'name' in self.attrs:
return [self.attrs['name']]
return None
def EvaluateCondition(self, expr):
'''Returns true if and only if the Python expression 'expr' evaluates
to true.
The expression is given a few local variables:
- 'lang' is the language currently being output
- 'defs' is a map of C preprocessor-style define names to their values
- 'os' is the current platform (likely 'linux2', 'win32' or 'darwin').
- 'pp_ifdef(define)' which behaves just like the C preprocessors #ifdef,
i.e. it is shorthand for "define in defs"
- 'pp_if(define)' which behaves just like the C preprocessor's #if, i.e.
it is shorthand for "define in defs and defs[define]".
'''
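    # A few expressions this supports, given the variables assembled below:
    #   "lang == 'fr'"
    #   "pp_ifdef('chromeos') and not pp_if('use_titlecase')"
    #   "is_win or os == 'darwin'"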
root = self.GetRoot()
lang = ''
defs = {}
def pp_ifdef(define):
return define in defs
def pp_if(define):
return define in defs and defs[define]
if hasattr(root, 'output_language'):
lang = root.output_language
if hasattr(root, 'defines'):
defs = root.defines
variable_map = {
'lang' : lang,
'defs' : defs,
'os': sys.platform,
'is_linux': sys.platform.startswith('linux'),
'is_macosx': sys.platform == 'darwin',
'is_win': sys.platform in ('cygwin', 'win32'),
'is_posix': (sys.platform in ('darwin', 'linux2', 'linux3', 'sunos5')
or sys.platform.find('bsd') != -1),
'pp_ifdef' : pp_ifdef,
'pp_if' : pp_if,
}
return eval(expr, {}, variable_map)
def OnlyTheseTranslations(self, languages):
'''Turns off loading of translations for languages not in the provided list.
    Args:
languages: ['fr', 'zh_cn']
'''
for node in self:
if (hasattr(node, 'IsTranslation') and
node.IsTranslation() and
node.GetLang() not in languages):
node.DisableLoading()
def PseudoIsAllowed(self):
'''Returns true if this node is allowed to use pseudo-translations. This
is true by default, unless this node is within a <release> node that has
the allow_pseudo attribute set to false.
'''
p = self.parent
while p:
if 'allow_pseudo' in p.attrs:
return (p.attrs['allow_pseudo'].lower() == 'true')
p = p.parent
return True
def ShouldFallbackToEnglish(self):
'''Returns true iff this node should fall back to English when
pseudotranslations are disabled and no translation is available for a
given message.
'''
p = self.parent
while p:
if 'fallback_to_english' in p.attrs:
return (p.attrs['fallback_to_english'].lower() == 'true')
p = p.parent
return False
def WhitelistMarkedAsSkip(self):
'''Returns true if the node is marked to be skipped in the output by a
whitelist.
'''
return self._whitelist_marked_as_skip
def SetWhitelistMarkedAsSkip(self, mark_skipped):
'''Sets WhitelistMarkedAsSkip.
'''
self._whitelist_marked_as_skip = mark_skipped
def ExpandVariables(self):
'''Whether we need to expand variables on a given node.'''
return False
class ContentNode(Node):
'''Convenience baseclass for nodes that can have content.'''
def _ContentType(self):
return self._CONTENT_TYPE_MIXED
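
# A minimal illustrative sketch (hypothetical subclass and attribute names,
# guarded so it only runs when this module is executed directly): shows how
# the parsing hooks above fit together.
if __name__ == '__main__':
  class _ExampleNode(ContentNode):
    def MandatoryAttributes(self):
      return ['name']
    def DefaultAttributes(self):
      return {'translateable': 'true'}

  node = _ExampleNode()
  node.StartParsing(u'example', None)
  node.HandleAttribute(u'name', u'IDS_GREETING')
  node.AppendContent(u'Hello, world!')
  node.EndParsing()        # fills in translateable='true' from the defaults
  print node.FormatXml()   # <example name="IDS_GREETING"> ... </example>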
|
|
# Copyright (C) 2014 Parker Michaels
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from Lexer import RulesFileException
from ASTBase import Node, Scope
from ..Common import lower_nullable, compare_nullable_icase
class PatternAttributes(object):
"""
Represents the attributes which can be applied to a pattern using a
prefix attached to the first string. (e.g. i"hello")
@ivar is_literal: Indicates that all characters in the pattern are literal
@ivar is_unicode_defaults: Indicates that special characters and classes should use Unicode equivalents
@ivar is_case_insensitive: Indicates that letters in the pattern should match both their uppercase and lowercase versions
"""
def __init__(self, is_case_insensitive, is_unicode_defaults, is_literal):
"""
@param is_literal: Boolean which, if True, indicates that all characters in the pattern are literal
@param is_unicode_defaults: Boolean which, if True, indicates that special characters and classes should use Unicode equivalents
@param is_case_insensitive: Boolean which, if True, indicates that letters in the pattern should match both their uppercase and lowercase versions
"""
if is_literal and is_unicode_defaults:
raise ValueError("Pattern cannot be both literal and have Unicode special characters")
self.is_literal = is_literal
self.is_unicode_defaults = is_unicode_defaults
self.is_case_insensitive = is_case_insensitive
class Pattern(Node):
"""
Represents a regular expression string
@ivar regex: string containing the regular expression
@ivar attributes: a set of pattern attributes which guide parsing
@ivar line_number: integer with the line number in the source where this object was parsed
"""
def accept(self, visitor):
visitor.visit_pattern(self)
def __init__(self, regex, attributes = PatternAttributes(False, False, False), line_number=None):
self.regex = regex
self.attributes = attributes
self.line_number = line_number
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __eq__(self, rhs):
return (isinstance(rhs, Pattern) and
self.regex == rhs.regex and
                self.attributes.is_case_insensitive == rhs.attributes.is_case_insensitive)
def __repr__(self):
return "Pattern(\"%s\", %s)" % (self.regex, self.is_case_insensitive)
class Define(Node):
"""
Represents a 'Let id = pattern' variable declaration
@ivar id: the name of the variable
@ivar pattern: a Pattern object representing the variable's value
@ivar line_number: integer with the line number in the source where this object was parsed
"""
def accept(self, visitor):
visitor.visit_define(self)
def __init__(self, id, pattern, line_number=None):
self.id = id
self.pattern = pattern
self.line_number = line_number
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __eq__(self, rhs):
return (isinstance(rhs, Define) and
self.id.lower() == rhs.id.lower() and
self.pattern == rhs.pattern)
def __repr__(self):
return "Define(%s, %s)" % (repr(self.id), repr(self.pattern))
class SectionReference(Node):
"""
Represents an unresolved reference to a section
@ivar name: the name of section being referenced
@ivar line_number: integer with the line number in the source where this object was parsed
"""
def accept(self, visitor):
visitor.visit_section_reference(self)
def __init__(self, name, line_number=None):
self.name = name
self.line_number = line_number
def __repr__(self):
return "SectionReference(%s)" % repr(self.name)
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __eq__(self, rhs):
return self.name.lower() == rhs.name.lower()
class Rule(Node):
"""
Represents a lexical analyzer rule with a 'action? id: pattern (section_action section?)?' syntax
@ivar id: string containing the name of the rule
@ivar pattern: Pattern object representing the regular expression that matches the rule
@ivar rule_action: set of lowercase strings indicating what action to take if the rule matches
@ivar section_action: string containing the action to take after matching the rule
@ivar section: Section or SectionReference object specifying which section the analyzer should enter after matching the rule
@ivar line_number: integer with the line number in the source where this object was parsed
"""
def accept(self, visitor):
visitor.visit_rule(self)
def __init__(self, id, pattern, rule_action=[], section_action=None, line_number=None):
self.id = id
self.pattern = pattern
self.rule_action = list(i.lower() for i in rule_action)
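        # section_action is kept as an (action, target) pair, where target is
        # a Section or SectionReference (see Section.__init__ below).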
if section_action is None:
self.section_action = (None, None)
else:
self.section_action = section_action
self.line_number = line_number
def __hash__(self):
return hash((lower_nullable(self.id), frozenset(self.rule_action), self.section_action))
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __eq__(self, rhs):
return (isinstance(rhs, Rule) and
compare_nullable_icase(self.id, rhs.id) and
self.pattern == rhs.pattern and
self.rule_action == rhs.rule_action and
self.section_action == rhs.section_action)
def __repr__(self):
return "Rule(Id=%s, %s, Action=%s, SectionAction=%s)" % (repr(self.id), repr(self.pattern), repr(self.rule_action), repr(self.section_action))
class Section(Scope):
"""
Represents a grouping of rules, ids, and reserved keywords
@ivar id: string which identifies the section within its containing scope
    @ivar inherits: True if this section should fall back on its parent's rules, False otherwise
    @ivar exits: True if this section should fall back on its parent's rules and exit the section if it does so, False otherwise
@ivar line_number: integer with the line number in the source where this object was parsed
"""
def accept(self, visitor):
visitor.visit_section(self)
def __init__(self, id, parent=None, inherits=False, exits=False, line_number=None, **resources):
Scope.__init__(self, parent, line_number, **resources)
self.id = id
self.inherits = inherits
self.exits = exits
self.line_number = line_number
for id, section in self.all('section'):
section.parent = self
self.children[id.lower()] = section
for id, rule in self.all('rule'):
action, target = rule.section_action
if isinstance(target, Section):
self.add('section', target)
target.parent = self
if target.id is not None:
self.children[target.id.lower()] = target
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __eq__(self, rhs):
return (isinstance(rhs, Section) and
compare_nullable_icase(self.id, rhs.id) and
self.inherits == rhs.inherits and
self.exits == rhs.exits and
Scope.__eq__(self, rhs))
def __repr__(self):
resources = []
for resource_type in self.resources:
formatted_type = resource_type.title()
contents = ', '.join(repr(i) for i in self.resources[resource_type].values())
resources.append('{type}=[{contents}]'.format(type=formatted_type, contents=contents))
return "Section({id}, {resources}, Inherits={inherits}, Exits={exits})".format(
id=repr(self.id),
resources=resources,
inherits=self.inherits,
exits=self.exits)
def get_qualified_name(self):
"""
Return the qualified (hierarchical) name of the section.
@return: string representing the qualified name of the section
"""
current_scope = self
qualified_name_items = []
while current_scope is not None:
qualified_name_items.insert(0, current_scope)
current_scope = current_scope.parent
return '.'.join(i.id for i in qualified_name_items)
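
# A minimal illustrative sketch (hypothetical values, guarded so it never runs
# on import): building a pattern and a 'Let' definition by hand.
if __name__ == "__main__":
    ident = Pattern("[a-zA-Z_][a-zA-Z0-9_]*",
                    PatternAttributes(is_case_insensitive=False,
                                      is_unicode_defaults=False,
                                      is_literal=False),
                    line_number=1)
    let_ident = Define("identifier", ident, line_number=1)
    print(repr(let_ident))  # Define('identifier', Pattern("[a-zA-Z_][a-zA-Z0-9_]*", False))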
|
|
#!/usr/bin/python3
import numpy as np
from datetime import datetime
import sys
from sys import argv
def softmax(x):
# print('softmax x dim:' + str(x.shape))
exp_scores = np.exp(x)
probs = exp_scores / np.sum(exp_scores, axis=0, keepdims=True)
return probs
class RNN:
def __init__(self, vocab_dim, hidden_dim=100):
self.vocab_dim = vocab_dim
self.hidden_dim = hidden_dim
        # saved hidden state
self.hprev = np.zeros((hidden_dim, 1))
# input matrix
self.U = np.random.randn(hidden_dim, vocab_dim) * 0.01
# output matrix
self.V = np.random.randn(vocab_dim, hidden_dim) * 0.01
# transition matrix
self.W = np.random.randn(hidden_dim, hidden_dim) * 0.01
# hidden bias
self.bh = np.zeros((hidden_dim, 1))
# output bias
self.by = np.zeros((vocab_dim, 1))
# memory for adaptive gradient
self.mU = np.zeros_like(self.U)
self.mV = np.zeros_like(self.V)
self.mW = np.zeros_like(self.W)
self.mbh = np.zeros_like(self.bh)
self.mby = np.zeros_like(self.by)
# total loss
self.sequence_len = 25
self.loss = 0
self.ch_to_x = {}
for i in range(self.vocab_dim):
ch = chr(i)
self.ch_to_x[ch] = self.convert_ch_to_x(ch)
def reset_epoch(self):
self.loss = -np.log(1.0 / self.vocab_dim) * self.sequence_len
self.hprev = np.zeros_like(self.hprev)
def reset_prediction(self):
self.hprev = np.zeros_like(self.hprev)
def convert_ch_to_x(self, ch):
x = np.zeros((self.vocab_dim, 1))
x[ord(ch)][0] = 1
return x
def get_data(self, string):
return np.array([self.ch_to_x[ch] for ch in string])
def replace_non_ascii(self, string):
return ''.join([ch if ord(ch) < 128 else '\x01' for ch in string])
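    # The forward pass below implements a vanilla tanh RNN:
    #   s[t] = tanh(U.x[t] + W.s[t-1] + bh)
    #   o[t] = softmax(V.s[t] + by)
    # When target chars are given it also accumulates the cross-entropy loss
    #   loss += -log(o[t][target[t]]).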
def forward_propagation(self, x, y_chars = None):
# total number of input samples
T = len(x)
# saved hidden states T times
s = np.zeros((T, self.hidden_dim, 1))
# and last one row is hprev
s[-1] = np.copy(self.hprev)
# saved previous outputs T times
o = np.zeros((T, self.vocab_dim, 1)) # 1-to-vocab_size representation
#print('T=' + str(T))
#print('s' + str(s.shape))
#print('x' + str(x.shape))
#print('U' + str(self.U.shape))
#print('W' + str(self.W.shape))
#print('V' + str(self.V.shape))
#print('U*x' + str(self.U.dot(x[0]).shape))
#print('W*s' + str(self.W.dot(s[0]).shape))
#print('bh' + str(self.bh.shape))
# for each char
if y_chars:
#print('T=' + str(T))
for t in range(T):
s[t] = np.tanh(self.U.dot(x[t]) + self.W.dot(s[t-1]) + self.bh)
o[t] = softmax(self.V.dot(s[t]) + self.by)
self.loss += -np.log(o[t][ord(y_chars[t])])
else:
for t in range(T):
s[t] = np.tanh(self.U.dot(x[t]) + self.W.dot(s[t-1]) + self.bh)
o[t] = softmax(self.V.dot(s[t]) + self.by)
self.hprev = np.copy(s[-1])
return [o, s, self.loss]
def predict(self, x):
o, s, loss = self.forward_propagation(x)
#print(o[len(x)-1,:])
return np.argmax(o[len(x)-1:])
#return o[len(x)-1,:] # select only last state from o
def predict_char(self, ch):
return chr(self.predict(np.array([self.ch_to_x[ch]])))
def back_propagation(self, x, y, y_chars):
T = len(y)
# forward prop step
o, s, loss = self.forward_propagation(x, y_chars)
# gradients
dLdU = np.zeros_like(self.U)
dLdV = np.zeros_like(self.V)
dLdW = np.zeros_like(self.W)
dLdbh = np.zeros_like(self.bh)
dLdby = np.zeros_like(self.by)
dhnext = np.zeros_like(s[0])
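        # Backpropagation through time, walking backwards over the steps:
        #   dV  += (o[t] - y[t]) . s[t]^T        dby += (o[t] - y[t])
        #   dh    = V^T.(o[t] - y[t]) + dh_next
        #   dhraw = (1 - s[t]^2) * dh            (tanh derivative)
        #   dW  += dhraw . s[t-1]^T   dU += dhraw . x[t]^T   dbh += dhraw
        #   dh_next = W^T.dhraw
        # Gradients are clipped to [-5, 5] at the end to limit explosion.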
# calculate errors [vectorized]
delta_o = o - y
#print('delta_o=' + str(delta_o.shape))
# for each output backwards
for t in reversed(range(T)):
dLdV += np.outer(delta_o[t], s[t].T)
dLdby += delta_o[t]
#initial dh calculation
dh = self.V.T.dot(delta_o[t]) + dhnext
dhraw = (1 - (s[t] ** 2)) * dh
dLdbh += dhraw
dLdW += np.outer(dhraw, s[t-1])
dLdU += np.outer(dhraw, x[t])
# update delta for next step
dhnext = self.W.T.dot(dhraw)
for dparam in [dLdU, dLdV, dLdW, dLdbh, dLdby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradient
return [dLdU, dLdV, dLdW, dLdbh, dLdby]
# adaptive gradient learning step
def adagrad_step(self, x, y, y_chars, learning_rate):
dLdU, dLdV, dLdW, dLdbh, dLdby = self.back_propagation(x, y, y_chars)
# Change parameters according to gradients and learning rate
for param, dparam, mem in zip(
[self.U, self.V, self.W, self.bh, self.by ],
[dLdU, dLdV, dLdW, dLdbh, dLdby ],
[self.mU, self.mV, self.mW, self.mbh, self.mby]):
mem += dparam * dparam
param += -learning_rate * dparam / np.sqrt(mem + 1e-8) #adagrad update
@staticmethod
def load(filename):
import pickle
from os import path
if path.exists(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
return None
    def save(self, filename):
        import pickle
        with open(filename, 'wb') as f:
            pickle.dump(self, f)
        return True
# Outer AdaGrad Loop
# - self: The RNN model instance
# - filename: text code file
# - learning_rate: Initial learning rate for SGD
# - nepoch: Number of times to iterate through the complete dataset
# - evaluate_loss_after: Evaluate the loss after this many epochs
def train(self, filename, learning_rate=0.1, nepoch=100, evaluate_loss_after=3):
# We keep track of the losses so we can plot them later
losses = []
num_examples_seen = 0
string = ''
with open(filename, 'rb') as f:
bytes = f.read()
print('Data have been read from %s, size=%d' % (filename, len(bytes)))
string = self.replace_non_ascii(bytes.decode('866'))
            print('Data has been decoded into a string')
for epoch in range(nepoch):
# Optionally evaluate the loss
if (epoch % evaluate_loss_after == 0):
losses.append((num_examples_seen, self.loss))
time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print( "%s: Loss after num_examples_seen=%d epoch=%d: %f" % (time, num_examples_seen, epoch, self.loss))
# Adjust the learning rate if loss increases
if (len(losses) > 1 and losses[-1][1] > losses[-2][1]):
learning_rate = learning_rate * 0.5
print( "Setting learning rate to %f" % learning_rate)
self.save('model.' + str(epoch) + ".dat")
sys.stdout.flush()
self.reset_epoch()
print('epoch.' +str(epoch))
beg = 0
end = beg + self.sequence_len
while(end < len(string)):
                # input sequence of encoded vectors
X_seq = self.get_data(string[beg:end])
                # target output chars: the same sequence shifted forward by one char
y_chars = string[beg+1:end+1]
# encoded output
y_seq = self.get_data(y_chars)
# One adagrad step
self.adagrad_step(X_seq, y_seq, y_chars, learning_rate)
num_examples_seen += 1
# iterate
beg += self.sequence_len
end = beg + self.sequence_len
def usage():
print("Usage: " + argv[0] + " <samples> [APPEND]")
def main():
if len(argv) != 2 and len(argv) != 3:
usage()
exit(-1)
append_model = (len(argv) == 3)
filename = argv[1]
    model = RNN(128)  # [0-127]: 128 ASCII characters; non-ASCII chars are mapped to '\x01'
loaded_model = model.load('model.dat')
if not loaded_model:
model.train(filename)
model.save('model.dat')
elif append_model:
loaded_model.train(filename)
loaded_model.save('model.dat')
else:
        print('model.dat is already present! Remove it if you want to regenerate the model')
exit(-1)
if __name__ == '__main__':
main()
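
# A minimal illustrative sketch (hypothetical helper, not called anywhere
# above): generate text from a trained model by feeding each predicted
# character back in as the next input.
def sample_text(model, seed_ch='T', length=40):
    model.reset_prediction()
    out = seed_ch
    ch = seed_ch
    for _ in range(length):
        ch = model.predict_char(ch)
        out += ch
    return out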
|
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COCO dataset register."""
# pylint: disable=invalid-name
# pylint: disable=g-explicit-length-test
# pylint: disable=redefined-outer-name
import json
import numpy as np
import os
import tqdm
from tensorpack.utils import logger
from tensorpack.utils.timer import timed_operation
from config import config as cfg
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from FasterRCNN.dataset import DatasetRegistry
from FasterRCNN.dataset import DatasetSplit
from FasterRCNN.dataset.coco import COCODetection as COCODetectionBase
from FasterRCNN.dataset.coco import register_coco as register_coco_supervised
__all__ = ["register_coco"]
# register semi-supervised splits for coco
SEMI_SUPERVISED_SPLITS = []
for seed in range(1, 6):
for percent in [1, 2, 5, 10, 20, 30, 40, 50]:
SEMI_SUPERVISED_SPLITS.append("train2017.{}@{}".format(seed, percent))
# adding corresponding unlabeled one
SEMI_SUPERVISED_SPLITS.append("train2017.{}@{}-unlabeled".format(
seed, percent))
# 100% , unlab is with lab
SEMI_SUPERVISED_SPLITS.append("train2017.{}@{}-extra".format(0, 100))
SEMI_SUPERVISED_SPLITS.append("train2017.{}@{}-extra-unlabeled".format(0, 100))
# coco unlabeled data
SEMI_SUPERVISED_SPLITS.append("unlabeled2017")
# coco 20 class unlabeled for voc
NUM_20CLASS = 1
SEMI_SUPERVISED_SPLITS.append("unlabeledtrainval20class")
class COCODetection(COCODetectionBase):
"""COCO class object.
Mapping from the incontinuous COCO category id to an id in [1, #category]
For your own coco-format, dataset, change this to an **empty dict**.
"""
# handle a few special splits whose names do not match the directory names
_INSTANCE_TO_BASEDIR = {
"valminusminival2014": "val2014",
"minival2014": "val2014",
"val2017_100": "val2017",
"unlabeled2017": "unlabeled2017",
"train2017.{}@{}-extra".format(0, 100): "",
"train2017.{}@{}-extra-unlabeled".format(0, 100): "",
}
def __init__(self, basedir, split):
"""Init.
Args:
basedir (str): root of the dataset which contains the subdirectories
for each split and annotations
split (str): the name of the split, e.g. "train2017". The split has
to match an annotation file in "annotations/" and a directory of
images.
Examples:
      For a directory of this structure:

        DIR/
          annotations/
            instances_XX.json
            instances_YY.json
          XX/
          YY/

      use `COCODetection(DIR, 'XX')` and `COCODetection(DIR, 'YY')`
"""
for sp in SEMI_SUPERVISED_SPLITS:
if sp not in self._INSTANCE_TO_BASEDIR:
self._INSTANCE_TO_BASEDIR.update({str(sp): "train2017"})
basedir = os.path.expanduser(basedir)
self._imgdir = os.path.realpath(
os.path.join(basedir, self._INSTANCE_TO_BASEDIR.get(split, split)))
assert os.path.isdir(self._imgdir), "{} is not a directory!".format(
self._imgdir)
if split in SEMI_SUPERVISED_SPLITS:
annotation_file = os.path.join(
basedir,
"annotations/semi_supervised/instances_{}.json".format(split))
else:
annotation_file = os.path.join(
basedir, "annotations/instances_{}.json".format(split))
assert os.path.isfile(annotation_file), annotation_file
self.coco = COCO(annotation_file)
self.annotation_file = annotation_file
logger.info("Instances loaded from {}.".format(annotation_file))
def eval_inference_results2(self,
results,
output=None,
threshold=None,
metric_only=False):
    # Compared with eval_inference_results, this v2 version takes a threshold
    # and filters out boxes whose scores fall below it. It is designed for
    # SSL experiments.
if not metric_only:
if threshold is not None:
logger.warn(
"Use thresholding {} to filter final resulting boxes".format(
threshold))
continuous_id_to_COCO_id = {
v: k for k, v in self.COCO_id_to_category_id.items()
}
n = 0
final_results = []
for res in results:
# convert to COCO's incontinuous category id
if res["category_id"] in continuous_id_to_COCO_id:
res["category_id"] = continuous_id_to_COCO_id[res["category_id"]]
if threshold is not None:
if res["score"] < threshold:
n += 1
continue
# COCO expects results in xywh format
box = res["bbox"]
box[2] -= box[0]
box[3] -= box[1]
res["bbox"] = [round(float(x), 3) for x in box]
final_results.append(res)
results = final_results
if output is not None:
if not os.path.exists(os.path.dirname(output)):
os.makedirs(os.path.dirname(output))
with open(output, "w") as f:
json.dump(results, f)
if threshold is not None:
with open(output + "_boxcount.json", "w") as f:
r = {"passed": len(results), "removed": n}
print("Box thresholding stats: \n\t", r)
json.dump(r, f)
if len(results):
metrics = self.print_coco_metrics(results)
# save precision_recall data:
precision_recall = self.cocoEval.precision_recall
pr_path = os.path.join(os.path.split(output)[0], "precision_recall.npy")
print("Saving precision_recall curve to {}".format(pr_path))
np.save(pr_path, {"pr": precision_recall})
# sometimes may crash if the results are empty?
return metrics
else:
return {}
def register_coco(basedir):
"""Register COCO.
Add COCO datasets like "coco_train201x" to the registry,
so you can refer to them with names in `cfg.DATA.TRAIN/VAL`.
Note that train2017==trainval35k==train2014+val2014-minival2014, and
val2017==minival2014.
Args:
basedir: root dir that saves datasets.
"""
# 80 names for COCO
# For your own coco-format dataset, change this.
class_names = [
"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
"truck", "boat", "traffic light", "fire hydrant", "stop sign",
"parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag",
"tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
"baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon",
"bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
"hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant",
"bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote",
"keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
"refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
"hair drier", "toothbrush"
] # noqa
class_names = ["BG"] + class_names
register_coco_supervised(basedir)
for split in SEMI_SUPERVISED_SPLITS[:-NUM_20CLASS]:
name = "coco_" + split
DatasetRegistry.register(name, lambda x=split: COCODetection(basedir, x))
DatasetRegistry.register_metadata(name, "class_names", class_names)
logger.info("Register dataset {}".format(
[a for a in DatasetRegistry._registry.keys()])) # pylint: disable=protected-access
assert os.environ["COCODIR"], "COCODIR environ variable is not set".format(
os.environ["COCODIR"])
# also register coco train set 20 class for voc experiments
register_coco_for_voc(os.environ["COCODIR"])
class COCODetectionForVOC(COCODetection):
"""COCODetection for VOC."""
# set to empty since this instances_unlabeledtrainval20class.json file has file_name with relative path to train2017 or val2017
_INSTANCE_TO_BASEDIR = {"unlabeledtrainval20class": ""}
# this mapping is obtained by running dataset/cls_mapping_coco_voc.py
COCO_id_to_category_id = {
64: 14,
1: 3,
2: 6,
3: 10,
4: 1,
5: 16,
6: 18,
7: 9,
72: 20,
9: 8,
67: 19,
44: 17,
16: 11,
17: 12,
18: 2,
19: 4,
20: 15,
21: 7,
62: 13,
63: 5
}
def register_coco_for_voc(basedir):
class_names = [
"person", "chair", "aeroplane", "bus", "cow", "bird", "motorbike", "boat",
"car", "horse", "sofa", "pottedplant", "tvmonitor", "cat", "train",
"bottle", "diningtable", "dog", "bicycle", "sheep"
]
class_names = ["BG"] + class_names
for split in SEMI_SUPERVISED_SPLITS[-NUM_20CLASS:]:
name = "coco_" + split
DatasetRegistry.register(
name, lambda x=split: COCODetectionForVOC(basedir, x))
DatasetRegistry.register_metadata(name, "class_names", class_names)
logger.info("Register dataset {}".format(
[a for a in DatasetRegistry._registry.keys()]))
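
# A minimal illustrative sketch (hypothetical helper; assumes COCODIR is set,
# register_coco(basedir) has already run, and that DatasetRegistry.get()
# returns the registered dataset object): loading one semi-supervised split
# back from the registry by its "coco_<split>" name.
def _example_load_split(basedir):
  name = "coco_train2017.1@10"  # seed 1, 10% of train2017 labeled
  ds = DatasetRegistry.get(name)  # instantiates COCODetection(basedir, "train2017.1@10")
  roidb = ds.load(add_gt=True, add_mask=True)
  print("{}: {} images".format(name, len(roidb)))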
if __name__ == "__main__":
basedir = "<add-data-path>"
c = COCODetection(basedir, "train2017")
roidb = c.load(add_gt=True, add_mask=True)
print("#Images:", len(roidb))
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" This module contains functions to determine where configuration and
data/cache files used by Astropy should be placed.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..extern import six
from ..utils.decorators import wraps
import os
import shutil
import sys
__all__ = ['get_config_dir', 'get_cache_dir', 'set_temp_config',
'set_temp_cache']
def _find_home():
""" Locates and return the home directory (or best approximation) on this
system.
Raises
------
OSError
If the home directory cannot be located - usually means you are running
Astropy on some obscure platform that doesn't have standard home
directories.
"""
    # this is used below to fix up encoding issues that sometimes crop up
# in py2.x but not in py3.x
if six.PY2:
decodepath = lambda pth: pth.decode(sys.getfilesystemencoding())
else:
decodepath = lambda pth: pth
# First find the home directory - this is inspired by the scheme ipython
# uses to identify "home"
if os.name == 'posix':
# Linux, Unix, AIX, OS X
if 'HOME' in os.environ:
homedir = decodepath(os.environ['HOME'])
else:
raise OSError('Could not find unix home directory to search for '
'astropy config dir')
elif os.name == 'nt': # This is for all modern Windows (NT or after)
if 'MSYSTEM' in os.environ and os.environ.get('HOME'):
# Likely using an msys shell; use whatever it is using for its
# $HOME directory
homedir = decodepath(os.environ['HOME'])
# Next try for a network home
elif 'HOMESHARE' in os.environ:
homedir = decodepath(os.environ['HOMESHARE'])
# See if there's a local home
elif 'HOMEDRIVE' in os.environ and 'HOMEPATH' in os.environ:
homedir = os.path.join(os.environ['HOMEDRIVE'],
os.environ['HOMEPATH'])
homedir = decodepath(homedir)
# Maybe a user profile?
elif 'USERPROFILE' in os.environ:
homedir = decodepath(os.path.join(os.environ['USERPROFILE']))
else:
try:
from ..extern.six.moves import winreg as wreg
shell_folders = r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders)
homedir = wreg.QueryValueEx(key, 'Personal')[0]
homedir = decodepath(homedir)
key.Close()
except Exception:
# As a final possible resort, see if HOME is present
if 'HOME' in os.environ:
homedir = decodepath(os.environ['HOME'])
else:
raise OSError('Could not find windows home directory to '
'search for astropy config dir')
else:
# for other platforms, try HOME, although it probably isn't there
if 'HOME' in os.environ:
homedir = decodepath(os.environ['HOME'])
else:
raise OSError('Could not find a home directory to search for '
                          'astropy config dir - are you on an unsupported '
'platform?')
return homedir
def get_config_dir(create=True):
"""
Determines the Astropy configuration directory name and creates the
directory if it doesn't exist.
This directory is typically ``$HOME/.astropy/config``, but if the
XDG_CONFIG_HOME environment variable is set and the
``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Returns
-------
configdir : str
The absolute path to the configuration directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_config, that overrides all
if set_temp_config._temp_path is not None:
xch = set_temp_config._temp_path
config_path = os.path.join(xch, 'astropy')
if not os.path.exists(config_path):
os.mkdir(config_path)
return os.path.abspath(config_path)
# first look for XDG_CONFIG_HOME
xch = os.environ.get('XDG_CONFIG_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, 'astropy')
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_astropy_dir('config', linkto))
def get_cache_dir():
"""
Determines the Astropy cache directory name and creates the directory if it
doesn't exist.
This directory is typically ``$HOME/.astropy/cache``, but if the
XDG_CACHE_HOME environment variable is set and the
``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Returns
-------
cachedir : str
The absolute path to the cache directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_cache, that overrides all
if set_temp_cache._temp_path is not None:
xch = set_temp_cache._temp_path
cache_path = os.path.join(xch, 'astropy')
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return os.path.abspath(cache_path)
# first look for XDG_CACHE_HOME
xch = os.environ.get('XDG_CACHE_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, 'astropy')
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_astropy_dir('cache', linkto))
class _SetTempPath(object):
_temp_path = None
_default_path_getter = None
def __init__(self, path=None, delete=False):
if path is not None:
path = os.path.abspath(path)
self._path = path
self._delete = delete
self._prev_path = self.__class__._temp_path
def __enter__(self):
self.__class__._temp_path = self._path
return self._default_path_getter()
def __exit__(self, *args):
self.__class__._temp_path = self._prev_path
if self._delete and self._path is not None:
shutil.rmtree(self._path)
def __call__(self, func):
"""Implements use as a decorator."""
@wraps(func)
def wrapper(*args, **kwargs):
with self:
func(*args, **kwargs)
return wrapper
class set_temp_config(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy config, primarily
for use with testing.
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the config path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy config
files, or create them if they do not already exist. If None, this
restores the config path to the user's default config path as returned
by `get_config_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_config_dir)
def __enter__(self):
# Special case for the config case, where we need to reset all the
# cached config objects
from .configuration import _cfgobjs
path = super(set_temp_config, self).__enter__()
_cfgobjs.clear()
return path
def __exit__(self, *args):
from .configuration import _cfgobjs
super(set_temp_config, self).__exit__(*args)
_cfgobjs.clear()
class set_temp_cache(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy download cache,
primarily for use with testing (though there may be other applications
for setting a different cache directory, for example to switch to a cache
dedicated to large files).
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the cache path
just within that function.
Parameters
----------
    path : str, optional
The directory (which must exist) in which to find the Astropy cache
files, or create them if they do not already exist. If None, this
restores the cache path to the user's default cache path as returned
by `get_cache_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_cache_dir)
def _find_or_create_astropy_dir(dirnm, linkto):
innerdir = os.path.join(_find_home(), '.astropy')
maindir = os.path.join(_find_home(), '.astropy', dirnm)
if not os.path.exists(maindir):
# first create .astropy dir if needed
if not os.path.exists(innerdir):
try:
os.mkdir(innerdir)
except OSError:
if not os.path.isdir(innerdir):
raise
elif not os.path.isdir(innerdir):
msg = 'Intended Astropy directory {0} is actually a file.'
raise IOError(msg.format(innerdir))
try:
os.mkdir(maindir)
except OSError:
if not os.path.isdir(maindir):
raise
if (not sys.platform.startswith('win') and
linkto is not None and
not os.path.exists(linkto)):
os.symlink(maindir, linkto)
elif not os.path.isdir(maindir):
msg = 'Intended Astropy {0} directory {1} is actually a file.'
raise IOError(msg.format(dirnm, maindir))
return os.path.abspath(maindir)
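
# A minimal illustrative sketch (hypothetical helper; ``tmpdir`` must be an
# existing directory): the two typical ways of using the temporary-path
# helpers above.
def _example_usage(tmpdir):
    # Context-manager form: code in the block sees tmpdir/astropy as the
    # config directory; the previous path is restored on exit.
    with set_temp_config(tmpdir):
        print(get_config_dir())

    # Decorator form: the wrapped function runs with a temporary cache path.
    @set_temp_cache(tmpdir)
    def poke_cache():
        print(get_cache_dir())
    poke_cache()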
|
|
from __future__ import generators
import nef.nef_theano as nef
import sys
import space
from ca.nengo.math.impl import *
from ca.nengo.model.plasticity.impl import *
from ca.nengo.util import *
from ca.nengo.plot import *
from com.bulletphysics import *
from com.bulletphysics.linearmath import *
from com.bulletphysics.dynamics.constraintsolver import *
from math import *
import java
from java.awt import Color
import ccm
import random
random.seed(11)
from math import pi
from com.threed.jpct import SimpleVector
from com.bulletphysics.linearmath import Transform
from javax.vecmath import Vector3f
dt=0.001
N=1
pstc=0.01
net = nef.Network('simple arm controller')
class getShoulder(ca.nengo.math.Function):
def map(self,X):
x = float(X[0])
y = float(X[1])
# make sure we're in the unit circle
if sqrt(x**2+y**2) > 1:
x = x / (sqrt(x**2+y**2))
y = y / (sqrt(x**2+y**2))
L1 = .5
L2 = .5
EPS = 1e-10
D = (x**2 + y**2 - L1**2 - L2**2) / (2*L1*L2) # law of cosines
if (x**2+y**2) < (L1**2+L2**2):
D = -D
# find elbow down angles from shoulder to elbow
#java.lang.System.out.println("x: %f y:%f"%(x,y))
if D < 1 and D > -1:
elbow = acos(D)
else:
elbow = 0
if (x**2+y**2) < (L1**2+L2**2):
elbow = pi - elbow
if x==0 and y==0: y = y+EPS
inside = L2*sin(elbow)/(sqrt(x**2+y**2))
if inside > 1: inside = 1
if inside < -1: inside = -1
if x==0:
shoulder = 1.5708 - asin(inside) # magic numbers from matlab
else:
shoulder = atan(y/x) - asin(inside)
if x < 0: shoulder = shoulder + pi
return shoulder
def getDimension(self):
return 2
class getElbow(ca.nengo.math.Function):
def map(self,X):
x = float(X[0])
y = float(X[1])
# make sure we're in the unit circle
if sqrt(x**2+y**2) > 1:
x = x / (sqrt(x**2+y**2))
y = y / (sqrt(x**2+y**2))
L1 = .5
L2 = .5
D = (x**2 + y**2 - L1**2 - L2**2) / (2*L1*L2) # law of cosines
if (x**2+y**2) < (L1**2+L2**2):
D = -D
# find elbow down angles from shoulder to elbow
if D < 1 and D > -1:
elbow = acos(D)
else:
elbow = 0
if (x**2+y**2) < (L1**2+L2**2):
elbow = pi - elbow
return elbow
def getDimension(self):
return 2
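# getShoulder and getElbow above implement standard two-link inverse
# kinematics with L1 = L2 = 0.5: the elbow angle follows from the law of
# cosines, cos(elbow) = (x^2 + y^2 - L1^2 - L2^2) / (2*L1*L2), and the
# shoulder angle corrects atan(y/x) by asin(L2*sin(elbow)/sqrt(x^2 + y^2)).
# getX and getY below are the matching forward kinematics.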
class getX(ca.nengo.math.Function):
def map(self,X):
shoulder = X[0]
elbow = X[1]
L1 = .5
L2 = .5
return L1*cos(shoulder)+L2*cos(shoulder+elbow)
def getDimension(self):
return 2
class getY(ca.nengo.math.Function):
def map(self,X):
shoulder = X[0]
elbow = X[1]
L1 = .5
L2 = .5
return L1*sin(shoulder)+L2*sin(shoulder+elbow)
def getDimension(self):
return 2
# input functions
refX=net.make_input('refX',[-1])
refY=net.make_input('refY',[1])
Tfunc=net.make_input('T matrix',[1,0,0,1])
F=net.make_input('F',[-1,0,-1,0,0,-1,0,-1])
# neural populations
convertXY=net.make("convert XY",N,2)
convertAngles=net.make("convert Angles",N,2)
funcT=net.make("funcT",N,6)
FX=net.make("FX",N,12)
controlV=net.make("control signal v",N,2) # calculate 2D control signal
controlU=net.make("control signal u",500,2, quick=True) # calculates
#jkoint torque control
#signal
# add terminations
convertXY.addDecodedTermination('refXY',[[1,0],[0,1]],pstc,False)
convertAngles.addDecodedTermination('shoulder',[[1],[0]],pstc,False)
convertAngles.addDecodedTermination('elbow',[[0],[1]],pstc,False)
FX.addDecodedTermination('inputFs',[[1,0,0,0,0,0,0,0],[0,1,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0], \
[0,0,0,1,0,0,0,0],[0,0,0,0,1,0,0,0],[0,0,0,0,0,1,0,0],[0,0,0,0,0,0,1,0],
[0,0,0,0,0,0,0,1], \
[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0]],
pstc,False)
FX.addDecodedTermination('X1',[[0],[0],[0],[0],[0],[0],[0],[0],[1],[0],[0],
[0]],pstc,False)
FX.addDecodedTermination('X2',[[0],[0],[0],[0],[0],[0],[0],[0],[0],[1],[0],
[0]],pstc,False)
FX.addDecodedTermination('X3',[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[1],
[0]],pstc,False)
FX.addDecodedTermination('X4',[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],
[1]],pstc,False)
funcT.addDecodedTermination('shoulderRef',[[1],[0],[0],[0],[0],[0]],pstc,False)
funcT.addDecodedTermination('elbowRef',[[0],[1],[0],[0],[0],[0]],pstc,False)
funcT.addDecodedTermination('shoulder',[[0],[0],[0],[0],[0],[0]],pstc,False)
funcT.addDecodedTermination('elbow',[[0],[0],[0],[0],[0],[0]],pstc,False)
funcT.addDecodedTermination('inputTs',[[0,0,0,0],[0,0,0,0],[1,0,0,0],[0,1,0,0],
[0,0,1,0],[0,0,0,1]],pstc,False)
controlV.addDecodedTermination('inputCurrentX',[[-1],[0]],pstc,False)
controlV.addDecodedTermination('inputCurrentY',[[0],[-1]],pstc,False)
controlV.addDecodedTermination('inputRefX',[[1],[0]],pstc,False)
controlV.addDecodedTermination('inputRefY',[[0],[1]],pstc,False)
controlU.addDecodedTermination('inputFuncT1',[[1],[0]],pstc,False)
controlU.addDecodedTermination('inputFuncT2',[[0],[1]],pstc,False)
controlU.addDecodedTermination('inputFX1',[[1],[0]],pstc,False)
controlU.addDecodedTermination('inputFX2',[[0],[1]],pstc,False)
# add origins
interpreter=DefaultFunctionInterpreter()
convertXY.addDecodedOrigin('elbowRef',[getElbow()],"AXON")
convertXY.addDecodedOrigin('shoulderRef',[getShoulder()],"AXON")
convertAngles.addDecodedOrigin('currentX',[getX()],"AXON")
convertAngles.addDecodedOrigin('currentY',[getY()],"AXON")
FX.addDecodedOrigin('FX1',[interpreter.parse("x0*x8+x1*x9+x2*x10+x3*x11",12)],
"AXON")
FX.addDecodedOrigin('FX2',[interpreter.parse("x4*x8+x5*x9+x6*x10+x7*x11",12)],
"AXON")
funcT.addDecodedOrigin('funcT1',[interpreter.parse("x0*x2+x1*x3",6)],"AXON")
funcT.addDecodedOrigin('funcT2',[interpreter.parse("x0*x4+x1*x5",6)],"AXON")
controlU.addDecodedOrigin('u1',[interpreter.parse("x0",2)],"AXON")
controlU.addDecodedOrigin('u2',[interpreter.parse("x1",2)],"AXON")
# add projections
net.connect(controlV.getOrigin('X'),convertXY.getTermination('refXY'))
net.connect(refX.getOrigin('origin'),controlV.getTermination('inputRefX'))
net.connect(refY.getOrigin('origin'),controlV.getTermination('inputRefY'))
net.connect(convertAngles.getOrigin('currentX'),controlV.getTermination(
'inputCurrentX'))
net.connect(convertAngles.getOrigin('currentY'),controlV.getTermination(
'inputCurrentY'))
net.connect(F.getOrigin('origin'),FX.getTermination('inputFs'))
net.connect(convertXY.getOrigin('shoulderRef'),funcT.getTermination(
'shoulderRef'))
net.connect(convertXY.getOrigin('elbowRef'),funcT.getTermination('elbowRef'))
net.connect(Tfunc.getOrigin('origin'),funcT.getTermination('inputTs'))
net.connect(funcT.getOrigin('funcT1'),controlU.getTermination('inputFuncT1'))
net.connect(funcT.getOrigin('funcT2'),controlU.getTermination('inputFuncT2'))
net.connect(FX.getOrigin('FX1'),controlU.getTermination('inputFX1'))
net.connect(FX.getOrigin('FX2'),controlU.getTermination('inputFX2'))
net.add_to_nengo()
class Room(space.Room):
def __init__(self):
space.Room.__init__(self,10,10,gravity=0,color=[Color(0xFFFFFF),
Color(0xFFFFFF),Color(0xEEEEEE),Color(0xDDDDDD),
Color(0xCCCCCC),Color(0xBBBBBB)])
def start(self):
self.target=space.Sphere(0.2,mass=1,color=Color(0xFF0000))
self.add(self.target,0,0,2)
torso=space.Box(0.1,0.1,1.5,mass=100000,draw_as_cylinder=True,
color=Color(0x4444FF))
self.add(torso,0,0,1)
upperarm=space.Box(0.1,0.7,0.1,mass=0.5,draw_as_cylinder=True,
color=Color(0x8888FF),overdraw_radius=1.2,overdraw_length=1.2)
self.add(upperarm,0.7,0.5,2)
upperarm.add_sphere_at(0,0.5,0,0.1,Color(0x4444FF),self)
upperarm.add_sphere_at(0,-0.5,0,0.1,Color(0x4444FF),self)
lowerarm=space.Box(0.1,0.75,0.1,mass=0.1,draw_as_cylinder=True,
color=Color(0x8888FF),overdraw_radius=1.2,overdraw_length=1.1)
self.add(lowerarm,0.7,1.5,2)
shoulder=HingeConstraint(torso.physics,upperarm.physics,
Vector3f(0.7,0.1,1),Vector3f(0,-0.5,0),
Vector3f(0,0,1),Vector3f(0,0,1))
elbow=HingeConstraint(upperarm.physics,lowerarm.physics,
Vector3f(0,0.5,0),Vector3f(0,-0.5,0),
Vector3f(0,0,1),Vector3f(0,0,1))
shoulder.setLimit(-pi/2,pi/2+.1)
elbow.setLimit(-pi,0)
self.physics.addConstraint(elbow)
self.physics.addConstraint(shoulder)
#upperarm.physics.applyTorqueImpulse(Vector3f(0,0,300))
#lowerarm.physics.applyTorqueImpulse(Vector3f(0,0,300))
self.sch.add(space.Room.start,args=(self,))
self.update_neurons()
self.upperarm=upperarm
self.lowerarm=lowerarm
self.shoulder=shoulder
self.elbow=elbow
self.hinge1=self.shoulder.hingeAngle
self.hinge2=self.elbow.hingeAngle
self.upperarm.physics.setSleepingThresholds(0,0)
self.lowerarm.physics.setSleepingThresholds(0,0)
def update_neurons(self):
while True:
scale=0.0003
m1=controlU.getOrigin('u1').getValues().getValues()[0]*scale
m2=controlU.getOrigin('u2').getValues().getValues()[0]*scale
v1=Vector3f(0,0,0)
v2=Vector3f(0,0,0)
#java.lang.System.out.println("m1: %f m2:%f"%(m1,m2))
self.upperarm.physics.applyTorqueImpulse(Vector3f(0,0,m1))
self.lowerarm.physics.applyTorqueImpulse(Vector3f(0,0,m2))
self.hinge1=-(self.shoulder.hingeAngle-pi/2)
self.hinge2=-self.elbow.hingeAngle
#java.lang.System.out.println("angle1: %f
#angle2:%f"%(self.hinge1,self.hinge2))
self.upperarm.physics.getAngularVelocity(v1)
self.lowerarm.physics.getAngularVelocity(v2)
# put bounds on the velocity possible
if v1.z > 2:
self.upperarm.physics.setAngularVelocity(Vector3f(0,0,2))
if v1.z < -2:
self.upperarm.physics.setAngularVelocity(Vector3f(0,0,-2))
if v2.z > 2:
self.lowerarm.physics.setAngularVelocity(Vector3f(0,0,2))
if v2.z < -2:
self.lowerarm.physics.setAngularVelocity(Vector3f(0,0,-2))
self.upperarm.physics.getAngularVelocity(v1)
self.lowerarm.physics.getAngularVelocity(v2)
wt=Transform()
#self.target.physics.motionState.getWorldTransform(wt)
wt.setIdentity()
tx=controlV.getTermination('inputRefX').input
if tx is not None:
wt.origin.x=tx.values[0]+0.7
else:
wt.origin.x=0.7
ty=controlV.getTermination('inputRefY').input
if ty is not None:
wt.origin.y=ty.values[0]+0.1
else:
wt.origin.y=0.1
wt.origin.z=2
ms=self.target.physics.motionState
ms.worldTransform=wt
self.target.physics.motionState=ms
self.vel1=v1.z
self.vel2=v2.z
yield 0.0001
r=ccm.nengo.create(Room)
net.add(r)
# need to make hinge1, hinge2, vel1, and vel2 external nodes and hook up
# the output to the FX matrix
r.exposeOrigin(r.getNode('hinge1').getOrigin('origin'),'shoulderAngle')
r.exposeOrigin(r.getNode('hinge2').getOrigin('origin'),'elbowAngle')
r.exposeOrigin(r.getNode('vel1').getOrigin('origin'),'shoulderVel')
r.exposeOrigin(r.getNode('vel2').getOrigin('origin'),'elbowVel')
net.connect(r.getOrigin('shoulderAngle'),FX.getTermination('X1'))
net.connect(r.getOrigin('elbowAngle'),FX.getTermination('X2'))
net.connect(r.getOrigin('shoulderVel'),FX.getTermination('X3'))
net.connect(r.getOrigin('elbowVel'),FX.getTermination('X4'))
net.connect(r.getOrigin('shoulderAngle'),convertAngles.getTermination(
'shoulder'))
net.connect(r.getOrigin('elbowAngle'),convertAngles.getTermination('elbow'))
net.connect(r.getOrigin('shoulderAngle'),funcT.getTermination('shoulder'))
net.connect(r.getOrigin('elbowAngle'),funcT.getTermination('elbow'))
# put everything in direct mode
net.network.setMode(ca.nengo.model.SimulationMode.DIRECT)
# except the last population
controlU.setMode(ca.nengo.model.SimulationMode.DEFAULT)
|
|
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from ryu.services.protocols.bgp import bgpspeaker
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE_ACTIVE
from neutron_dynamic_routing.services.bgp.driver import base
from neutron_dynamic_routing.services.bgp.driver import exceptions as bgp_driver_exc
from neutron_dynamic_routing.services.bgp.driver import utils
from neutron._i18n import _LE, _LI
LOG = logging.getLogger(__name__)
# Function for logging BGP peer and path changes.
def bgp_peer_down_cb(remote_ip, remote_as):
LOG.info(_LI('BGP Peer %(peer_ip)s for remote_as=%(peer_as)d went DOWN.'),
{'peer_ip': remote_ip, 'peer_as': remote_as})
def bgp_peer_up_cb(remote_ip, remote_as):
LOG.info(_LI('BGP Peer %(peer_ip)s for remote_as=%(peer_as)d is UP.'),
{'peer_ip': remote_ip, 'peer_as': remote_as})
def best_path_change_cb(event):
LOG.info(_LI("Best path change observed. cidr=%(prefix)s, "
"nexthop=%(nexthop)s, remote_as=%(remote_as)d, "
"is_withdraw=%(is_withdraw)s"),
{'prefix': event.prefix, 'nexthop': event.nexthop,
'remote_as': event.remote_as,
'is_withdraw': event.is_withdraw})
class RyuBgpDriver(base.BgpDriverBase):
"""BGP speaker implementation via Ryu."""
def __init__(self, cfg):
LOG.info(_LI('Initializing Ryu driver for BGP Speaker functionality.'))
self._read_config(cfg)
# Note: Even though Ryu can only support one BGP speaker as of now,
# the framework is kept generic for future use.
self.cache = utils.BgpMultiSpeakerCache()
def _read_config(self, cfg):
if cfg is None or cfg.bgp_router_id is None:
# If either cfg or router_id is not specified, log an error
LOG.error(_LE('BGP router-id MUST be specified for correct '
'functioning.'))
else:
self.routerid = cfg.bgp_router_id
LOG.info(_LI('Initialized Ryu BGP Speaker driver interface with '
'bgp_router_id=%s'), self.routerid)
def add_bgp_speaker(self, speaker_as):
curr_speaker = self.cache.get_bgp_speaker(speaker_as)
if curr_speaker is not None:
raise bgp_driver_exc.BgpSpeakerAlreadyScheduled(
current_as=speaker_as,
rtid=self.routerid)
# Ryu can only support one speaker
if self.cache.get_hosted_bgp_speakers_count() == 1:
raise bgp_driver_exc.BgpSpeakerMaxScheduled(count=1)
# Validate input parameters.
# speaker_as must be an integer in the allowed range.
utils.validate_as_num('local_as', speaker_as)
# Notify Ryu about BGP Speaker addition.
# Please note: since only route-advertisement support is
# implemented, we explicitly set the bgp_server_port
# attribute to 0, which disables listening on port 179.
curr_speaker = bgpspeaker.BGPSpeaker(as_number=speaker_as,
router_id=self.routerid, bgp_server_port=0,
best_path_change_handler=best_path_change_cb,
peer_down_handler=bgp_peer_down_cb,
peer_up_handler=bgp_peer_up_cb)
LOG.info(_LI('Added BGP Speaker for local_as=%(as)d with '
'router_id= %(rtid)s.'),
{'as': speaker_as, 'rtid': self.routerid})
self.cache.put_bgp_speaker(speaker_as, curr_speaker)
def delete_bgp_speaker(self, speaker_as):
curr_speaker = self.cache.get_bgp_speaker(speaker_as)
if not curr_speaker:
raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
rtid=self.routerid)
# Notify Ryu about BGP Speaker deletion
curr_speaker.shutdown()
LOG.info(_LI('Removed BGP Speaker for local_as=%(as)d with '
'router_id=%(rtid)s.'),
{'as': speaker_as, 'rtid': self.routerid})
self.cache.remove_bgp_speaker(speaker_as)
def add_bgp_peer(self, speaker_as, peer_ip, peer_as,
auth_type='none', password=None):
curr_speaker = self.cache.get_bgp_speaker(speaker_as)
if not curr_speaker:
raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
rtid=self.routerid)
# Validate peer_ip and peer_as.
utils.validate_as_num('remote_as', peer_as)
utils.validate_string(peer_ip)
utils.validate_auth(auth_type, password)
# Notify Ryu about BGP Peer addition
curr_speaker.neighbor_add(address=peer_ip,
remote_as=peer_as,
password=password,
connect_mode=CONNECT_MODE_ACTIVE)
LOG.info(_LI('Added BGP Peer %(peer)s for remote_as=%(as)d to '
'BGP Speaker running for local_as=%(local_as)d.'),
{'peer': peer_ip, 'as': peer_as, 'local_as': speaker_as})
def delete_bgp_peer(self, speaker_as, peer_ip):
curr_speaker = self.cache.get_bgp_speaker(speaker_as)
if not curr_speaker:
raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
rtid=self.routerid)
# Validate peer_ip. It must be a string.
utils.validate_string(peer_ip)
# Notify Ryu about BGP Peer removal
curr_speaker.neighbor_del(address=peer_ip)
LOG.info(_LI('Removed BGP Peer %(peer)s from BGP Speaker '
'running for local_as=%(local_as)d.'),
{'peer': peer_ip, 'local_as': speaker_as})
def advertise_route(self, speaker_as, cidr, nexthop):
curr_speaker = self.cache.get_bgp_speaker(speaker_as)
if not curr_speaker:
raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
rtid=self.routerid)
# Validate cidr and nexthop. Both must be strings.
utils.validate_string(cidr)
utils.validate_string(nexthop)
# Notify Ryu about route advertisement
curr_speaker.prefix_add(prefix=cidr, next_hop=nexthop)
LOG.info(_LI('Route cidr=%(prefix)s, nexthop=%(nexthop)s is '
'advertised for BGP Speaker running for '
'local_as=%(local_as)d.'),
{'prefix': cidr, 'nexthop': nexthop, 'local_as': speaker_as})
def withdraw_route(self, speaker_as, cidr, nexthop=None):
curr_speaker = self.cache.get_bgp_speaker(speaker_as)
if not curr_speaker:
raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
rtid=self.routerid)
# Validate cidr. It must be a string.
utils.validate_string(cidr)
# Notify Ryu about route withdrawal
curr_speaker.prefix_del(prefix=cidr)
LOG.info(_LI('Route cidr=%(prefix)s is withdrawn from BGP Speaker '
'running for local_as=%(local_as)d.'),
{'prefix': cidr, 'local_as': speaker_as})
def get_bgp_speaker_statistics(self, speaker_as):
LOG.info(_LI('Collecting BGP Speaker statistics for local_as=%d.'),
speaker_as)
curr_speaker = self.cache.get_bgp_speaker(speaker_as)
if not curr_speaker:
raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
rtid=self.routerid)
# TODO(vikram): Filter and return the necessary information.
# Will be done as part of new RFE requirement
# https://bugs.launchpad.net/neutron/+bug/1527993
return curr_speaker.neighbor_state_get()
def get_bgp_peer_statistics(self, speaker_as, peer_ip):
LOG.info(_LI('Collecting BGP Peer statistics for peer_ip=%(peer)s, '
'running in speaker_as=%(speaker_as)d '),
{'peer': peer_ip, 'speaker_as': speaker_as})
curr_speaker = self.cache.get_bgp_speaker(speaker_as)
if not curr_speaker:
raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
rtid=self.routerid)
# TODO(vikram): Filter and return the necessary information.
# Will be done as part of new RFE requirement
# https://bugs.launchpad.net/neutron/+bug/1527993
return curr_speaker.neighbor_state_get(address=peer_ip)
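# ---------------------------------------------------------------------------
# A hedged usage sketch (not part of the driver): 'FakeConf' below is a
# stand-in for the oslo.config section that the BGP agent normally passes in;
# only the bgp_router_id attribute is required by _read_config() above. The
# addresses come from documentation ranges and are purely illustrative.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class FakeConf(object):
        bgp_router_id = '192.0.2.1'

    driver = RyuBgpDriver(FakeConf())
    driver.add_bgp_speaker(64512)
    driver.add_bgp_peer(64512, '198.51.100.2', 64513)
    driver.advertise_route(64512, '203.0.113.0/24', '198.51.100.1')
    driver.withdraw_route(64512, '203.0.113.0/24')
    driver.delete_bgp_peer(64512, '198.51.100.2')
    driver.delete_bgp_speaker(64512)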
|
|
from copy import deepcopy
import numpy as np
from menpo.base import Vectorizable
from menpo.model import Similarity2dInstanceModel
from menpo.transform.base import Alignable, VComposableTransform, VInvertible
class ModelDrivenTransform(Vectorizable, VComposableTransform, VInvertible,
Alignable):
r"""
A transform that couples a traditional landmark-based transform to a
statistical model such that source points of the alignment transform
are the points of the model. The parameters of the transform are just
the weights of the statistical model.
If no source is provided, the mean of the model is defined as the
source landmarks of the transform.
Parameters
----------
model : :class:`menpo.model.base.StatisticalModel`
A linear statistical shape model.
transform_cls : :class:`menpo.transform.AlignableTransform`
A class of :class:`menpo.transform.base.AlignableTransform`
The align constructor will be called on this with the source
and target landmarks. The target is
set to the points generated from the model using the
provided weights - the source is either given or set to the
model's mean.
source : :class:`menpo.shape.base.PointCloud`
The source landmarks of the transform. If no ``source`` is provided the
mean of the model is used.
weights : (P,) ndarray
The reconstruction weights that will be fed to the model in order to
generate an instance of the target landmarks.
composition: 'both', 'warp' or 'model', optional
The composition approximation employed by this
ModelDrivenTransform.
Default: 'both'
"""
#TODO: Rethink this transform so it knows how to deal with complex shapes
def __init__(self, model, transform_cls, source=None, weights=None,
composition='both'):
super(ModelDrivenTransform, self).__init__()
self._cached_points = None
self.model = model
self.composition = composition
if source is None:
# set the source to the model's mean
source = self.model.mean
self._source = source
if weights is None:
# set all weights to 0 (yielding the mean)
weights = np.zeros(self.model.n_active_components)
self._weights = weights
self._target = self._target_for_weights(self._weights)
# by providing _source and _target we conform to the
# AlignmentTransform interface
# utilize the align constructor to build the transform
self.transform = transform_cls.align(self.source, self.target)
@property
def n_dims(self):
r"""
The number of dimensions that the transform supports.
:type: int
"""
return self.transform.n_dims
@property
def n_parameters(self):
r"""
The total number of parameters.
Simply ``n_weights``.
:type: int
"""
return self.n_weights
@property
def n_weights(self):
r"""
The number of parameters in the linear model.
:type: int
"""
return self.model.n_active_components
@property
def has_true_inverse(self):
return False
def _build_pseudoinverse(self):
return self.from_vector(-self.as_vector())
@property
def weights(self):
return self._weights
@weights.setter
def weights(self, value):
r"""
Setting the weights value automatically triggers a recalculation of
the target, and an update of the transform
"""
self.target = self._target_for_weights(value)
def jacobian(self, points):
"""
Calculates the Jacobian of the ModelDrivenTransform wrt to
its parameters (the weights). This is done by chaining the relative
weight of each point wrt the source landmarks, i.e. the Jacobian of
the warp wrt the source landmarks when the target is assumed to be
equal to the source (dW/dx), together with the Jacobian of the
linear model wrt its weights (dX/dp).
Parameters
-----------
points: (N, D) ndarray
The points at which the Jacobian will be evaluated.
Returns
-------
dW/dp : (N, P, D) ndarray
The Jacobian of the ModelDrivenTransform evaluated at the
previous points.
"""
# check if re-computation of dW/dx can be avoided
if not np.array_equal(self._cached_points, points):
# recompute dW/dx, i.e. the relative weight of each point wrt
# the source landmarks
self.dW_dX = self.transform.weight_points(points)
# cache points
self._cached_points = points
# dX/dp is simply the Jacobian of the model
dX_dp = self.model.jacobian
# dW_dX: n_points x n_points x n_dims
# dX_dp: n_points x n_params x n_dims
dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)
# dW_dp: n_points x n_params x n_dims
return dW_dp
# TODO: document me
def jacobian_points(self, points):
r"""
TO BE DOCUMENTED
Returns
-------
dW_dx : (N, D, D) ndarray
The jacobian with respect to the points
"""
pass
def as_vector(self):
r"""
Return the current parameters of this transform - this is the
just the linear model's weights
Returns
-------
params : (``n_parameters``,) ndarray
The vector of parameters
"""
return self.weights
def from_vector_inplace(self, vector):
r"""
Updates the ModelDrivenTransform's state from its
vectorized form.
"""
self.weights = vector
def _target_setter(self, new_target):
r"""
On a new target being set, we need to:
1. Find the optimum weights that align the model to this target,
and set them as self.weights.
2. Update the transform to point to the closest target that the
model can provide to the requested target
3. Set our target to the closest target that the model can provide
to the requested target.
Parameters
----------
new_target: :class:`PointCloud`
The new_target that we want to set.
"""
# 1. Find the optimum weights and set them
self._weights = self._weights_for_target(new_target)
# 2. Find the closest target the model can reproduce and trigger an
# update of our transform
self.transform.target = self._target_for_weights(self._weights)
# 3. As always, update our self._target
self._target = self.transform.target
def _apply(self, x, **kwargs):
r"""
Apply this transform to the given object. Uses the internal transform.
Parameters
----------
x : (N, D) ndarray or a transformable object
The object to be transformed.
kwargs : dict
Passed through to transforms ``apply_inplace`` method.
Returns
--------
transformed : (N, D) ndarray or object
The transformed object
"""
return self.transform._apply(x, **kwargs)
def _compose_before_inplace(self, transform):
r"""
a_orig = deepcopy(a)
a.compose_before_inplace(b)
a.apply(p) == b.apply(a_orig.apply(p))
a is permanently altered to be the result of the composition. b is
left unchanged.
Parameters
----------
transform : :class:`ModelDrivenTransform`
Transform to be applied **after** self
Returns
--------
transform : self
self, updated to the result of the composition
"""
# naive approach - update self to be equal to transform and
# compose_before_from_vector_inplace
self_vector = self.as_vector().copy()
self.update_from_vector(transform.as_vector())
return self.compose_after_from_vector_inplace(self_vector)
def _compose_after_inplace(self, md_transform):
r"""
a_orig = deepcopy(a)
a.compose_after_inplace(b)
a.apply(p) == a_orig.apply(b.apply(p))
a is permanently altered to be the result of the composition. b is
left unchanged.
Parameters
----------
transform : :class:`ModelDrivenTransform`
Transform to be applied **before** self
Returns
--------
transform : self
self, updated to the result of the composition
"""
if self.composition == 'model':
# TODO this seems to be the same, revisit
self.target = self._compose_after_model(md_transform.target)
elif self.composition == 'warp':
self.target = self._compose_after_warp(md_transform.target)
elif self.composition == 'both':
new_params = self._compose_after_both(md_transform.as_vector())
self.from_vector_inplace(new_params)
else:
raise ValueError('Unknown composition string selected. Valid '
'options are: model, warp, both')
return self
def compose_after_from_vector_inplace(self, vector):
r"""
a_orig = deepcopy(a)
a.compose_after_from_vector_inplace(b_vec)
b = self.from_vector(b_vec)
a.apply(p) == a_orig.apply(b.apply(p))
a is permanently altered to be the result of the composition. b_vec
is left unchanged.
compose_after this :class:`ModelDrivenTransform` with another inplace.
Rather than requiring a new ModelDrivenTransform to compose_after
with, this method only requires the parameters of the new transform.
Parameters
----------
vector : (N,) ndarray
Vectorized :class:`ModelDrivenTransform` to be applied **before**
self
Returns
--------
transform : self
self, updated to the result of the composition
"""
if self.composition == 'model':
new_mdtransform = self.from_vector(vector)
self.target = self._compose_after_model(new_mdtransform.target)
elif self.composition == 'warp':
new_mdtransform = self.from_vector(vector)
self.target = self._compose_after_warp(new_mdtransform.target)
elif self.composition == 'both':
self.from_vector_inplace(self._compose_after_both(vector))
else:
raise ValueError('Unknown composition string selected. Valid '
'options are: model, warp, both')
def _compose_after_model(self, other_target):
r"""
Composes two statistically driven transforms together.
Parameters
----------
other_target : :class:`PointCloud`
the target of the ModelDrivenTransform we are
composing with.
Returns
-------
target: :class:`PointCloud`
The new target of the composed result
"""
model_variation = self.target.points - self.model.mean.points
composed_target = model_variation + other_target.points
from menpo.shape import PointCloud
return PointCloud(composed_target)
# TODO: The call to transform.apply_inplace will not work properly for PWA
# - Define a new function in TPS & PWA called .apply_to_target
# - For TPS this function should be the same as the normal .apply_inplace()
# method
# - For PWA it should implement Baker's algorithmic approach to
# composition
def _compose_after_warp(self, other_target):
r"""
Composes two statistically driven transforms together. This approach
composes them by warping the other transform's target with this
transform's internal warp.
Parameters
----------
other_target : :class:`PointCloud`
the target of the ModelDrivenTransform we are
composing with.
Returns
-------
target: :class:`PointCloud`
The new target of the composed result
"""
return self.transform.apply(other_target)
def _compose_after_both(self, mdt_vector):
r"""
Composes two statistically driven transforms together based on the
first order approximation proposed by Papandreou and Maragos.
The resulting vector of parameters is equivalent to
self.compose_after_from_vector(mdt_vector)
Parameters
----------
mdt_vector : (P,) ndarray
the parameters of the ModelDrivenTransform we are
composing with, as provided by .as_vector().
Returns
-------
vector: (P,) ndarray
The new parameters of the composed result
References
----------
.. [1] G. Papandreou and P. Maragos, "Adaptive and Constrained
Algorithms for Inverse Compositional Active Appearance Model
Fitting", CVPR08
"""
model_jacobian = self.model.jacobian
# compute:
# -> dW/dp when p=0
# -> dW/dp when p!=0
# -> dW/dx when p!=0 evaluated at the source landmarks
# dW/dp when p=0 and when p!=0 are the same and simply given by
# the Jacobian of the model
dW_dp_0 = model_jacobian
dW_dp = dW_dp_0
# dW_dp_0: n_points x n_params x n_dims
# dW_dp: n_points x n_params x n_dims
dW_dx = self.transform.jacobian_points(self.model.mean.points)
# dW_dx: n_points x n_dims x n_dims
#TODO: Can we do this without splitting across the two dimensions?
dW_dx_x = dW_dx[:, 0, :].flatten()[..., None]
dW_dx_y = dW_dx[:, 1, :].flatten()[..., None]
dW_dp_0_mat = np.reshape(dW_dp_0, (self.model.mean.n_points *
self.n_dims, self.n_parameters))
dW_dx_dW_dp_0 = dW_dp_0_mat * dW_dx_x + dW_dp_0_mat * dW_dx_y
dW_dx_dW_dp_0 = np.reshape(dW_dx_dW_dp_0, (self.model.mean.n_points,
self.n_parameters,
self.n_dims))
# dW_dx: n_points x n_dims x n_dims
# dW_dp_0: n_points x n_params x n_dims
# dW_dx_dW_dp_0: n_points x n_params x n_dims
J = np.einsum('ijk, ilk -> jl', dW_dp, dW_dx_dW_dp_0)
H = np.einsum('ijk, ilk -> jl', dW_dp, dW_dp)
Jp = np.linalg.solve(H, J)
# Jp: n_params x n_params
return self.as_vector() + np.dot(Jp, mdt_vector)
def _target_for_weights(self, weights):
r"""
Return the appropriate target for the model weights provided.
Subclasses can override this.
Parameters
----------
weights: (P,) ndarray
weights of the statistical model that should be used to generate a
new instance
Returns
-------
new_target: :class:`menpo.shape.PointCloud`
A new target for the weights provided
"""
return self.model.instance(weights)
def _weights_for_target(self, target):
r"""
Return the appropriate model weights for target provided.
Subclasses can override this.
Parameters
----------
target: :class:`menpo.shape.PointCloud`
The target that the statistical model will try to reproduce
Returns
-------
weights: (P,) ndarray
Weights of the statistical model that generate the closest
PointCloud to the requested target
"""
return self.model.project(target)
def pseudoinverse_vector(self, vector):
r"""
The vectorized pseudoinverse of a provided vector instance.
Syntactic sugar for
self.from_vector(vector).pseudoinverse.as_vector()
On ModelDrivenTransform this is especially fast - we just negate the
vector provided.
Parameters
----------
vector : (P,) ndarray
A vectorized version of self
Returns
-------
pseudoinverse_vector : (N,) ndarray
The pseudoinverse of the vector provided
"""
# just have to negate the parameters!
return -vector
class GlobalMDTransform(ModelDrivenTransform):
r"""
A transform that couples an alignment transform to a
statistical model together with a global similarity transform,
such that the parameters of the transform are fully specified by
both the weights of the statistical model and the parameters of the
similarity transform. The model is assumed to
generate an instance which is then transformed by the similarity
transform; the result defines the target landmarks of the transform.
If no source is provided, the mean of the model is defined as the
source landmarks of the transform.
Parameters
----------
model : :class:`menpo.model.base.StatisticalModel`
A linear statistical shape model.
transform_cls : :class:`menpo.transform.AlignableTransform`
A class of :class:`menpo.transform.base.AlignableTransform`
The align constructor will be called on this with the source
and target landmarks. The target is
set to the points generated from the model using the
provided weights - the source is either given or set to the
model's mean.
global_transform : :class:`menpo.transform.AlignableTransform`
A class of :class:`menpo.transform.base.AlignableTransform`
The global transform that should be applied to the model output.
Doesn't have to have been constructed from the .align() constructor.
Note that the GlobalMDTransform isn't guaranteed to hold on to the
exact object passed in here - so don't expect external changes to
the global_transform to be reflected in the behavior of this object.
source : :class:`menpo.shape.base.PointCloud`, optional
The source landmarks of the transform. If no ``source`` is provided the
mean of the model is used.
weights : (P,) ndarray, optional
The reconstruction weights that will be fed to the model in order to
generate an instance of the target landmarks.
composition: 'both', 'warp' or 'model', optional
The composition approximation employed by this
ModelDrivenTransform.
Default: `both`
"""
def __init__(self, model, transform_cls, global_transform, source=None,
weights=None, composition='both'):
# need to set the global transform right away - self
# ._target_for_weights() needs it in superclass __init__
self.global_transform = global_transform
super(GlobalMDTransform, self).__init__(
model, transform_cls, source=source, weights=weights,
composition=composition)
# after construction, we want our global_transform() to be an align
# transform. This is a little hacky, but is ok as long as the
# superclasses __init__ doesn't use _weights_for_target.
self.global_transform = global_transform.align(self.model.mean,
self.target)
@property
def n_parameters(self):
r"""
The total number of parameters.
This is ``n_weights + n_global_parameters``.
:type: int
"""
return self.n_weights + self.n_global_parameters
@property
def n_global_parameters(self):
r"""
The number of parameters in the ``global_transform``
:type: int
"""
return self.global_transform.n_parameters
@property
def global_parameters(self):
r"""
The parameters for the global transform.
:type: (``n_global_parameters``,) ndarray
"""
return self.global_transform.as_vector()
def jacobian(self, points):
"""
Calculates the Jacobian of the ModelDrivenTransform wrt to
its parameters (the weights). This is done by chaining the relative
weight of each point wrt the source landmarks, i.e. the Jacobian of
the warp wrt the source landmarks when the target is assumed to be
equal to the source (dW/dx), together with the Jacobian of the
linear model (and of the global transform if present) wrt its
weights (dX/dp).
Parameters
-----------
points: (N, D) ndarray
The points at which the Jacobian will be evaluated.
Returns
-------
dW/dp : (N, P, D) ndarray
The Jacobian of the ModelDrivenTransform evaluated at the
previous points.
"""
# check if re-computation of dW/dx can be avoided
if not np.array_equal(self._cached_points, points):
# recompute dW/dx, i.e. the relative weight of each point wrt
# the source landmarks
self.dW_dX = self.transform.weight_points(points)
# cache points
self._cached_points = points
model_jacobian = self.model.jacobian
# compute dX/dp
# dX/dq is the Jacobian of the global transform evaluated at the
# mean of the model.
dX_dq = self._global_transform_jacobian(self.model.mean.points)
# dX_dq: n_points x n_global_params x n_dims
# by application of the chain rule dX_db is the Jacobian of the
# model transformed by the linear component of the global transform
dS_db = model_jacobian
dX_dS = self.global_transform.jacobian_points(
self.model.mean.points)
dX_db = np.einsum('ilj, idj -> idj', dX_dS, dS_db)
# dS_db: n_points x n_weights x n_dims
# dX_dS: n_points x n_dims x n_dims
# dX_db: n_points x n_weights x n_dims
# dX/dp is simply the concatenation of the previous two terms
dX_dp = np.hstack((dX_dq, dX_db))
# dW_dX: n_points x n_points x n_dims
# dX_dp: n_points x n_params x n_dims
dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)
# dW_dp: n_points x n_params x n_dims
return dW_dp
def _global_transform_jacobian(self, points):
return self.global_transform.jacobian(points)
def as_vector(self):
r"""
Return the current parameters of this transform. This is the
concatenated vector of the linear model's weights and the global
transform parameters.
Returns
-------
params : (``n_parameters``,) ndarray
The vector of parameters
"""
return np.hstack((self.global_parameters, self.weights))
def from_vector_inplace(self, vector):
# the only extra step we have to take is splitting the vector into the
# global transform parameters and the model weights
global_params = vector[:self.n_global_parameters]
model_params = vector[self.n_global_parameters:]
self._update_global_weights(global_params)
self.weights = model_params
def _update_global_weights(self, global_weights):
r"""
Hook that allows for overriding behavior when the global weights are
set. Default implementation simply asks global_transform to
update itself from vector.
"""
self.global_transform.from_vector_inplace(global_weights)
def _compose_after_model(self, other_target):
r"""
Composes two statistically driven transforms together.
Parameters
----------
other_target : :class:`PointCloud`
the target of the ModelDrivenTransform we are
composing with.
Returns
-------
target: :class:`PointCloud`
The new target of the composed result
"""
model_variation = (
self.global_transform.pseudoinverse.apply(self.target.points) -
self.model.mean.points)
composed_target = self.global_transform.apply(
model_variation + other_target.points)
from menpo.shape import PointCloud
return PointCloud(composed_target)
def _compose_after_both(self, mdt_vector):
r"""
Composes two statistically driven transforms together based on the
first order approximation proposed by Papandreou and Maragos.
Parameters
----------
new_sdt_parameters : (P,) ndarray
the parameters of the ModelDrivenTransform we are
composing with, as provided by .as_vector().
Returns
-------
parameters: (P,) ndarray
The new parameters of the composed result
References
----------
.. [1] G. Papandreou and P. Maragos, "Adaptive and Constrained
Algorithms for Inverse Compositional Active Appearance Model
Fitting", CVPR08
"""
model_jacobian = self.model.jacobian
# compute:
# -> dW/dp when p=0
# -> dW/dp when p!=0
# -> dW/dx when p!=0 evaluated at the source landmarks
# dW/dq when p=0 and when p!=0 are the same and given by the
# Jacobian of the global transform evaluated at the mean of the
# model
dW_dq = self._global_transform_jacobian(self.model.mean.points)
# dW_dq: n_points x n_global_params x n_dims
# dW/db when p=0, is the Jacobian of the model
dW_db_0 = model_jacobian
# dW_db_0: n_points x n_weights x n_dims
# dW/dp when p=0, is simply the concatenation of the previous
# two terms
dW_dp_0 = np.hstack((dW_dq, dW_db_0))
# dW_dp_0: n_points x n_params x n_dims
# by application of the chain rule dW_db when p!=0,
# is the Jacobian of the global transform wrt the points times
# the Jacobian of the model: dX(S)/db = dX/dS * dS/db
dW_dS = self.global_transform.jacobian_points(self.model.mean.points)
dW_db = np.einsum('ilj, idj -> idj', dW_dS, dW_db_0)
# dW_dS: n_points x n_dims x n_dims
# dW_db: n_points x n_weights x n_dims
# dW/dp is simply the concatenation of dX_dq with dX_db
dW_dp = np.hstack((dW_dq, dW_db))
# dW_dp: n_points x n_params x n_dims
dW_dx = self.transform.jacobian_points(self.model.mean.points)
#dW_dx = np.dot(dW_dx, self.global_transform.linear_component.T)
# dW_dx: n_points x n_dims x n_dims
#TODO: Can we do this without splitting across the two dimensions?
dW_dx_x = dW_dx[:, 0, :].flatten()[..., None]
dW_dx_y = dW_dx[:, 1, :].flatten()[..., None]
dW_dp_0_mat = np.reshape(dW_dp_0, (self.model.mean.n_points * self.n_dims,
self.n_parameters))
dW_dx_dW_dp_0 = dW_dp_0_mat * dW_dx_x + dW_dp_0_mat * dW_dx_y
dW_dx_dW_dp_0 = np.reshape(dW_dx_dW_dp_0, (self.model.mean.n_points,
self.n_parameters,
self.n_dims))
# dW_dx: n_points x n_dims x n_dims
# dW_dp_0: n_points x n_params x n_dims
# dW_dx_dW_dp_0: n_points x n_params x n_dims
J = np.einsum('ijk, ilk -> jl', dW_dp, dW_dx_dW_dp_0)
H = np.einsum('ijk, ilk -> jl', dW_dp, dW_dp)
Jp = np.linalg.solve(H, J)
# Jp: n_params x n_params
return self.as_vector() + np.dot(Jp, mdt_vector)
def _target_for_weights(self, weights):
r"""
Return the appropriate target for the model weights provided,
accounting for the effect of the global transform
Parameters
----------
weights: (P,) ndarray
weights of the statistical model that should be used to generate a
new instance
Returns
-------
new_target: :class:`menpo.shape.PointCloud`
A new target for the weights provided
"""
return self.global_transform.apply(self.model.instance(weights))
def _weights_for_target(self, target):
r"""
Return the appropriate model weights for target provided, accounting
for the effect of the global transform. Note that this method
updates the global transform to be in the correct state.
Parameters
----------
target: :class:`menpo.shape.PointCloud`
The target that the statistical model will try to reproduce
Returns
-------
weights: (P,) ndarray
Weights of the statistical model that generate the closest
PointCloud to the requested target
"""
self._update_global_transform(target)
projected_target = self.global_transform.pseudoinverse.apply(target)
# now we have the target in model space, project it to recover the
# weights
new_weights = self.model.project(projected_target)
# TODO investigate the impact of this, could be problematic
# the model can't perfectly reproduce the target we asked for -
# reset the global_transform.target to what it CAN produce
#refined_target = self._target_for_weights(new_weights)
#self.global_transform.target = refined_target
return new_weights
def _update_global_transform(self, target):
self.global_transform.target = target
class OrthoMDTransform(GlobalMDTransform):
r"""
A transform that couples an alignment transform to a
statistical model together with a global similarity transform,
such that the parameters of the transform are fully specified by
both the weights of the statistical model and the parameters of the
similarity transform. The model is assumed to
generate an instance which is then transformed by the similarity
transform; the result defines the target landmarks of the transform.
If no source is provided, the mean of the model is defined as the
source landmarks of the transform.
This transform (in contrast to the :class:`GlobalMDTransform`)
additionally orthonormalizes both the global and the model basis against
each other, ensuring that orthogonality and normalization are enforced
across the unified bases.
Parameters
----------
model : :class:`menpo.model.base.StatisticalModel`
A linear statistical shape model.
transform_cls : :class:`menpo.transform.AlignableTransform`
A class of :class:`menpo.transform.base.AlignableTransform`
The align constructor will be called on this with the source
and target landmarks. The target is
set to the points generated from the model using the
provided weights - the source is either given or set to the
model's mean.
global_transform : :class:`menpo.transform.AlignableTransform`
A class of :class:`menpo.transform.base.AlignableTransform`
The global transform that should be applied to the model output.
Doesn't have to have been constructed from the .align() constructor.
Note that the GlobalMDTransform isn't guaranteed to hold on to the
exact object passed in here - so don't expect external changes to
the global_transform to be reflected in the behavior of this object.
source : :class:`menpo.shape.base.PointCloud`, optional
The source landmarks of the transform. If no ``source`` is provided the
mean of the model is used.
weights : (P,) ndarray, optional
The reconstruction weights that will be fed to the model in order to
generate an instance of the target landmarks.
composition: 'both', 'warp' or 'model', optional
The composition approximation employed by this
ModelDrivenTransform.
Default: `both`
"""
def __init__(self, model, transform_cls, global_transform, source=None,
weights=None, composition='both'):
# 1. Construct similarity model from the mean of the model
self.similarity_model = Similarity2dInstanceModel(model.mean)
# 2. Orthonormalize model and similarity model
model = deepcopy(model)
model.orthonormalize_against_inplace(self.similarity_model)
self.similarity_weights = self.similarity_model.project(
global_transform.apply(model.mean))
super(OrthoMDTransform, self).__init__(
model, transform_cls, global_transform, source=source,
weights=weights, composition=composition)
def _update_global_transform(self, target):
self.similarity_weights = self.similarity_model.project(target)
self._update_global_weights(self.similarity_weights)
def _update_global_weights(self, global_weights):
self.similarity_weights = global_weights
new_target = self.similarity_model.instance(global_weights)
self.global_transform.target = new_target
def _global_transform_jacobian(self, points):
return self.similarity_model.jacobian
@property
def global_parameters(self):
r"""
The parameters for the global transform.
:type: (``n_global_parameters``,) ndarray
"""
return self.similarity_weights
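# ---------------------------------------------------------------------------
# A standalone shape-check sketch (illustrative only, using random arrays
# rather than real menpo data) of the einsum contraction used in
# ModelDrivenTransform.jacobian: dW_dX (n_points x n_points x n_dims) chained
# with dX_dp (n_points x n_params x n_dims) yields dW_dp of shape
# (n_points x n_params x n_dims).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    n_points, n_params, n_dims = 68, 12, 2
    dW_dX = np.random.rand(n_points, n_points, n_dims)
    dX_dp = np.random.rand(n_points, n_params, n_dims)
    dW_dp = np.einsum('ild, lpd -> ipd', dW_dX, dX_dp)
    assert dW_dp.shape == (n_points, n_params, n_dims)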
|
|
# Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
"""
Arduino
Arduino Framework allows writing cross-platform software to control
devices attached to a wide range of Arduino boards to create all
kinds of creative coding, interactive objects, spaces or physical experiences.
http://arduino.cc/en/Reference/HomePage
"""
from os import listdir, walk
from os.path import isdir, isfile, join
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
BOARD_OPTS = env.get("BOARD_OPTIONS", {})
BOARD_BUILDOPTS = BOARD_OPTS.get("build", {})
BOARD_CORELIBDIRNAME = BOARD_BUILDOPTS.get("core")
#
# Determine framework directory
# based on development platform
#
PLATFORMFW_DIR = join("$PIOPACKAGES_DIR",
"framework-arduino${PLATFORM.replace('atmel', '')}")
if "digispark" in BOARD_BUILDOPTS.get("core"):
BOARD_CORELIBDIRNAME = "digispark"
PLATFORMFW_DIR = join(
"$PIOPACKAGES_DIR",
"framework-arduino%s" % (
"sam" if BOARD_BUILDOPTS.get("cpu") == "cortex-m3" else "avr")
)
elif env.get("PLATFORM") == "timsp430":
PLATFORMFW_DIR = join(
"$PIOPACKAGES_DIR",
"framework-arduinomsp430"
)
elif env.get("PLATFORM") == "espressif":
env.Prepend(
CPPPATH=[join("$PLATFORMFW_DIR", "sdk", "include")],
LIBPATH=[join("$PLATFORMFW_DIR", "sdk", "lib")],
LIBS=["smartconfig", "pp", "main", "wpa", "lwip",
"net80211", "phy", "hal", "gcc", "m"]
)
env.Replace(PLATFORMFW_DIR=PLATFORMFW_DIR)
#
# Lookup for specific core's libraries
#
if isdir(join(env.subst("$PLATFORMFW_DIR"), "libraries", "__cores__",
BOARD_CORELIBDIRNAME)):
lib_dirs = env.get("LIBSOURCE_DIRS")
lib_dirs.insert(
lib_dirs.index(join("$PLATFORMFW_DIR", "libraries")),
join(PLATFORMFW_DIR, "libraries", "__cores__", BOARD_CORELIBDIRNAME)
)
env.Replace(
LIBSOURCE_DIRS=lib_dirs
)
#
# Base
#
ARDUINO_VERSION = int(
open(join(env.subst("$PLATFORMFW_DIR"),
"version.txt")).read().replace(".", "").strip())
# usb flags
ARDUINO_USBDEFINES = []
if "usb_product" in BOARD_BUILDOPTS:
ARDUINO_USBDEFINES = [
"USB_VID=${BOARD_OPTIONS['build']['vid']}",
"USB_PID=${BOARD_OPTIONS['build']['pid']}",
'USB_PRODUCT=\\"%s\\"' % (env.subst(
"${BOARD_OPTIONS['build']['usb_product']}").replace('"', ""))
]
if env.get("PLATFORM") == "teensy":
ARDUINO_USBDEFINES += [
"ARDUINO=10600",
"TEENSYDUINO=%d" % ARDUINO_VERSION
]
else:
ARDUINO_USBDEFINES += ["ARDUINO=%d" % ARDUINO_VERSION]
env.Append(
CPPDEFINES=ARDUINO_USBDEFINES,
CPPPATH=[
join("$BUILD_DIR", "FrameworkArduino")
]
)
#
# Atmel SAM platform
#
if env.subst("${PLATFORMFW_DIR}")[-3:] == "sam":
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkCMSISInc"),
join("$PLATFORMFW_DIR", "system", "CMSIS", "CMSIS", "Include")
)
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkDeviceInc"),
join("$PLATFORMFW_DIR", "system", "CMSIS", "Device", "ATMEL")
)
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkLibSam"),
join("$PLATFORMFW_DIR", "system", "libsam")
)
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkArduinoInc"),
join("$PLATFORMFW_DIR", "cores", "${BOARD_OPTIONS['build']['core']}")
)
env.Append(
CPPPATH=[
join("$BUILD_DIR", "FrameworkCMSISInc"),
join("$BUILD_DIR", "FrameworkLibSam"),
join("$BUILD_DIR", "FrameworkLibSam", "include"),
join("$BUILD_DIR", "FrameworkDeviceInc"),
join("$BUILD_DIR", "FrameworkDeviceInc", "sam3xa", "include")
]
)
# search relative includes in lib SAM directories
core_dir = join(env.subst("$PLATFORMFW_DIR"), "system", "libsam")
for root, _, files in walk(core_dir):
for lib_file in files:
file_path = join(root, lib_file)
if not isfile(file_path):
continue
content = None
content_changed = False
with open(file_path) as fp:
content = fp.read()
if '#include "../' in content:
content_changed = True
content = content.replace('#include "../', '#include "')
if not content_changed:
continue
with open(file_path, "w") as fp:
fp.write(content)
#
# Teensy platform
#
# Teensy 2.x Core
if BOARD_BUILDOPTS.get("core", None) == "teensy":
# search relative includes in teensy directories
core_dir = join(env.get("PIOHOME_DIR"), "packages",
"framework-arduinoteensy", "cores", "teensy")
for item in sorted(listdir(core_dir)):
file_path = join(core_dir, item)
if not isfile(file_path):
continue
content = None
content_changed = False
with open(file_path) as fp:
content = fp.read()
if '#include "../' in content:
content_changed = True
content = content.replace('#include "../', '#include "')
if not content_changed:
continue
with open(file_path, "w") as fp:
fp.write(content)
#
# Target: Build Core Library
#
libs = []
if "variant" in BOARD_BUILDOPTS:
env.Append(
CPPPATH=[
join("$BUILD_DIR", "FrameworkArduinoVariant")
]
)
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "FrameworkArduinoVariant"),
join("$PLATFORMFW_DIR", "variants",
"${BOARD_OPTIONS['build']['variant']}")
))
envsafe = env.Clone()
libs.append(envsafe.BuildLibrary(
join("$BUILD_DIR", "FrameworkArduino"),
join("$PLATFORMFW_DIR", "cores", "${BOARD_OPTIONS['build']['core']}")
))
if env.subst("${PLATFORMFW_DIR}")[-3:] == "sam":
env.Append(
LIBPATH=[
join("$PLATFORMFW_DIR", "variants",
"${BOARD_OPTIONS['build']['variant']}")
]
)
envsafe.Append(
CFLAGS=[
"-std=gnu99"
]
)
libs.append("sam_sam3x8e_gcc_rel")
env.Append(LIBS=libs)
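# ---------------------------------------------------------------------------
# A minimal refactoring sketch (not used by the build logic above): the two
# '#include "../"' patching loops for the SAM and Teensy cores could share a
# helper like this one; it relies only on isfile/open already imported here.
# ---------------------------------------------------------------------------
def _flatten_relative_includes(file_path):
    """Rewrite '#include "../x.h"' directives as '#include "x.h"' in-place."""
    if not isfile(file_path):
        return
    with open(file_path) as fp:
        content = fp.read()
    if '#include "../' not in content:
        return
    with open(file_path, "w") as fp:
        fp.write(content.replace('#include "../', '#include "'))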
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Adding hyperparameter optimization to:
A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import rfho as rf
FLAGS = None
def deepnn(x):
"""deepnn builds the graph for a deep net for classifying digits.
Args:
x: an input tensor with the dimensions (N_examples, 784), where 784 is the
number of pixels in a standard MNIST image.
Returns:
A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
equal to the logits of classifying the digit into one of 10 classes (the
digits 0-9). keep_prob is a scalar placeholder for the probability of
dropout.
"""
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
x_image = tf.reshape(x, [-1, 28, 28, 1])
# First convolutional layer - maps one grayscale image to 32 feature maps.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# Pooling layer - downsamples by 2X.
h_pool1 = max_pool_2x2(h_conv1)
# Second convolutional layer -- maps 32 feature maps to 64.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# Second pooling layer.
h_pool2 = max_pool_2x2(h_conv2)
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# TODO # Dropout - controls the complexity of the model, prevents co-adaptation of
# # features.
# keep_prob = tf.placeholder(tf.float32)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Map the 1024 features to 10 classes, one for each digit
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
# RFHO: We use L2 norm weight penalty instead of dropout at the last layer.
y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2
return y_conv, W_fc1, W_fc2
def conv2d(x, W):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
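# ---------------------------------------------------------------------------
# A minimal shape sanity-check sketch (illustrative, not part of the original
# tutorial): building deepnn() in a throwaway graph should yield logits of
# static shape (None, 10), matching the 10 MNIST classes described above.
# ---------------------------------------------------------------------------
def _check_deepnn_shapes():
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, [None, 784])
        y_conv, W_fc1, W_fc2 = deepnn(x)
        assert y_conv.get_shape().as_list() == [None, 10]
        assert W_fc1.get_shape().as_list() == [7 * 7 * 64, 1024]
        assert W_fc2.get_shape().as_list() == [1024, 10]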
def main(_):
"""
Modified MNIST for expert (CNN part) tensorflow tutorial experiment to include real time
hyperparameter optimization. Hyperparameters being optimized are learning rate for
ADAM optimizer and coefficient of L2 norm of fully connected part of the network.
Note that this code requires ~ 3x (gpu) memory and ~ 4x time compared to the original one
but should yield a final test accuracy of around 99.4 %
:param _:
:return:
"""
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# Build the graph for the deep net
y_conv, W_fc1, W_fc2 = deepnn(x)
# RFHO: collect model variables and "vectorize the model"
model_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
# use adam optimizer:
optimizer = rf.AdamOptimizer
w, y_conv, W_fc1, W_fc2 = rf.vectorize_model(model_variables, y_conv, W_fc1, W_fc2,
augment=optimizer.get_augmentation_multiplier(),
suppress_err_out=False)
# w is now a vector that contains all the weights, y_conv and W_fc2 are the same tensor as earlier,
# but in the new graph
# RFHO use cross entropy defined in the package since tensorflow one does not have Hessian,
# eps is the clipping threshold for cross entropy.
cross_entropy = tf.reduce_mean(
rf.cross_entropy_loss(labels=y_, logits=y_conv, eps=1.e-4))
# RFHO add an L2 regularizer on the last weight matrix, whose weight will be optimized
rho = tf.Variable(0., name='rho')
constraints = [rf.positivity(rho)] # rho >= 0
iterations_per_epoch = 1100 # with mini batch size of 50
training_error = cross_entropy + 1/iterations_per_epoch*tf.multiply(
rho, tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(W_fc2))
# train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# RFHO define learning rate as a hyperparameter and create the parameter optimization dynamics
alpha = tf.Variable(1.e-4, name='alpha')
constraints.append(rf.positivity(alpha))
dynamics = optimizer.create(w, lr=alpha, loss=training_error)
# RFHO we want to optimize learning rate and L2 coefficient w.r.t. cross entropy loss on validation set
hyper_dict = {cross_entropy: [alpha, rho]}
# RFHO define the hyperparameter optimizer, we use Forward-HG method to compute hyper-gradients and RTHO algorithm
hyper_opt = rf.HyperOptimizer(dynamics, hyper_dict, rf.ForwardHG, lr=1.e-5)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
# RFHO last thing before running: define the example supplier:
def _train_fd():
batch = mnist.train.next_batch(50) # batch size of 50
return {x: batch[0], y_: batch[1]}
def _validation_fd():
return {x: mnist.validation.images, y_: mnist.validation.labels}
with tf.Session(config=rf.CONFIG_GPU_GROWTH).as_default() as ss: # RFHO use default session.
hyper_opt.initialize() # RFHO this will initialize all the variables, including hyperparameters
for i in range(200): # RFHO we run for 200 hyper-iterations
hyper_opt.run(100, train_feed_dict_supplier=_train_fd,
val_feed_dict_suppliers={cross_entropy: _validation_fd},
hyper_constraints_ops=constraints)
# if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict=_train_fd())
val_accuracy, val_error = ss.run([accuracy, cross_entropy], feed_dict=_validation_fd())
print('step %d, training accuracy %.2f; validation accuracy: %.4f, validation error: %.5f; '
'alpha: %.6f, %.5f, rho: %.6f, %.5f'
% (i*100, train_accuracy, val_accuracy, val_error, alpha.eval(),
hyper_opt.hyper_gradients.hyper_gradients_dict[alpha].eval(),
rho.eval(), hyper_opt.hyper_gradients.hyper_gradients_dict[rho].eval()))
# train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
test_accuracy = accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels})
print('test accuracy %g' % test_accuracy)
return test_accuracy
def experiment(mnist, optimizer=rf.AdamOptimizer, optimizer_kwargs=None,
hyper_batch_size=100, T=200, hyper_learning_rate=1.e-4, use_mse=False):
"""
Modified MNIST for expert (CNN part) tensorflow tutorial experiment to include real time
hyperparameter optimization. Hyperparameters being optimized are learning rate for
ADAM optimizer and coefficient of L2 norm of fully connected part of the network.
Note that this code requires ~ 3x (gpu) memory and ~ 4x time compared to the original one
but should yield a final test accuracy of around 99.4 %
:return:
"""
# Create the model
x = tf.placeholder(tf.float32, [None, 784], name='x')
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10], name='y')
# Build the graph for the deep net
y_conv, W_fc1, W_fc2 = deepnn(x)
# RFHO: collect model variables and "vectorize the model"
model_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
# use adam optimizer:
w, y_conv, W_fc1, W_fc2 = rf.vectorize_model(model_variables, y_conv, W_fc1, W_fc2,
augment=optimizer.get_augmentation_multiplier())
# w is now a vector that contains all the weights, y_conv and W_fc2 are the same tensor as earlier,
# but in the new graph
# RFHO use cross entropy defined in the package since tensorflow one does not have Hessian,
# eps is the clipping threshold for cross entropy.
if use_mse:
error = tf.reduce_mean(tf.squared_difference(y_, y_conv), name='error')
else:
error = tf.reduce_mean(
rf.cross_entropy_loss(labels=y_, logits=y_conv, eps=1.e-4), name='error')
# RFHO add an L2 regularizer on the last weight matrix, whose weight will be optimized
rho = tf.Variable(0., name='rho')
constraints = [rf.positivity(rho)] # rho >= 0
iterations_per_epoch = 1100 # with mini batch size of 50
training_error = error + 1/iterations_per_epoch*tf.multiply(
rho, tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(W_fc2))
# train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# RFHO define learning rate as a hyperparameter and create the parameter optimization dynamics
if optimizer_kwargs is None:
optimizer_kwargs = {'lr': tf.Variable(1.e-4, name='alpha')}
dynamics = optimizer.create(w, loss=training_error, **optimizer_kwargs)
constraints += dynamics.get_natural_hyperparameter_constraints() # add 'usual' constraints for
# if optimizer is rf.AdamOptimizer:
# constraints.append(dynamics.learning_rate.assign(tf.minimum(1.e-3, dynamics.learning_rate)))
# algorithmic hyperparameters
# RFHO we want to optimize learning rate and L2 coefficient w.r.t. cross entropy loss on validation set
hyper_dict = {error: [rho] + dynamics.get_optimization_hyperparameters(only_variables=True)}
# RFHO define the hyperparameter optimizer, we use Forward-HG method to compute hyper-gradients and RTHO algorithm
hyper_opt = rf.HyperOptimizer(dynamics, hyper_dict, rf.ForwardHG, lr=hyper_learning_rate)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
# RFHO last thing before running: define the example supplier:
_train_fd = rf.ExampleVisiting(mnist.train, batch_size=50).create_feed_dict_supplier(x, y_)
_validation_fd = mnist.validation.create_supplier(x, y_)
with tf.Session(config=rf.CONFIG_GPU_GROWTH).as_default(): # RFHO use default session.
hyper_opt.initialize() # RFHO this will initialize all the variables, including hyperparameters
for i in range(T): # RFHO we run for 200 hyper-iterations
hyper_opt.run(hyper_batch_size, train_feed_dict_supplier=_train_fd,
val_feed_dict_suppliers={error: _validation_fd},
hyper_constraints_ops=constraints)
test_accuracy = accuracy.eval(feed_dict=mnist.test.create_supplier(x, y_)())
print('test accuracy %g' % test_accuracy)
return test_accuracy
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str,
default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
|
"""fiber_center.py was written by Ryan Petersburg for use with fiber
characterization on the EXtreme PREcision Spectrograph
The functions in this module calculate the center and various dimensions of a
FiberImage object
"""
from .numpy_array_handler import circle_array, remove_circle, sum_array, mesh_grid_from_array
from .plotting import plot_image, plot_dot, plot_overlaid_cross_sections, show_plots
from .containers import Pixel
from .fiber_centroid import calc_centroid
import numpy as np
# Golden ratio for the optimization tests
PHI = (5 ** 0.5 - 1) / 2
def fiber_center_and_diameter(im_obj, method, show_image=False, **kwargs):
"""Find fiber center and diameter using the given method
Args
----
method : {'edge', 'radius', 'gaussian', 'circle'}
Uses the respective method to find the fiber center
show_image : boolean, optional (default=False)
Whether or not to show relevant fitting images
**kwargs :
The keyworded arguments to pass to the centering method
Raises
------
RuntimeError
raised if ``method`` is not a valid centering method string
"""
if method == 'radius':
center, diameter = _radius_method(im_obj, **kwargs)
elif method == 'edge':
center, diameter = _edge_method(im_obj, **kwargs)
elif method == 'circle':
center, diameter = _circle_method(im_obj, **kwargs)
elif method == 'gaussian':
center, diameter = _gaussian_method(im_obj, **kwargs)
elif method == 'full':
center, diameter = _full_method(im_obj, **kwargs)
else:
raise RuntimeError('Incorrect string for fiber centering method')
if show_image:
radius = diameter / 2.0
image = im_obj.get_image()
if method == 'gaussian':
plot_overlaid_cross_sections(image,
im_obj.get_gaussian_fit(),
center)
plot_dot(image, center)
else:
plot_image(remove_circle(image, center, radius, res=1))
plot_overlaid_cross_sections(image,
circle_array(im_obj.get_mesh_grid(),
center.x, center.y,
radius, res=1)
*image.max() / 2.0,
center)
plot_dot(image, center)
if method == 'edge':
for corner in im_obj._edges:
plot_dot(image, corner)
show_plots()
return center, diameter
def _full_method(im_obj, kernel=None, threshold=None, **kwargs):
"""Centroids a boolean image above the FiberImage threshold.
Returns
-------
center : Pixel
diameter : float (pixels)
"""
if threshold is None:
threshold = im_obj.threshold
image = (im_obj.get_filtered_image(kernel) > threshold).astype('uint8')
center = calc_centroid(image)
x_array, y_array = mesh_grid_from_array(image)
dist_image = np.sqrt((x_array - center.x)**2 + (y_array - center.y)**2)
dist_image *= image
diameter = dist_image.max() * 2.0
# _, diameter = _edge_method(im_obj, kernel=kernel, **kwargs)
return center, diameter
def _edge_method(im_obj, **kwargs):
"""Averages the fiber edges to set the fiber center
Returns
-------
center : Pixel
diameter : float (pixels)
"""
im_obj.set_fiber_edges(**kwargs)
edges = im_obj._edges
y = (edges.top.y + edges.bottom.y) / 2.0
x = (edges.left.x + edges.right.x) / 2.0
center = Pixel(x,y)
# average the horizontal and vertical distances
diameter = (np.sqrt(((edges.right - edges.left)**2).as_array().sum())
+ np.sqrt(((edges.bottom - edges.top)**2).as_array().sum())) / 2.0
return center, diameter
def _gaussian_method(im_obj, **kwargs):
"""Set fiber center using a Gaussian Fit
Uses Scipy.optimize.curve_fit method to fit fiber image to
gaussian_array(). The radius found extends to 2-sigma of the gaussian
therefore encompassing ~95% of the imaged light. Use previous methods
of center-finding to approximate the location of the center
Returns
-------
center : Pixel
Center of the fiber in the gaussian method context
diameter : float (pixels)
"""
if im_obj.gaussian_coeffs is None:
im_obj.set_gaussian_fit(**kwargs)
coeffs = im_obj.gaussian_coeffs
center = Pixel(coeffs[0], coeffs[1])
diameter = abs(coeffs[2]) * 2.0
return center, diameter
def _radius_method(im_obj, radius_tol=.03, radius_range=None, option=None,
kernel=None, threshold=None, **kwargs):
"""Set fiber center using dark circle with varying radius
Uses a golden mean optimization method to find the optimal radius of the
dark circle that covers the fiber image used in
get_fiber_centerCircleMethod(). The optimization is for a parameter
array_sum which is weighted by the area of the circle, meaning that a
smaller circle is preferred over one that simply covers the entire image
Args
----
radius_tol : number (default=0.03)
Minimum possible range of radius values before ending iteration
radius_range: int (in pixels)
Range of tested radii, i.e. max(radius) - min(radius). If None,
uses full possible range
Returns
-------
center : Pixel
diameter : float (pixels)
array_sum : float
If option is 'all'
"""
image = im_obj.get_filtered_image(kernel)
if threshold is None:
threshold = im_obj.threshold
# Initialize range of tested radii
r = np.zeros(4).astype(float)
if radius_range is not None:
approx_radius = im_obj.get_fiber_radius(method='edge')
radius_range /= 2.0
r[0] = approx_radius - radius_range
if r[0] < 0.0:
r[0] = 0.0
r[3] = approx_radius + radius_range
else:
r[0] = 0
r[3] = min(im_obj.height, im_obj.width) / 2.0
r[1] = r[0] + (1 - PHI) * (r[3] - r[0])
r[2] = r[0] + PHI * (r[3] - r[0])
array_sum = np.zeros(2).astype(float)
for i in xrange(2):
center, _, array_sum[i] = _circle_method(im_obj, image=image,
radius=r[i+1],
option='all', **kwargs)
array_sum[i] += threshold * np.pi * r[i+1]**2
min_index = np.argmin(array_sum) # Integer 0 or 1 for min of r[1], r[2]
while abs(r[3]-r[0]) > radius_tol:
if min_index == 0:
r[3] = r[2]
r[2] = r[1]
r[1] = r[0] + (1 - PHI) * (r[3] - r[0])
else:
r[0] = r[1]
r[1] = r[2]
r[2] = r[0] + PHI * (r[3] - r[0])
array_sum[1 - min_index] = array_sum[min_index]
center, _, array_sum[min_index] = _circle_method(im_obj, image=image,
radius=r[min_index+1],
option='all', **kwargs)
array_sum[min_index] += threshold * np.pi * r[min_index+1]**2
min_index = np.argmin(array_sum) # Integer 0 or 1 for min of r[1], r[2]
array_sum = np.amin(array_sum)
diameter = r[min_index+1] * 2
if option == 'all':
return center, diameter, array_sum
return center, diameter
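

# --- Illustrative sketch (not part of the original module) ---
# _radius_method above is a golden-section search specialised to the
# area-weighted array_sum objective. Assuming PHI is the inverse golden ratio
# (~0.618), as its use above implies, the generic 1-D pattern looks like this
# (illustration only; never called by the module):
def _example_golden_section(f, lo, hi, tol=1e-3):
    """Minimize a unimodal function f on [lo, hi] by golden-section search."""
    a, b = float(lo), float(hi)
    c = a + (1 - PHI) * (b - a)
    d = a + PHI * (b - a)
    fc, fd = f(c), f(d)
    while abs(b - a) > tol:
        if fc < fd:
            # Minimum lies in [a, d]; the old c becomes the new d.
            b, d, fd = d, c, fc
            c = a + (1 - PHI) * (b - a)
            fc = f(c)
        else:
            # Minimum lies in [c, b]; the old d becomes the new c.
            a, c, fc = c, d, fd
            d = a + PHI * (b - a)
            fd = f(d)
    return (a + b) / 2.0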


def _circle_method(im_obj, image=None, radius=None, center_tol=.03,
                   center_range=None, option=None, kernel=None, **kwargs):
    """Find the fiber center using a dark circle of set radius.

    Uses a golden-section search (golden-mean optimization) to find the
    optimal center for a circle covering the fiber image. The optimization
    is over the parameter array_sum, which simply sums over the entire
    fiber image array.

    Args
    ----
    radius : float
        Radius to use when creating the circle
    center_tol : number (default=0.03)
        Minimum possible range of center values before ending iteration
    center_range : int (in pixels)
        Range of tested centers, i.e. max(x0) - min(x0). If None,
        uses the full possible range
    image : 2d numpy.ndarray, optional
        The image being analyzed. This is only useful for the radius
        method and probably not for use outside the class.

    Returns
    -------
    center : Pixel
    diameter : float (pixels)
    array_sum : float
        If option is 'all'
    """
res = int(1.0/center_tol)
if image is None:
image = im_obj.get_filtered_image(kernel)
if radius is None:
radius = im_obj.get_fiber_radius(method='edge')
# Create four "corners" to test center of the removed circle
x = np.zeros(4).astype(float)
y = np.zeros(4).astype(float)
if center_range is not None:
approx_center = im_obj.get_fiber_center(method='edge')
center_range = center_range / 2.0
x[0] = approx_center.x - center_range
if x[0] < radius:
x[0] = radius
x[3] = approx_center.x + center_range
if x[3] > im_obj.width - radius:
x[3] = im_obj.width - radius
y[0] = approx_center.y - center_range
if y[0] < radius:
y[0] = radius
y[3] = approx_center.y + center_range
if y[3] > im_obj.height - radius:
y[3] = im_obj.height - radius
else:
x[0] = radius
x[3] = im_obj.width - radius
y[0] = radius
y[3] = im_obj.height - radius
x[1] = x[0] + (1 - PHI) * (x[3] - x[0])
x[2] = x[0] + PHI * (x[3] - x[0])
y[1] = y[0] + (1 - PHI) * (y[3] - y[0])
y[2] = y[0] + PHI * (y[3] - y[0])
# Initialize array sums to each corner
array_sum = np.zeros((2, 2)).astype(float)
for i in xrange(2):
for j in xrange(2):
removed_circle_array = remove_circle(image,
Pixel(x[i+1], y[j+1]),
radius, res=1)
array_sum[j, i] = sum_array(removed_circle_array)
# Find the index of the corner with minimum array_sum
min_index = np.unravel_index(np.argmin(array_sum), (2, 2)) # Tuple
while abs(x[3] - x[0]) > center_tol and abs(y[3] - y[0]) > center_tol:
# Move the other corners to smaller search area
if min_index[0] == 0:
y[3] = y[2]
y[2] = y[1]
y[1] = y[0] + (1 - PHI) * (y[3] - y[0])
else:
y[0] = y[1]
y[1] = y[2]
y[2] = y[0] + PHI * (y[3] - y[0])
if min_index[1] == 0:
x[3] = x[2]
x[2] = x[1]
x[1] = x[0] + (1 - PHI) * (x[3] - x[0])
else:
x[0] = x[1]
x[1] = x[2]
x[2] = x[0] + PHI * (x[3] - x[0])
        # Carry the best corner's array sum to the opposite corner of the
        # new, smaller search square so it does not need to be recalculated
array_sum[1 - min_index[0], 1 - min_index[1]] = array_sum[min_index]
min_index = (1 - min_index[0], 1 - min_index[1])
# Recalculate new sums for all four corners
for i in xrange(2):
for j in xrange(2):
if i != min_index[1] or j != min_index[0]:
temp_res = 1
if (abs(x[3] - x[0]) < 10*center_tol
and abs(y[3] - y[0]) < 10*center_tol):
temp_res = res
removed_circle_array = remove_circle(image,
Pixel(x[i+1], y[j+1]),
radius, temp_res)
array_sum[j, i] = sum_array(removed_circle_array)
min_index = np.unravel_index(np.argmin(array_sum), (2, 2))
center = Pixel(x[min_index[1]+1], y[min_index[0]+1])
diameter = radius * 2
array_sum = np.amin(array_sum)
if option == 'all':
return center, diameter, array_sum
return center, diameter
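

# --- Illustrative sketch (not part of the original module) ---
# _circle_method minimizes the sum of the image after a dark circle has been
# "removed" (masked out) at a candidate center. The helpers remove_circle()
# and sum_array() are assumed to behave roughly like this simplified,
# full-pixel-resolution version (illustration only; never called):
def _example_masked_sum(image, center_x, center_y, radius):
    """Sum the image with a circle of the given radius zeroed out."""
    yy, xx = np.indices(image.shape)
    outside = (xx - center_x)**2 + (yy - center_y)**2 > radius**2
    return float(image[outside].sum())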
|
|
# -*- coding: utf-8 -*-
# Django settings for intranet project.
import os.path
import posixpath
import pinax
PINAX_ROOT = os.path.abspath(os.path.dirname(pinax.__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# tells Pinax to use the default theme
PINAX_THEME = "default"
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA = DEBUG
INTERNAL_IPS = [
"127.0.0.1",
]
ADMINS = [
# ("Your Name", "your_email@domain.com"),
]
MANAGERS = ADMINS
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3", # Add "postgresql_psycopg2", "postgresql", "mysql", "sqlite3" or "oracle".
"NAME": "dev.db", # Or path to database file if using sqlite3.
"USER": "", # Not used with sqlite3.
"PASSWORD": "", # Not used with sqlite3.
"HOST": "", # Set to empty string for localhost. Not used with sqlite3.
"PORT": "", # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "US/Eastern"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory that holds static files like app media.
# Example: "/home/media/media.lawrence.com/apps/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, "site_media", "static")
# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = "/site_media/static/"
# Additional directories which hold static files
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, "media"),
os.path.join(PINAX_ROOT, "media", PINAX_THEME),
]
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# Make this unique, and don't share it with anybody.
SECRET_KEY = ""
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.load_template_source",
"django.template.loaders.app_directories.load_template_source",
]
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django_openid.consumer.SessionConsumer",
"django.contrib.messages.middleware.MessageMiddleware",
"groups.middleware.GroupAwareMiddleware",
"pinax.apps.account.middleware.LocaleMiddleware",
"pinax.apps.account.middleware.AuthenticatedMiddleware",
"pagination.middleware.PaginationMiddleware",
"pinax.middleware.security.HideSensistiveFieldsMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
ROOT_URLCONF = "intranet_project.urls"
TEMPLATE_DIRS = [
os.path.join(PROJECT_ROOT, "templates"),
os.path.join(PINAX_ROOT, "templates", PINAX_THEME),
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"staticfiles.context_processors.static_url",
"pinax.core.context_processors.pinax_settings",
"pinax.apps.account.context_processors.account",
"notification.context_processors.notification",
"announcements.context_processors.site_wide_announcements",
]
INSTALLED_APPS = [
# Django
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.humanize",
"django.contrib.markup",
"pinax.templatetags",
# external
"notification", # must be first
"staticfiles",
"debug_toolbar",
"mailer",
"uni_form",
"django_openid",
"ajax_validation",
"timezones",
"emailconfirmation",
"announcements",
"pagination",
"idios",
"groups",
"bookmarks",
"avatar",
"tagging",
"threadedcomments",
"wakawaka",
"django_markup",
"attachments",
"django_filters",
"tagging_ext",
"voting",
# Pinax
"pinax.apps.account",
"pinax.apps.signup_codes",
"pinax.apps.topics",
"pinax.apps.tagging_utils",
"pinax.apps.threadedcomments_extras",
"pinax.apps.tasks",
# project
"profiles",
]
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
ABSOLUTE_URL_OVERRIDES = {
"auth.user": lambda o: "/profiles/profile/%s/" % o.username,
}
MARKUP_FILTER_FALLBACK = "none"
MARKUP_CHOICES = [
("restructuredtext", u"reStructuredText"),
("textile", u"Textile"),
("markdown", u"Markdown"),
("creole", u"Creole"),
]
AUTH_PROFILE_MODULE = "profiles.Profile"
NOTIFICATION_LANGUAGE_MODULE = "account.Account"
ACCOUNT_OPEN_SIGNUP = False
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_VERIFICATION = False
ACCOUNT_EMAIL_AUTHENTICATION = False
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = False
AUTHENTICATION_BACKENDS = [
"pinax.apps.account.auth_backends.AuthenticationBackend",
]
LOGIN_URL = "/account/login/" # @@@ any way this can be a url name?
LOGIN_REDIRECT_URLNAME = "home"
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
AUTHENTICATED_EXEMPT_URLS = [
r"^/account/signup/$",
r"^/account/password_reset",
r"^/account/confirm_email",
r"^/openid",
r"^/__debug__",
]
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
}
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from local_settings import *
except ImportError:
pass
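
# --- Illustrative sketch (not part of the original project) ---
# A hypothetical local_settings.py could override the environment-specific
# values mentioned above; every name and credential below is an example only:
#
#   DEBUG = False
#   DATABASES = {
#       "default": {
#           "ENGINE": "django.db.backends.postgresql_psycopg2",
#           "NAME": "intranet",
#           "USER": "intranet",
#           "PASSWORD": "change-me",
#           "HOST": "localhost",
#           "PORT": "",
#       }
#   }
#   EMAIL_HOST = "smtp.example.com"
#   DEFAULT_FROM_EMAIL = "noreply@example.com"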
|