"""The tests for the Picnic sensor platform."""
import copy
from datetime import timedelta
import unittest
from unittest.mock import patch
import pytest
import requests
from homeassistant import config_entries
from homeassistant.components.picnic import const
from homeassistant.components.picnic.const import CONF_COUNTRY_CODE, SENSOR_TYPES
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CURRENCY_EURO,
DEVICE_CLASS_TIMESTAMP,
STATE_UNAVAILABLE,
)
from homeassistant.util import dt
from tests.common import (
MockConfigEntry,
async_fire_time_changed,
async_test_home_assistant,
)
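# Canned Picnic API responses used below to mock get_user, get_cart and get_deliveries.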
DEFAULT_USER_RESPONSE = {
"user_id": "295-6y3-1nf4",
"firstname": "User",
"lastname": "Name",
"address": {
"house_number": 123,
"house_number_ext": "a",
"postcode": "4321 AB",
"street": "Commonstreet",
"city": "Somewhere",
},
"total_deliveries": 123,
"completed_deliveries": 112,
}
DEFAULT_CART_RESPONSE = {
"items": [],
"delivery_slots": [
{
"slot_id": "611a3b074872b23576bef456a",
"window_start": "2021-03-03T14:45:00.000+01:00",
"window_end": "2021-03-03T15:45:00.000+01:00",
"cut_off_time": "2021-03-02T22:00:00.000+01:00",
"minimum_order_value": 3500,
},
],
"selected_slot": {"slot_id": "611a3b074872b23576bef456a", "state": "EXPLICIT"},
"total_count": 10,
"total_price": 2535,
}
DEFAULT_DELIVERY_RESPONSE = {
"delivery_id": "z28fjso23e",
"creation_time": "2021-02-24T21:48:46.395+01:00",
"slot": {
"slot_id": "602473859a40dc24c6b65879",
"hub_id": "AMS",
"window_start": "2021-02-26T20:15:00.000+01:00",
"window_end": "2021-02-26T21:15:00.000+01:00",
"cut_off_time": "2021-02-25T22:00:00.000+01:00",
"minimum_order_value": 3500,
},
"eta2": {
"start": "2021-02-26T20:54:00.000+01:00",
"end": "2021-02-26T21:14:00.000+01:00",
},
"status": "COMPLETED",
"delivery_time": {
"start": "2021-02-26T20:54:05.221+01:00",
"end": "2021-02-26T20:58:31.802+01:00",
},
"orders": [
{
"creation_time": "2021-02-24T21:48:46.418+01:00",
"total_price": 3597,
},
{
"creation_time": "2021-02-25T17:10:26.816+01:00",
"total_price": 536,
},
],
}
SENSOR_KEYS = [desc.key for desc in SENSOR_TYPES]
@pytest.mark.usefixtures("hass_storage")
class TestPicnicSensor(unittest.IsolatedAsyncioTestCase):
"""Test the Picnic sensor."""
async def asyncSetUp(self):
"""Set up things to be run when tests are started."""
self.hass = await async_test_home_assistant(None)
self.entity_registry = (
await self.hass.helpers.entity_registry.async_get_registry()
)
# Patch the API client
self.picnic_patcher = patch("homeassistant.components.picnic.PicnicAPI")
self.picnic_mock = self.picnic_patcher.start()
# Add a config entry and set up the integration
config_data = {
CONF_ACCESS_TOKEN: "x-original-picnic-auth-token",
CONF_COUNTRY_CODE: "NL",
}
self.config_entry = MockConfigEntry(
domain=const.DOMAIN,
data=config_data,
unique_id="295-6y3-1nf4",
)
self.config_entry.add_to_hass(self.hass)
async def asyncTearDown(self):
"""Tear down the test setup, stop hass/patchers."""
await self.hass.async_stop(force=True)
self.picnic_patcher.stop()
@property
def _coordinator(self):
return self.hass.data[const.DOMAIN][self.config_entry.entry_id][
const.CONF_COORDINATOR
]
def _assert_sensor(self, name, state=None, cls=None, unit=None, disabled=False):
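"""Assert a sensor's state, device class, unit and attribution; when disabled=True, assert the entity has no state."""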
sensor = self.hass.states.get(name)
if disabled:
assert sensor is None
return
assert sensor.state == state
if cls:
assert sensor.attributes["device_class"] == cls
if unit:
assert sensor.attributes["unit_of_measurement"] == unit
assert sensor.attributes["attribution"] == "Data provided by Picnic"
async def _setup_platform(
self, use_default_responses=False, enable_all_sensors=True
):
"""Set up the Picnic sensor platform."""
if use_default_responses:
self.picnic_mock().get_user.return_value = copy.deepcopy(
DEFAULT_USER_RESPONSE
)
self.picnic_mock().get_cart.return_value = copy.deepcopy(
DEFAULT_CART_RESPONSE
)
self.picnic_mock().get_deliveries.return_value = [
copy.deepcopy(DEFAULT_DELIVERY_RESPONSE)
]
self.picnic_mock().get_delivery_position.return_value = {}
await self.hass.config_entries.async_setup(self.config_entry.entry_id)
await self.hass.async_block_till_done()
if enable_all_sensors:
await self._enable_all_sensors()
async def _enable_all_sensors(self):
"""Enable all sensors of the Picnic integration."""
# Enable the sensors
for sensor_type in SENSOR_KEYS:
updated_entry = self.entity_registry.async_update_entity(
f"sensor.picnic_{sensor_type}", disabled_by=None
)
assert updated_entry.disabled is False
await self.hass.async_block_till_done()
# Trigger a reload of the data
async_fire_time_changed(
self.hass,
dt.utcnow()
+ timedelta(seconds=config_entries.RELOAD_AFTER_UPDATE_DELAY + 1),
)
await self.hass.async_block_till_done()
async def test_sensor_setup_platform_not_available(self):
"""Test the set-up of the sensor platform if API is not available."""
# Configure mock requests to yield exceptions
self.picnic_mock().get_user.side_effect = requests.exceptions.ConnectionError
self.picnic_mock().get_cart.side_effect = requests.exceptions.ConnectionError
self.picnic_mock().get_deliveries.side_effect = (
requests.exceptions.ConnectionError
)
self.picnic_mock().get_delivery_position.side_effect = (
requests.exceptions.ConnectionError
)
await self._setup_platform(enable_all_sensors=False)
# Assert that sensors are not set up
assert (
self.hass.states.get("sensor.picnic_selected_slot_max_order_time") is None
)
assert self.hass.states.get("sensor.picnic_last_order_status") is None
assert self.hass.states.get("sensor.picnic_last_order_total_price") is None
async def test_sensors_setup(self):
"""Test the default sensor setup behaviour."""
await self._setup_platform(use_default_responses=True)
self._assert_sensor("sensor.picnic_cart_items_count", "10")
self._assert_sensor(
"sensor.picnic_cart_total_price", "25.35", unit=CURRENCY_EURO
)
self._assert_sensor(
"sensor.picnic_selected_slot_start",
"2021-03-03T14:45:00.000+01:00",
cls=DEVICE_CLASS_TIMESTAMP,
)
self._assert_sensor(
"sensor.picnic_selected_slot_end",
"2021-03-03T15:45:00.000+01:00",
cls=DEVICE_CLASS_TIMESTAMP,
)
self._assert_sensor(
"sensor.picnic_selected_slot_max_order_time",
"2021-03-02T22:00:00.000+01:00",
cls=DEVICE_CLASS_TIMESTAMP,
)
self._assert_sensor("sensor.picnic_selected_slot_min_order_value", "35.0")
self._assert_sensor(
"sensor.picnic_last_order_slot_start",
"2021-02-26T20:15:00.000+01:00",
cls=DEVICE_CLASS_TIMESTAMP,
)
self._assert_sensor(
"sensor.picnic_last_order_slot_end",
"2021-02-26T21:15:00.000+01:00",
cls=DEVICE_CLASS_TIMESTAMP,
)
self._assert_sensor("sensor.picnic_last_order_status", "COMPLETED")
self._assert_sensor(
"sensor.picnic_last_order_eta_start",
"2021-02-26T20:54:00.000+01:00",
cls=DEVICE_CLASS_TIMESTAMP,
)
self._assert_sensor(
"sensor.picnic_last_order_eta_end",
"2021-02-26T21:14:00.000+01:00",
cls=DEVICE_CLASS_TIMESTAMP,
)
self._assert_sensor(
"sensor.picnic_last_order_delivery_time",
"2021-02-26T20:54:05.221+01:00",
cls=DEVICE_CLASS_TIMESTAMP,
)
self._assert_sensor(
"sensor.picnic_last_order_total_price", "41.33", unit=CURRENCY_EURO
)
async def test_sensors_setup_disabled_by_default(self):
"""Test that some sensors are disabled by default."""
await self._setup_platform(use_default_responses=True, enable_all_sensors=False)
self._assert_sensor("sensor.picnic_cart_items_count", disabled=True)
self._assert_sensor("sensor.picnic_last_order_slot_start", disabled=True)
self._assert_sensor("sensor.picnic_last_order_slot_end", disabled=True)
self._assert_sensor("sensor.picnic_last_order_status", disabled=True)
self._assert_sensor("sensor.picnic_last_order_total_price", disabled=True)
async def test_sensors_no_selected_time_slot(self):
"""Test sensor states with no explicit selected time slot."""
# Adjust cart response
cart_response = copy.deepcopy(DEFAULT_CART_RESPONSE)
cart_response["selected_slot"]["state"] = "IMPLICIT"
# Set mock responses
self.picnic_mock().get_user.return_value = copy.deepcopy(DEFAULT_USER_RESPONSE)
self.picnic_mock().get_cart.return_value = cart_response
self.picnic_mock().get_deliveries.return_value = [
copy.deepcopy(DEFAULT_DELIVERY_RESPONSE)
]
self.picnic_mock().get_delivery_position.return_value = {}
await self._setup_platform()
# Assert the slot sensors are unavailable
self._assert_sensor("sensor.picnic_selected_slot_start", STATE_UNAVAILABLE)
self._assert_sensor("sensor.picnic_selected_slot_end", STATE_UNAVAILABLE)
self._assert_sensor(
"sensor.picnic_selected_slot_max_order_time", STATE_UNAVAILABLE
)
self._assert_sensor(
"sensor.picnic_selected_slot_min_order_value", STATE_UNAVAILABLE
)
async def test_sensors_last_order_in_future(self):
"""Test sensor states when last order is not yet delivered."""
# Adjust default delivery response
delivery_response = copy.deepcopy(DEFAULT_DELIVERY_RESPONSE)
del delivery_response["delivery_time"]
# Set mock responses
self.picnic_mock().get_user.return_value = copy.deepcopy(DEFAULT_USER_RESPONSE)
self.picnic_mock().get_cart.return_value = copy.deepcopy(DEFAULT_CART_RESPONSE)
self.picnic_mock().get_deliveries.return_value = [delivery_response]
self.picnic_mock().get_delivery_position.return_value = {}
await self._setup_platform()
# Assert delivery time is not available, but eta is
self._assert_sensor("sensor.picnic_last_order_delivery_time", STATE_UNAVAILABLE)
self._assert_sensor(
"sensor.picnic_last_order_eta_start", "2021-02-26T20:54:00.000+01:00"
)
self._assert_sensor(
"sensor.picnic_last_order_eta_end", "2021-02-26T21:14:00.000+01:00"
)
async def test_sensors_use_detailed_eta_if_available(self):
"""Test sensor states when last order is not yet delivered."""
# Set-up platform with default mock responses
await self._setup_platform(use_default_responses=True)
# Provide a delivery position response with different ETA and remove delivery time from response
delivery_response = copy.deepcopy(DEFAULT_DELIVERY_RESPONSE)
del delivery_response["delivery_time"]
self.picnic_mock().get_deliveries.return_value = [delivery_response]
self.picnic_mock().get_delivery_position.return_value = {
"eta_window": {
"start": "2021-03-05T11:19:20.452+01:00",
"end": "2021-03-05T11:39:20.452+01:00",
}
}
await self._coordinator.async_refresh()
# Assert detailed ETA is used
self.picnic_mock().get_delivery_position.assert_called_with(
delivery_response["delivery_id"]
)
self._assert_sensor(
"sensor.picnic_last_order_eta_start", "2021-03-05T11:19:20.452+01:00"
)
self._assert_sensor(
"sensor.picnic_last_order_eta_end", "2021-03-05T11:39:20.452+01:00"
)
async def test_sensors_no_data(self):
"""Test sensor states when the api only returns empty objects."""
# Setup platform with default responses
await self._setup_platform(use_default_responses=True)
# Change mock responses to empty data and refresh the coordinator
self.picnic_mock().get_user.return_value = {}
self.picnic_mock().get_cart.return_value = None
self.picnic_mock().get_deliveries.return_value = None
self.picnic_mock().get_delivery_position.side_effect = ValueError
await self._coordinator.async_refresh()
# Assert all default-enabled sensors have STATE_UNAVAILABLE because the last update failed
assert self._coordinator.last_update_success is False
self._assert_sensor("sensor.picnic_cart_total_price", STATE_UNAVAILABLE)
self._assert_sensor("sensor.picnic_selected_slot_start", STATE_UNAVAILABLE)
self._assert_sensor("sensor.picnic_selected_slot_end", STATE_UNAVAILABLE)
self._assert_sensor(
"sensor.picnic_selected_slot_max_order_time", STATE_UNAVAILABLE
)
self._assert_sensor(
"sensor.picnic_selected_slot_min_order_value", STATE_UNAVAILABLE
)
self._assert_sensor("sensor.picnic_last_order_eta_start", STATE_UNAVAILABLE)
self._assert_sensor("sensor.picnic_last_order_eta_end", STATE_UNAVAILABLE)
self._assert_sensor("sensor.picnic_last_order_delivery_time", STATE_UNAVAILABLE)
async def test_sensors_malformed_response(self):
"""Test coordinator update fails when API yields ValueError."""
# Setup platform with default responses
await self._setup_platform(use_default_responses=True)
# Make the user and cart requests raise ValueError and refresh the coordinator
self.picnic_mock().get_user.side_effect = ValueError
self.picnic_mock().get_cart.side_effect = ValueError
await self._coordinator.async_refresh()
# Assert coordinator update failed
assert self._coordinator.last_update_success is False
async def test_device_registry_entry(self):
"""Test if device registry entry is populated correctly."""
# Setup platform and default mock responses
await self._setup_platform(use_default_responses=True)
device_registry = await self.hass.helpers.device_registry.async_get_registry()
picnic_service = device_registry.async_get_device(
identifiers={(const.DOMAIN, DEFAULT_USER_RESPONSE["user_id"])}
)
assert picnic_service.model == DEFAULT_USER_RESPONSE["user_id"]
assert picnic_service.name == "Picnic: Commonstreet 123a"
assert picnic_service.entry_type == "service"
async def test_auth_token_is_saved_on_update(self):
"""Test that auth-token changes in the session object are reflected by the config entry."""
# Setup platform and default mock responses
await self._setup_platform(use_default_responses=True)
# Set a different auth token in the session mock
updated_auth_token = "x-updated-picnic-auth-token"
self.picnic_mock().session.auth_token = updated_auth_token
# Verify the updated auth token is not yet saved, then fetch data using the coordinator
assert self.config_entry.data.get(CONF_ACCESS_TOKEN) != updated_auth_token
await self._coordinator.async_refresh()
# Verify that the updated auth token is saved in the config entry
assert self.config_entry.data.get(CONF_ACCESS_TOKEN) == updated_auth_token
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from decimal import Decimal
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
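# Status codes reported by assetallocationverifyzdag; this test only asserts NOT_FOUND and MAJOR_CONFLICT.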
ZDAG_NOT_FOUND = -1
ZDAG_STATUS_OK = 0
ZDAG_WARNING_RBF = 1
ZDAG_WARNING_NOT_ZDAG_TX = 2
ZDAG_WARNING_SIZE_OVER_POLICY = 3
ZDAG_MAJOR_CONFLICT = 4
class AssetZDAGTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
self.rpc_timeout = 240
self.extra_args = [['-assetindex=1'],['-assetindex=1'],['-assetindex=1'],['-assetindex=1']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.generate(self.nodes[0], 200)
self.sync_blocks()
self.basic_zdag_doublespend()
self.burn_zdag_doublespend()
self.burn_zdag_doublespend_chain()
def basic_zdag_doublespend(self):
self.basic_asset(guid=None)
self.generate(self.nodes[0], 1)
newaddress2 = self.nodes[1].getnewaddress()
newaddress1 = self.nodes[0].getnewaddress()
self.nodes[2].importprivkey(self.nodes[1].dumpprivkey(newaddress2))
self.nodes[0].assetsend(self.asset, newaddress1, 2)
# create 2 UTXOs so the newaddress1 recipient of 0.5 COIN below uses one and the recipient on node3 (which holds newaddress2's key) uses the other for the double spend
self.nodes[0].sendtoaddress(newaddress2, 1)
self.nodes[0].sendtoaddress(newaddress2, 1)
self.generate(self.nodes[0], 1)
self.sync_blocks()
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset': 0.5})
assert_equal(len(out), 1)
out = self.nodes[2].listunspent()
assert_equal(len(out), 2)
# send 2 asset UTXOs to newaddress2, following the same double-spend logic as explained above
self.nodes[0].assetallocationsend(self.asset, newaddress2, 0.4)
time.sleep(0.25)
self.nodes[0].assetallocationsend(self.asset, newaddress2, 1)
time.sleep(0.25)
self.generate(self.nodes[0], 1)
self.sync_blocks()
# should have 2 sys utxos and 2 asset utxos
out = self.nodes[2].listunspent()
assert_equal(len(out), 4)
# this will use 1 sys UTXO and 1 asset UTXO and send the change to an address owned by node2
self.nodes[1].assetallocationsend(self.asset, newaddress1, 0.4, 0, False)
time.sleep(0.25)
self.sync_mempools(self.nodes[0:3], timeout=30)
# node3 should have 2 fewer UTXOs because they were spent to change on node2
out = self.nodes[2].listunspent(minconf=0)
assert_equal(len(out), 2)
time.sleep(0.25)
tx1 = self.nodes[1].assetallocationsend(self.asset, newaddress1, 1, 0, False)['txid']
# dbl spend
tx2 = self.nodes[2].assetallocationsend(self.asset, newaddress1, 0.9, 0, False)['txid']
time.sleep(0.25)
# use tx2 to build tx3
tx3 = self.nodes[2].assetallocationsend(self.asset, newaddress1, 0.05, 0, False)['txid']
time.sleep(0.25)
# use tx3 to build tx4
tx4 = self.nodes[2].assetallocationsend(self.asset, newaddress1, 0.025, 0, False)['txid']
time.sleep(0.25)
self.sync_mempools(self.nodes[0:3], timeout=30)
for i in range(3):
self.nodes[i].getrawtransaction(tx1)
self.nodes[i].getrawtransaction(tx2)
assert_equal(self.nodes[i].assetallocationverifyzdag(tx1)['status'], ZDAG_MAJOR_CONFLICT)
# ensure tx2 made it to the mempool; the double spend should propagate the first time
assert_equal(self.nodes[i].assetallocationverifyzdag(tx2)['status'], ZDAG_MAJOR_CONFLICT)
# will conflict because it spends tx2, which is in a conflicted state
assert_equal(self.nodes[i].assetallocationverifyzdag(tx3)['status'], ZDAG_MAJOR_CONFLICT)
# will conflict because it spends tx3, which builds on tx2, which is in a conflicted state
assert_equal(self.nodes[i].assetallocationverifyzdag(tx4)['status'], ZDAG_MAJOR_CONFLICT)
self.generate(self.nodes[0], 1)
self.sync_blocks()
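# After the block only one branch of the double spend survives: either tx1 confirms and
# tx2/tx3/tx4 are evicted from the mempool, or tx2's chain confirms instead (tracked via
# tx2inchain below); in the former case verifyzdag reports NOT_FOUND for all four txs.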
tx2inchain = False
for i in range(3):
try:
self.nodes[i].getrawtransaction(tx1)
except Exception:
tx2inchain = True
continue
assert_equal(self.nodes[i].assetallocationverifyzdag(tx1)['status'], ZDAG_NOT_FOUND)
assert_equal(self.nodes[i].assetallocationverifyzdag(tx2)['status'], ZDAG_NOT_FOUND)
assert_equal(self.nodes[i].assetallocationverifyzdag(tx3)['status'], ZDAG_NOT_FOUND)
assert_equal(self.nodes[i].assetallocationverifyzdag(tx4)['status'], ZDAG_NOT_FOUND)
assert_raises_rpc_error(-5, 'No such mempool transaction', self.nodes[i].getrawtransaction, tx2)
assert_raises_rpc_error(-5, 'No such mempool transaction', self.nodes[i].getrawtransaction, tx3)
assert_raises_rpc_error(-5, 'No such mempool transaction', self.nodes[i].getrawtransaction, tx4)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':0,'maximumAmountAsset':0})
assert_equal(len(out), 1)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':0.4,'maximumAmountAsset':0.4})
assert_equal(len(out), 1)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':0.6,'maximumAmountAsset':0.6})
assert_equal(len(out), 1)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':1.0,'maximumAmountAsset':1.0})
if tx2inchain is True:
assert_equal(len(out), 0)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 6)
else:
assert_equal(len(out), 1)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 4)
def burn_zdag_doublespend(self):
self.basic_asset(guid=None)
self.generate(self.nodes[0], 1)
useraddress2 = self.nodes[1].getnewaddress()
useraddress3 = self.nodes[2].getnewaddress()
useraddress4 = self.nodes[3].getnewaddress()
useraddress1 = self.nodes[0].getnewaddress()
# needed by node4 when dbl-spending
self.nodes[3].importprivkey(self.nodes[0].dumpprivkey(useraddress1))
self.nodes[0].sendtoaddress(useraddress1, 1)
self.nodes[0].sendtoaddress(useraddress1, 1)
self.nodes[0].sendtoaddress(useraddress2, 1)
self.nodes[0].sendtoaddress(useraddress3, 1)
self.nodes[0].sendtoaddress(useraddress4, 1)
self.nodes[0].assetsendmany(self.asset,[{'address': useraddress1,'amount':1.0},{'address': useraddress2,'amount':0.4},{'address': useraddress3,'amount':0.5}])
self.generate(self.nodes[0], 1)
self.sync_blocks()
# create separate output for dbl spend
self.nodes[0].assetsend(self.asset, useraddress1, 0.5)
time.sleep(0.25)
# try to do multiple asset sends in one block
assert_raises_rpc_error(-4, 'No inputs found for this asset', self.nodes[0].assetsend, self.asset, useraddress1, 2)
self.generate(self.nodes[0], 1)
self.sync_blocks()
self.nodes[0].assetallocationsend(self.asset, useraddress2, 0.2)
time.sleep(0.25)
self.nodes[1].assetallocationsend(self.asset, useraddress1, 0.2)
time.sleep(0.25)
self.nodes[0].assetallocationsend(self.asset, useraddress3, 0.2)
time.sleep(0.25)
self.nodes[2].assetallocationsend(self.asset, useraddress1, 0.2)
time.sleep(0.25)
self.sync_mempools(self.nodes[0:3],timeout=30)
# put everything in useraddress1 so node4 can access it for the double spend; prior to this it is probably sitting in a change address on node0
self.nodes[0].assetallocationsend(self.asset, useraddress1, 1.5)
time.sleep(0.25)
self.sync_mempools(timeout=30)
txid = self.nodes[0].assetallocationsend(self.asset, useraddress1, 1.5)['txid']
# dbl spend
txdblspend = self.nodes[3].assetallocationburn(self.asset, 1.1, "0x931d387731bbbc988b312206c74f77d004d6b84b")["txid"]
rawtx = self.nodes[0].getrawtransaction(txid)
self.nodes[1].sendrawtransaction(rawtx)
self.nodes[2].sendrawtransaction(rawtx)
self.nodes[1].assetallocationsend(self.asset, useraddress3, 0.2)
time.sleep(0.25)
self.nodes[2].assetallocationburn(self.asset, 0.3, "0x931d387731bbbc988b312206c74f77d004d6b84b")
time.sleep(0.25)
self.sync_mempools(self.nodes[0:3], timeout=30)
# node1/node2/node3 shouldn't have the double-spend tx because it is not RBF and not a ZDAG tx
assert_raises_rpc_error(-5, 'No such mempool transaction', self.nodes[0].getrawtransaction, txdblspend)
assert_raises_rpc_error(-5, 'No such mempool transaction', self.nodes[1].getrawtransaction, txdblspend)
assert_raises_rpc_error(-5, 'No such mempool transaction', self.nodes[2].getrawtransaction, txdblspend)
self.nodes[3].getrawtransaction(txdblspend)
self.generate(self.nodes[0], 1)
self.sync_blocks()
# after the block, even node4 should have removed the conflicting tx
assert_raises_rpc_error(-5, 'No such mempool transaction', self.nodes[3].getrawtransaction, txdblspend)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':0,'maximumAmountAsset':0})
assert_equal(len(out), 1)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':1.5,'maximumAmountAsset':1.5})
assert_equal(len(out), 1)
out = self.nodes[1].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':0.2,'maximumAmountAsset':0.2})
assert_equal(len(out), 1)
out = self.nodes[2].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':0.2,'maximumAmountAsset':0.2})
assert_equal(len(out), 2)
out = self.nodes[3].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':1.5,'maximumAmountAsset':1.5})
assert_equal(len(out), 1)
def burn_zdag_doublespend_chain(self):
# SYSX guid on regtest is 123456
self.basic_asset('123456')
self.generate(self.nodes[0], 1)
useraddress1 = self.nodes[1].getnewaddress()
useraddress2 = self.nodes[2].getnewaddress()
useraddress3 = self.nodes[3].getnewaddress()
self.nodes[0].sendtoaddress(useraddress1, 1)
self.nodes[0].sendtoaddress(useraddress2, 1)
self.nodes[0].sendtoaddress(useraddress3, 1)
self.generate(self.nodes[0], 1)
self.nodes[0].syscoinburntoassetallocation(self.asset, 1)
time.sleep(0.25)
self.nodes[0].syscoinburntoassetallocation(self.asset, 1)
time.sleep(0.25)
self.nodes[0].assetallocationsend(self.asset, useraddress1, 0.1)
time.sleep(0.25)
self.nodes[0].assetallocationsend(self.asset, useraddress2, 0.01)
time.sleep(0.25)
self.nodes[0].assetallocationsend(self.asset, useraddress2, 0.001)
time.sleep(0.25)
self.nodes[0].assetallocationsend(self.asset, useraddress3, 0.0001)
time.sleep(0.25)
self.nodes[0].assetallocationsend(self.asset, useraddress2, 0.00001)
time.sleep(0.25)
balanceBefore = self.nodes[0].getbalance(minconf=0)
self.nodes[0].assetallocationburn(self.asset, 1, '')
time.sleep(0.25)
self.nodes[0].assetupdate(self.asset, '', '', 127, '', {}, {})
time.sleep(0.25)
self.nodes[0].assetallocationburn(self.asset, 0.88889, '')
time.sleep(0.25)
# compare the balance with a 0.001 threshold to account for the assetupdate fee
assert(self.nodes[0].getbalance(minconf=0) - (balanceBefore+Decimal(1.88889)) < Decimal(0.001))
# listunspent for node0 should have just 1 entry (the asset ownership UTXO) in the mempool
out = self.nodes[0].listunspent(minconf=0, query_options={'assetGuid': self.asset})
assert_equal(len(out), 1)
assert_equal(out[0]['asset_guid'], '123456')
assert_equal(out[0]['asset_amount'], 0)
self.generate(self.nodes[0], 1)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 1)
assert_equal(out[0]['asset_guid'], '123456')
assert_equal(out[0]['asset_amount'], 0)
self.sync_blocks()
# listunspent for node0 should have just 1 entry (the asset ownership UTXO)
# check that nodes have allocations in listunspent before burning
self.nodes[1].assetallocationburn(self.asset, 0.1, '')
time.sleep(0.25)
self.nodes[2].assetallocationburn(self.asset, 0.01, '')
time.sleep(0.25)
self.nodes[2].assetallocationburn(self.asset, 0.001, '')
time.sleep(0.25)
self.nodes[3].assetallocationburn(self.asset, 0.0001, '')
time.sleep(0.25)
self.nodes[2].assetallocationburn(self.asset, 0.00001, '')
time.sleep(0.25)
# ensure burning sysx gives new sys balance
# account for rounding errors in Decimal
assert(self.nodes[1].getbalance(minconf=0) - (balanceBefore+Decimal(0.1)) < Decimal(0.001))
assert(self.nodes[2].getbalance(minconf=0) - (balanceBefore+Decimal(0.01101)) < Decimal(0.001))
assert(self.nodes[3].getbalance(minconf=0) - (balanceBefore+Decimal(0.0001)) < Decimal(0.0001))
out = self.nodes[1].listunspent(minconf=0, query_options={'assetGuid': self.asset})
assert_equal(len(out), 0)
out = self.nodes[2].listunspent(minconf=0, query_options={'assetGuid': self.asset})
assert_equal(len(out), 0)
out = self.nodes[3].listunspent(minconf=0, query_options={'assetGuid': self.asset})
assert_equal(len(out), 0)
# check listunspent is empty in mempool, all should be burned
self.nodes[0].assetupdate(self.asset, '', '', 127, '', {}, {})
self.generate(self.nodes[0], 1)
time.sleep(0.25)
assert(self.nodes[1].getbalance() - (balanceBefore+Decimal(0.1)) < Decimal(0.001))
assert(self.nodes[2].getbalance() - (balanceBefore+Decimal(0.01101)) < Decimal(0.001))
assert(self.nodes[3].getbalance() - (balanceBefore+Decimal(0.0001)) < Decimal(0.0001))
# check listunspent is empty, all should be burned
out = self.nodes[1].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 0)
out = self.nodes[2].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 0)
out = self.nodes[3].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 0)
def basic_asset(self, guid):
if guid is None:
self.asset = self.nodes[0].assetnew('1', "TST", "asset description", "0x9f90b5093f35aeac5fbaeb591f9c9de8e2844a46", 8, 10000, 127, '', {}, {})['asset_guid']
else:
self.asset = self.nodes[0].assetnewtest(guid, '1', "TST", "asset description", "0x9f90b5093f35aeac5fbaeb591f9c9de8e2844a46", 8, 10000, 127, '', {}, {})['asset_guid']
if __name__ == '__main__':
AssetZDAGTest().main()
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import base64
import json
import logging
import time
import unittest
import uuid
import fixtures
import mock
import requests
from requests_mock.contrib import fixture as rm_fixture
from six.moves import urllib
from swift.common.middleware.s3api import s3token
from swift.common.swob import Request, Response
from swift.common.wsgi import ConfigFileError
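# Canned Keystone s3tokens responses: GOOD_RESPONSE_V2 uses the v2 format (access/token/tenant),
# GOOD_RESPONSE_V3 the v3 format (token/project/domain).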
GOOD_RESPONSE_V2 = {'access': {
'user': {
'username': 'S3_USER',
'name': 'S3_USER',
'id': 'USER_ID',
'roles': [
{'name': 'swift-user'},
{'name': '_member_'},
],
},
'token': {
'id': 'TOKEN_ID',
'tenant': {
'id': 'TENANT_ID',
'name': 'TENANT_NAME'
}
}
}}
GOOD_RESPONSE_V3 = {'token': {
'user': {
'domain': {
'name': 'Default',
'id': 'default',
},
'name': 'S3_USER',
'id': 'USER_ID',
},
'project': {
'domain': {
'name': 'PROJECT_DOMAIN_NAME',
'id': 'PROJECT_DOMAIN_ID',
},
'name': 'PROJECT_NAME',
'id': 'PROJECT_ID',
},
'roles': [
{'name': 'swift-user'},
{'name': '_member_'},
],
}}
class TestResponse(requests.Response):
"""Utility class to wrap requests.Response.
Class used to wrap requests.Response and provide some convenience to
initialize with a dict.
"""
def __init__(self, data):
self._text = None
super(TestResponse, self).__init__()
if isinstance(data, dict):
self.status_code = data.get('status_code', 200)
headers = data.get('headers')
if headers:
self.headers.update(headers)
# Fake the text attribute to streamline Response creation
# _content is defined by requests.Response
self._content = data.get('text')
else:
self.status_code = data
def __eq__(self, other):
return self.__dict__ == other.__dict__
@property
def text(self):
return self.content
class FakeApp(object):
"""This represents a WSGI app protected by the auth_token middleware."""
calls = 0
def __call__(self, env, start_response):
self.calls += 1
resp = Response()
resp.environ = env
return resp(env, start_response)
class S3TokenMiddlewareTestBase(unittest.TestCase):
TEST_AUTH_URI = 'https://fakehost/identity/v2.0'
TEST_URL = '%s/s3tokens' % (TEST_AUTH_URI, )
TEST_DOMAIN_ID = '1'
TEST_DOMAIN_NAME = 'aDomain'
TEST_GROUP_ID = uuid.uuid4().hex
TEST_ROLE_ID = uuid.uuid4().hex
TEST_TENANT_ID = '1'
TEST_TENANT_NAME = 'aTenant'
TEST_TOKEN = 'aToken'
TEST_TRUST_ID = 'aTrust'
TEST_USER = 'test'
TEST_USER_ID = uuid.uuid4().hex
TEST_ROOT_URL = 'http://127.0.0.1:5000/'
def setUp(self):
super(S3TokenMiddlewareTestBase, self).setUp()
self.logger = fixtures.FakeLogger(level=logging.DEBUG)
self.logger.setUp()
self.time_patcher = mock.patch.object(time, 'time', lambda: 1234)
self.time_patcher.start()
self.app = FakeApp()
self.conf = {
'auth_uri': self.TEST_AUTH_URI,
}
self.middleware = s3token.S3Token(self.app, self.conf)
self.requests_mock = rm_fixture.Fixture()
self.requests_mock.setUp()
def tearDown(self):
self.requests_mock.cleanUp()
self.time_patcher.stop()
self.logger.cleanUp()
super(S3TokenMiddlewareTestBase, self).tearDown()
def start_fake_response(self, status, headers):
self.response_status = int(status.split(' ', 1)[0])
self.response_headers = dict(headers)
class S3TokenMiddlewareTestGood(S3TokenMiddlewareTestBase):
def setUp(self):
super(S3TokenMiddlewareTestGood, self).setUp()
self.requests_mock.post(self.TEST_URL,
status_code=201,
json=GOOD_RESPONSE_V2)
# Ignore the request and pass to the next middleware in the
# pipeline if no path has been specified.
def test_no_path_request(self):
req = Request.blank('/')
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
# Ignore the request and pass to the next middleware in the
# pipeline if no Authorization header has been specified
def test_without_authorization(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
def test_nukes_auth_headers(self):
client_env = {
'HTTP_X_IDENTITY_STATUS': 'Confirmed',
'HTTP_X_ROLES': 'admin,_member_,swift-user',
'HTTP_X_TENANT_ID': 'cfa'
}
req = Request.blank('/v1/AUTH_cfa/c/o', environ=client_env)
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
for key in client_env:
self.assertNotIn(key, req.environ)
def test_without_auth_storage_token(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
req.headers['Authorization'] = 'AWS badboy'
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
def _assert_authorized(self, req, expect_token=True,
account_path='/v1/AUTH_TENANT_ID/'):
self.assertTrue(
req.path.startswith(account_path),
'%r does not start with %r' % (req.path, account_path))
expected_headers = {
'X-Identity-Status': 'Confirmed',
'X-Roles': 'swift-user,_member_',
'X-User-Id': 'USER_ID',
'X-User-Name': 'S3_USER',
'X-Tenant-Id': 'TENANT_ID',
'X-Tenant-Name': 'TENANT_NAME',
'X-Project-Id': 'TENANT_ID',
'X-Project-Name': 'TENANT_NAME',
'X-Auth-Token': 'TOKEN_ID',
}
for header, value in expected_headers.items():
if header == 'X-Auth-Token' and not expect_token:
self.assertNotIn(header, req.headers)
continue
self.assertIn(header, req.headers)
self.assertEqual(value, req.headers[header])
# WSGI wants native strings for headers
self.assertIsInstance(req.headers[header], str)
self.assertEqual(1, self.middleware._app.calls)
self.assertEqual(1, self.requests_mock.call_count)
request_call = self.requests_mock.request_history[0]
self.assertEqual(json.loads(request_call.body), {'credentials': {
'access': 'access',
'signature': 'signature',
'token': base64.urlsafe_b64encode(b'token').decode('ascii')}})
def test_authorized(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_tolerate_missing_token_id(self):
resp = copy.deepcopy(GOOD_RESPONSE_V2)
del resp['access']['token']['id']
self.requests_mock.post(self.TEST_URL,
status_code=201,
json=resp)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req, expect_token=False)
def test_authorized_bytes(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': b'access',
'signature': b'signature',
'string_to_sign': b'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_http(self):
auth_uri = 'http://fakehost:35357/v2.0'
self.requests_mock.post(
'%s/s3tokens' % auth_uri,
status_code=201, json=GOOD_RESPONSE_V2)
self.middleware = s3token.filter_factory({
'auth_uri': auth_uri})(self.app)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_v3(self):
# Prior to https://github.com/openstack/keystone/commit/dd1e705
# even v3 URLs would respond with a v2-format response
auth_uri = 'http://fakehost:35357/v3'
self.requests_mock.post(
'%s/s3tokens' % auth_uri,
status_code=201, json=GOOD_RESPONSE_V2)
self.middleware = s3token.filter_factory({
'auth_uri': auth_uri})(self.app)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_trailing_slash(self):
self.middleware = s3token.filter_factory({
'auth_uri': self.TEST_AUTH_URI + '/'})(self.app)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorization_nova_toconnect(self):
req = Request.blank('/v1/AUTH_swiftint/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access:FORCED_TENANT_ID',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req, account_path='/v1/AUTH_FORCED_TENANT_ID/')
@mock.patch.object(requests, 'post')
def test_insecure(self, MOCK_REQUEST):
self.middleware = s3token.filter_factory(
{'insecure': 'True', 'auth_uri': 'http://example.com'})(self.app)
text_return_value = json.dumps(GOOD_RESPONSE_V2)
MOCK_REQUEST.return_value = TestResponse({
'status_code': 201,
'text': text_return_value})
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self.assertTrue(MOCK_REQUEST.called)
mock_args, mock_kwargs = MOCK_REQUEST.call_args
self.assertIs(mock_kwargs['verify'], False)
def test_insecure_option(self):
# insecure is passed as a string.
# Some non-secure values.
true_values = ['true', 'True', '1', 'yes']
for val in true_values:
config = {'insecure': val,
'certfile': 'false_ind',
'auth_uri': 'http://example.com'}
middleware = s3token.filter_factory(config)(self.app)
self.assertIs(False, middleware._verify)
# Some "secure" values, including unexpected value.
false_values = ['false', 'False', '0', 'no', 'someweirdvalue']
for val in false_values:
config = {'insecure': val,
'certfile': 'false_ind',
'auth_uri': 'http://example.com'}
middleware = s3token.filter_factory(config)(self.app)
self.assertEqual('false_ind', middleware._verify)
# Default is secure.
config = {'certfile': 'false_ind',
'auth_uri': 'http://example.com'}
middleware = s3token.filter_factory(config)(self.app)
self.assertIs('false_ind', middleware._verify)
def test_auth_uris(self):
for conf, expected in [
({'auth_uri': 'https://example.com/v2.0'},
'https://example.com/v2.0/s3tokens'),
# Trailing slash doesn't interfere
({'auth_uri': 'https://example.com/v2.0/'},
'https://example.com/v2.0/s3tokens'),
# keystone running under mod_wsgi often has a path prefix
({'auth_uri': 'https://example.com/identity/v2.0'},
'https://example.com/identity/v2.0/s3tokens'),
({'auth_uri': 'https://example.com/identity/v2.0/'},
'https://example.com/identity/v2.0/s3tokens'),
# IPv4 addresses are fine
({'auth_uri': 'http://127.0.0.1:35357/v3'},
'http://127.0.0.1:35357/v3/s3tokens'),
({'auth_uri': 'http://127.0.0.1:35357/v3/'},
'http://127.0.0.1:35357/v3/s3tokens'),
# IPv6 addresses need [brackets] per RFC 3986
({'auth_uri': 'https://[::FFFF:129.144.52.38]:5000/v3'},
'https://[::FFFF:129.144.52.38]:5000/v3/s3tokens'),
({'auth_uri': 'https://[::FFFF:129.144.52.38]:5000/v3/'},
'https://[::FFFF:129.144.52.38]:5000/v3/s3tokens'),
]:
middleware = s3token.filter_factory(conf)(self.app)
self.assertEqual(expected, middleware._request_uri)
@mock.patch.object(requests, 'post')
def test_http_timeout(self, MOCK_REQUEST):
self.middleware = s3token.filter_factory({
'http_timeout': '2',
'auth_uri': 'http://example.com',
})(FakeApp())
MOCK_REQUEST.return_value = TestResponse({
'status_code': 201,
'text': json.dumps(GOOD_RESPONSE_V2)})
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self.assertTrue(MOCK_REQUEST.called)
mock_args, mock_kwargs = MOCK_REQUEST.call_args
self.assertEqual(mock_kwargs['timeout'], 2)
def test_http_timeout_option(self):
good_values = ['1', '5.3', '10', '.001']
for val in good_values:
middleware = s3token.filter_factory({
'http_timeout': val,
'auth_uri': 'http://example.com',
})(FakeApp())
self.assertEqual(float(val), middleware._timeout)
bad_values = ['1, 4', '-3', '100', 'foo', '0']
for val in bad_values:
with self.assertRaises(ValueError) as ctx:
s3token.filter_factory({
'http_timeout': val,
'auth_uri': 'http://example.com',
})(FakeApp())
self.assertTrue(ctx.exception.args[0].startswith((
'invalid literal for float():',
'could not convert string to float:',
'http_timeout must be between 0 and 60 seconds',
)), 'Unexpected error message: %s' % ctx.exception)
# default is 10 seconds
middleware = s3token.filter_factory({
'auth_uri': 'http://example.com'})(FakeApp())
self.assertEqual(10, middleware._timeout)
def test_bad_auth_uris(self):
for auth_uri in [
'/not/a/uri',
'http://',
'//example.com/path']:
with self.assertRaises(ConfigFileError) as cm:
s3token.filter_factory({'auth_uri': auth_uri})(self.app)
self.assertEqual('Invalid auth_uri; must include scheme and host',
cm.exception.message)
with self.assertRaises(ConfigFileError) as cm:
s3token.filter_factory({
'auth_uri': 'nonhttp://example.com'})(self.app)
self.assertEqual('Invalid auth_uri; scheme must be http or https',
cm.exception.message)
for auth_uri in [
'http://user@example.com/',
'http://example.com/?with=query',
'http://example.com/#with-fragment']:
with self.assertRaises(ConfigFileError) as cm:
s3token.filter_factory({'auth_uri': auth_uri})(self.app)
self.assertEqual('Invalid auth_uri; must not include username, '
'query, or fragment', cm.exception.message)
def test_unicode_path(self):
url = u'/v1/AUTH_cfa/c/euro\u20ac'.encode('utf8')
req = Request.blank(urllib.parse.quote(url))
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
class S3TokenMiddlewareTestBad(S3TokenMiddlewareTestBase):
def test_unauthorized_token(self):
ret = {"error":
{"message": "EC2 access key not found.",
"code": 401,
"title": "Unauthorized"}}
self.requests_mock.post(self.TEST_URL, status_code=403, json=ret)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
s3_denied_req = self.middleware._deny_request('AccessDenied')
self.assertEqual(resp.body, s3_denied_req.body)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
s3_denied_req.status_int) # pylint: disable-msg=E1101
self.assertEqual(0, self.middleware._app.calls)
self.assertEqual(1, self.requests_mock.call_count)
request_call = self.requests_mock.request_history[0]
self.assertEqual(json.loads(request_call.body), {'credentials': {
'access': 'access',
'signature': 'signature',
'token': base64.urlsafe_b64encode(b'token').decode('ascii')}})
def test_no_s3_creds_defers_to_auth_middleware(self):
# Without an Authorization header, we should just pass through to the
# auth system to make a decision.
req = Request.blank('/v1/AUTH_cfa/c/o')
resp = req.get_response(self.middleware)
self.assertEqual(resp.status_int, 200) # pylint: disable-msg=E1101
self.assertEqual(1, self.middleware._app.calls)
def test_fail_to_connect_to_keystone(self):
with mock.patch.object(self.middleware, '_json_request') as o:
s3_invalid_resp = self.middleware._deny_request('InvalidURI')
o.side_effect = s3_invalid_resp
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
self.assertEqual(resp.body, s3_invalid_resp.body)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
s3_invalid_resp.status_int) # pylint: disable-msg=E1101
self.assertEqual(0, self.middleware._app.calls)
def _test_bad_reply(self, response_body):
self.requests_mock.post(self.TEST_URL,
status_code=201,
text=response_body)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
s3_invalid_resp = self.middleware._deny_request('InvalidURI')
self.assertEqual(resp.body, s3_invalid_resp.body)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
s3_invalid_resp.status_int) # pylint: disable-msg=E1101
self.assertEqual(0, self.middleware._app.calls)
def test_bad_reply_not_json(self):
self._test_bad_reply('<badreply>')
def _test_bad_reply_missing_parts(self, *parts):
resp = copy.deepcopy(GOOD_RESPONSE_V2)
part_dict = resp
for part in parts[:-1]:
part_dict = part_dict[part]
del part_dict[parts[-1]]
self._test_bad_reply(json.dumps(resp))
def test_bad_reply_missing_token_dict(self):
self._test_bad_reply_missing_parts('access', 'token')
def test_bad_reply_missing_user_dict(self):
self._test_bad_reply_missing_parts('access', 'user')
def test_bad_reply_missing_user_roles(self):
self._test_bad_reply_missing_parts('access', 'user', 'roles')
def test_bad_reply_missing_user_name(self):
self._test_bad_reply_missing_parts('access', 'user', 'name')
def test_bad_reply_missing_user_id(self):
self._test_bad_reply_missing_parts('access', 'user', 'id')
def test_bad_reply_missing_tenant_dict(self):
self._test_bad_reply_missing_parts('access', 'token', 'tenant')
def test_bad_reply_missing_tenant_id(self):
self._test_bad_reply_missing_parts('access', 'token', 'tenant', 'id')
def test_bad_reply_missing_tenant_name(self):
self._test_bad_reply_missing_parts('access', 'token', 'tenant', 'name')
def test_bad_reply_valid_but_bad_json(self):
self._test_bad_reply('{}')
self._test_bad_reply('[]')
self._test_bad_reply('null')
self._test_bad_reply('"foo"')
self._test_bad_reply('1')
self._test_bad_reply('true')
class S3TokenMiddlewareTestDeferredAuth(S3TokenMiddlewareTestBase):
def setUp(self):
super(S3TokenMiddlewareTestDeferredAuth, self).setUp()
self.conf['delay_auth_decision'] = 'yes'
self.middleware = s3token.S3Token(FakeApp(), self.conf)
def test_unauthorized_token(self):
ret = {"error":
{"message": "EC2 access key not found.",
"code": 401,
"title": "Unauthorized"}}
self.requests_mock.post(self.TEST_URL, status_code=403, json=ret)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
200)
self.assertNotIn('X-Auth-Token', req.headers)
self.assertEqual(1, self.middleware._app.calls)
self.assertEqual(1, self.requests_mock.call_count)
request_call = self.requests_mock.request_history[0]
self.assertEqual(json.loads(request_call.body), {'credentials': {
'access': 'access',
'signature': 'signature',
'token': base64.urlsafe_b64encode(b'token').decode('ascii')}})
def test_fail_to_connect_to_keystone(self):
with mock.patch.object(self.middleware, '_json_request') as o:
o.side_effect = self.middleware._deny_request('InvalidURI')
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
200)
self.assertNotIn('X-Auth-Token', req.headers)
self.assertEqual(1, self.middleware._app.calls)
def test_bad_reply(self):
self.requests_mock.post(self.TEST_URL,
status_code=201,
text="<badreply>")
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
200)
self.assertNotIn('X-Auth-Token', req.headers)
self.assertEqual(1, self.middleware._app.calls)
class S3TokenMiddlewareTestV3(S3TokenMiddlewareTestBase):
def setUp(self):
super(S3TokenMiddlewareTestV3, self).setUp()
self.requests_mock.post(self.TEST_URL,
status_code=200,
json=GOOD_RESPONSE_V3)
def _assert_authorized(self, req,
account_path='/v1/AUTH_PROJECT_ID/'):
self.assertTrue(req.path.startswith(account_path))
expected_headers = {
'X-Identity-Status': 'Confirmed',
'X-Roles': 'swift-user,_member_',
'X-User-Id': 'USER_ID',
'X-User-Name': 'S3_USER',
'X-User-Domain-Id': 'default',
'X-User-Domain-Name': 'Default',
'X-Tenant-Id': 'PROJECT_ID',
'X-Tenant-Name': 'PROJECT_NAME',
'X-Project-Id': 'PROJECT_ID',
'X-Project-Name': 'PROJECT_NAME',
'X-Project-Domain-Id': 'PROJECT_DOMAIN_ID',
'X-Project-Domain-Name': 'PROJECT_DOMAIN_NAME',
}
for header, value in expected_headers.items():
self.assertIn(header, req.headers)
self.assertEqual(value, req.headers[header])
# WSGI wants native strings for headers
self.assertIsInstance(req.headers[header], str)
self.assertNotIn('X-Auth-Token', req.headers)
self.assertEqual(1, self.middleware._app.calls)
def test_authorized(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_bytes(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': b'access',
'signature': b'signature',
'string_to_sign': b'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_http(self):
# Following https://github.com/openstack/keystone/commit/3ec1aa4
# even v2 URLs would respond with a v3-format response
auth_uri = 'http://fakehost:35357/v2.0/'
self.requests_mock.post(
auth_uri + 's3tokens',
status_code=201, json=GOOD_RESPONSE_V3)
self.middleware = s3token.filter_factory({
'auth_uri': auth_uri})(self.app)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_v3(self):
auth_uri = 'http://fakehost:35357/v3/'
self.requests_mock.post(
auth_uri + 's3tokens',
status_code=201, json=GOOD_RESPONSE_V3)
self.middleware = s3token.filter_factory({
'auth_uri': auth_uri})(self.app)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_trailing_slash(self):
self.middleware = s3token.filter_factory({
'auth_uri': self.TEST_AUTH_URI + '/'})(self.app)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorization_nova_toconnect(self):
req = Request.blank('/v1/AUTH_swiftint/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access:FORCED_TENANT_ID',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req, account_path='/v1/AUTH_FORCED_TENANT_ID/')
def _test_bad_reply_missing_parts(self, *parts):
resp = copy.deepcopy(GOOD_RESPONSE_V3)
part_dict = resp
for part in parts[:-1]:
part_dict = part_dict[part]
del part_dict[parts[-1]]
self.requests_mock.post(self.TEST_URL,
status_code=201,
text=json.dumps(resp))
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['s3api.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
s3_invalid_resp = self.middleware._deny_request('InvalidURI')
self.assertEqual(resp.body, s3_invalid_resp.body)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
s3_invalid_resp.status_int) # pylint: disable-msg=E1101
self.assertEqual(0, self.middleware._app.calls)
def test_bad_reply_missing_parts(self):
self._test_bad_reply_missing_parts('token', 'user', 'id')
self._test_bad_reply_missing_parts('token', 'user', 'name')
self._test_bad_reply_missing_parts('token', 'user', 'domain', 'id')
self._test_bad_reply_missing_parts('token', 'user', 'domain', 'name')
self._test_bad_reply_missing_parts('token', 'user', 'domain')
self._test_bad_reply_missing_parts('token', 'user')
self._test_bad_reply_missing_parts('token', 'project', 'id')
self._test_bad_reply_missing_parts('token', 'project', 'name')
self._test_bad_reply_missing_parts('token', 'project', 'domain', 'id')
self._test_bad_reply_missing_parts('token', 'project', 'domain',
'name')
self._test_bad_reply_missing_parts('token', 'project', 'domain')
self._test_bad_reply_missing_parts('token', 'project')
self._test_bad_reply_missing_parts('token', 'roles')
import pytz
import datetime
import calendar as standardlib_calendar
from django.conf import settings
from django.template.defaultfilters import date as date_filter
from django.utils.translation import ugettext
from django.utils.dates import WEEKDAYS, WEEKDAYS_ABBR
from schedule.conf.settings import FIRST_DAY_OF_WEEK, SHOW_CANCELLED_OCCURRENCES
from schedule.models import Occurrence
from django.utils import timezone
weekday_names = []
weekday_abbrs = []
if FIRST_DAY_OF_WEEK == 1:
# The calendar week starts on Monday
for i in range(7):
weekday_names.append(WEEKDAYS[i])
weekday_abbrs.append(WEEKDAYS_ABBR[i])
else:
# The calendar week starts on Sunday, not Monday
weekday_names.append(WEEKDAYS[6])
weekday_abbrs.append(WEEKDAYS_ABBR[6])
for i in range(6):
weekday_names.append(WEEKDAYS[i])
weekday_abbrs.append(WEEKDAYS_ABBR[i])
class Period(object):
"""
This class represents a period of time. It can return a set of occurrences
based on its events, and its time period (start and end).
"""
def __init__(self, events, start, end, parent_persisted_occurrences=None,
occurrence_pool=None, tzinfo=timezone.get_current_timezone()):
self.utc_start = self._normalize_timezone_to_utc(start, tzinfo)
self.utc_end = self._normalize_timezone_to_utc(end, tzinfo)
self.events = events
self.tzinfo = self._get_tzinfo(tzinfo)
self.occurrence_pool = occurrence_pool
if parent_persisted_occurrences is not None:
self._persisted_occurrences = parent_persisted_occurrences
def _normalize_timezone_to_utc(self, point_in_time, tzinfo):
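# Aware datetimes are converted straight to UTC. Naive datetimes are localized to the
# supplied tzinfo first; with USE_TZ and no tzinfo they are assumed to already be UTC,
# otherwise they are returned unchanged.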
if point_in_time.tzinfo is not None:
return point_in_time.astimezone(pytz.utc)
if tzinfo is not None:
return tzinfo.localize(point_in_time).astimezone(pytz.utc)
if settings.USE_TZ:
return pytz.utc.localize(point_in_time)
else:
if timezone.is_aware(point_in_time):
return timezone.make_naive(point_in_time, pytz.utc)
else:
return point_in_time
def __eq__(self, period):
return self.utc_start == period.utc_start and self.utc_end == period.utc_end and self.events == period.events
def __ne__(self, period):
return self.utc_start != period.utc_start or self.utc_end != period.utc_end or self.events != period.events
def _get_tzinfo(self, tzinfo):
return tzinfo if settings.USE_TZ else None
def _get_sorted_occurrences(self):
occurrences = []
if hasattr(self, "occurrence_pool") and self.occurrence_pool is not None:
for occurrence in self.occurrence_pool:
if occurrence.start <= self.utc_end and occurrence.end >= self.utc_start:
occurrences.append(occurrence)
return occurrences
for event in self.events:
event_occurrences = event.get_occurrences(self.start, self.end)
occurrences += event_occurrences
return sorted(occurrences)
def cached_get_sorted_occurrences(self):
if hasattr(self, '_occurrences'):
return self._occurrences
occs = self._get_sorted_occurrences()
self._occurrences = occs
return occs
occurrences = property(cached_get_sorted_occurrences)
def get_persisted_occurrences(self):
if hasattr(self, '_persisted_occurrences'):
return self._persisted_occurrences
else:
self._persisted_occurrences = Occurrence.objects.filter(event__in=self.events)
return self._persisted_occurrences
def classify_occurrence(self, occurrence):
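"""Return a dict with the occurrence and a display class, or None if the occurrence
falls outside this period (or is cancelled and hidden): class 1 starts and ends inside
the period, class 0 only starts inside it, class 3 only ends inside it, and class 2
spans the whole period."""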
if occurrence.cancelled and not SHOW_CANCELLED_OCCURRENCES:
return
if occurrence.start > self.end or occurrence.end < self.start:
return None
started = False
ended = False
if self.utc_start <= occurrence.start < self.utc_end:
started = True
if self.utc_start <= occurrence.end < self.utc_end:
ended = True
if started and ended:
return {'occurrence': occurrence, 'class': 1}
elif started:
return {'occurrence': occurrence, 'class': 0}
elif ended:
return {'occurrence': occurrence, 'class': 3}
# it existed during this period but it didn't begin or end within it
# so it must have just continued
return {'occurrence': occurrence, 'class': 2}
def get_occurrence_partials(self):
occurrence_dicts = []
for occurrence in self.occurrences:
occurrence = self.classify_occurrence(occurrence)
if occurrence:
occurrence_dicts.append(occurrence)
return occurrence_dicts
def get_occurrences(self):
return self.occurrences
def has_occurrences(self):
return any(self.classify_occurrence(o) for o in self.occurrences)
def get_time_slot(self, start, end):
if start >= self.start and end <= self.end:
return Period(self.events, start, end)
return None
def create_sub_period(self, cls, start=None, tzinfo=None):
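"""Create a sub-period of type cls starting at `start`, reusing this period's persisted
occurrences and occurrence pool so occurrences are not re-expanded per sub-period."""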
if tzinfo is None:
tzinfo = self.tzinfo
start = start or self.start
return cls(self.events, start, self.get_persisted_occurrences(), self.occurrences, tzinfo)
def get_periods(self, cls, tzinfo=None):
if tzinfo is None:
tzinfo = self.tzinfo
period = self.create_sub_period(cls)
while period.start < self.end:
yield self.create_sub_period(cls, period.start, tzinfo)
period = period.next()
@property
def start(self):
if self.tzinfo is not None:
return self.utc_start.astimezone(self.tzinfo)
return self.utc_start.replace(tzinfo=None)
@property
def end(self):
if self.tzinfo is not None:
return self.utc_end.astimezone(self.tzinfo)
return self.utc_end.replace(tzinfo=None)
class Year(Period):
def __init__(self, events, date=None, parent_persisted_occurrences=None, tzinfo=pytz.utc):
self.tzinfo = self._get_tzinfo(tzinfo)
if date is None:
date = timezone.now()
start, end = self._get_year_range(date)
super(Year, self).__init__(events, start, end, parent_persisted_occurrences, tzinfo=tzinfo)
def get_months(self):
return self.get_periods(Month)
def next_year(self):
return Year(self.events, self.end, tzinfo=self.tzinfo)
next = next_year
def prev_year(self):
start = datetime.datetime(self.start.year - 1, self.start.month, self.start.day)
return Year(self.events, start, tzinfo=self.tzinfo)
prev = prev_year
def _get_year_range(self, year):
        # If tzinfo is not None, get the local start of the year and convert it to UTC.
naive_start = datetime.datetime(year.year, datetime.datetime.min.month, datetime.datetime.min.day)
naive_end = datetime.datetime(year.year + 1, datetime.datetime.min.month, datetime.datetime.min.day)
start = naive_start
end = naive_end
if self.tzinfo is not None:
local_start = self.tzinfo.localize(naive_start)
local_end = self.tzinfo.localize(naive_end)
start = local_start.astimezone(pytz.utc)
end = local_end.astimezone(pytz.utc)
return start, end
    def __unicode__(self):
        return u'%s' % self.start.year
class Month(Period):
"""
The month period has functions for retrieving the week periods within this period
    and day periods within the month.
"""
def __init__(self, events, date=None, parent_persisted_occurrences=None,
occurrence_pool=None, tzinfo=timezone.get_current_timezone()):
self.tzinfo = self._get_tzinfo(tzinfo)
if date is None:
date = timezone.now()
start, end = self._get_month_range(date)
super(Month, self).__init__(events, start, end,
parent_persisted_occurrences, occurrence_pool, tzinfo=tzinfo)
def get_weeks(self):
return self.get_periods(Week)
def get_days(self):
return self.get_periods(Day)
def get_day(self, daynumber):
date = self.start
if daynumber > 1:
date += datetime.timedelta(days=daynumber - 1)
return self.create_sub_period(Day, date)
def next_month(self):
return Month(self.events, self.end, tzinfo=self.tzinfo)
next = next_month
def prev_month(self):
start = (self.start - datetime.timedelta(days=1)).replace(day=1, tzinfo=self.tzinfo)
return Month(self.events, start, tzinfo=self.tzinfo)
prev = prev_month
def current_year(self):
return Year(self.events, self.start, tzinfo=self.tzinfo)
def prev_year(self):
start = datetime.datetime.min.replace(year=self.start.year - 1, tzinfo=self.tzinfo)
return Year(self.events, start, tzinfo=self.tzinfo)
def next_year(self):
start = datetime.datetime.min.replace(year=self.start.year + 1, tzinfo=self.tzinfo)
return Year(self.events, start, tzinfo=self.tzinfo)
def _get_month_range(self, month):
year = month.year
month = month.month
        # If tzinfo is not None, get the local start of the month and convert it to UTC.
naive_start = datetime.datetime.min.replace(year=year, month=month)
if month == 12:
naive_end = datetime.datetime.min.replace(month=1, year=year + 1, day=1)
else:
naive_end = datetime.datetime.min.replace(month=month + 1, year=year, day=1)
start = naive_start
end = naive_end
if self.tzinfo is not None:
local_start = self.tzinfo.localize(naive_start)
local_end = self.tzinfo.localize(naive_end)
start = local_start.astimezone(pytz.utc)
end = local_end.astimezone(pytz.utc)
return start, end
def __unicode__(self):
return self.name()
def name(self):
return standardlib_calendar.month_name[self.start.month]
def year(self):
return self.start.year
class Week(Period):
"""
The Week period that has functions for retrieving Day periods within it
"""
def __init__(self, events, date=None, parent_persisted_occurrences=None,
occurrence_pool=None, tzinfo=timezone.get_current_timezone()):
self.tzinfo = self._get_tzinfo(tzinfo)
if date is None:
date = timezone.now()
start, end = self._get_week_range(date)
super(Week, self).__init__(events, start, end,
parent_persisted_occurrences, occurrence_pool, tzinfo=tzinfo)
def prev_week(self):
return Week(self.events, self.start - datetime.timedelta(days=7), tzinfo=self.tzinfo)
prev = prev_week
def next_week(self):
return Week(self.events, self.end, tzinfo=self.tzinfo)
next = next_week
def current_month(self):
return Month(self.events, self.start, tzinfo=self.tzinfo)
def current_year(self):
return Year(self.events, self.start, tzinfo=self.tzinfo)
def get_days(self):
return self.get_periods(Day)
def _get_week_range(self, week):
if isinstance(week, datetime.datetime):
week = week.date()
# Adjust the start datetime to midnight of the week datetime
naive_start = datetime.datetime.combine(week, datetime.time.min)
# Adjust the start datetime to Monday or Sunday of the current week
if FIRST_DAY_OF_WEEK == 1:
# The week begins on Monday
sub_days = naive_start.isoweekday() - 1
else:
# The week begins on Sunday
sub_days = naive_start.isoweekday()
if sub_days == 7:
sub_days = 0
if sub_days > 0:
naive_start = naive_start - datetime.timedelta(days=sub_days)
naive_end = naive_start + datetime.timedelta(days=7)
if self.tzinfo is not None:
local_start = self.tzinfo.localize(naive_start)
local_end = self.tzinfo.localize(naive_end)
start = local_start.astimezone(pytz.utc)
end = local_end.astimezone(pytz.utc)
else:
start = naive_start
end = naive_end
return start, end
def __unicode__(self):
date_format = u'l, %s' % ugettext("DATE_FORMAT")
return ugettext('Week: %(start)s-%(end)s') % {
'start': date_filter(self.start, date_format),
'end': date_filter(self.end, date_format),
}
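# Illustrative sketch (not part of the original module): the week-start offset
# arithmetic from Week._get_week_range above, in isolation. With
# FIRST_DAY_OF_WEEK == 1 a Wednesday (isoweekday 3) is pulled back 2 days to
# Monday; otherwise it is pulled back 3 days to Sunday, and Sunday itself
# needs no adjustment.
def _week_start_offset_example(day, first_day_of_week=1):
    """Return how many days to subtract from ``day`` to reach the week start."""
    if first_day_of_week == 1:
        # The week begins on Monday.
        return day.isoweekday() - 1
    # The week begins on Sunday.
    return day.isoweekday() % 7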
class Day(Period):
def __init__(self, events, date=None, parent_persisted_occurrences=None,
occurrence_pool=None, tzinfo=timezone.get_current_timezone()):
self.tzinfo = self._get_tzinfo(tzinfo)
if date is None:
date = timezone.now()
start, end = self._get_day_range(date)
super(Day, self).__init__(events, start, end,
parent_persisted_occurrences, occurrence_pool, tzinfo=tzinfo)
def _get_day_range(self, date):
if isinstance(date, datetime.datetime):
date = date.date()
naive_start = datetime.datetime.combine(date, datetime.time.min)
naive_end = datetime.datetime.combine(date + datetime.timedelta(days=1), datetime.time.min)
if self.tzinfo is not None:
local_start = self.tzinfo.localize(naive_start)
local_end = self.tzinfo.localize(naive_end)
start = local_start.astimezone(pytz.utc)
end = local_end.astimezone(pytz.utc)
else:
start = naive_start
end = naive_end
return start, end
def __unicode__(self):
date_format = u'l, %s' % ugettext("DATE_FORMAT")
return ugettext('Day: %(start)s-%(end)s') % {
'start': date_filter(self.start, date_format),
'end': date_filter(self.end, date_format),
}
def prev_day(self):
return Day(self.events, self.start - datetime.timedelta(days=1), tzinfo=self.tzinfo)
prev = prev_day
def next_day(self):
return Day(self.events, self.end, tzinfo=self.tzinfo)
next = next_day
def current_year(self):
return Year(self.events, self.start, tzinfo=self.tzinfo)
def current_month(self):
return Month(self.events, self.start, tzinfo=self.tzinfo)
def current_week(self):
return Week(self.events, self.start, tzinfo=self.tzinfo)
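# Illustrative sketch (not part of the original module): how a calling view
# might walk a month and collect classified occurrences. ``Event.objects.all()``
# is an assumption standing in for whatever queryset the caller already has;
# the 'class' values come from Period.classify_occurrence above
# (0 = started, 1 = started and ended, 2 = continued, 3 = ended).
def _month_overview_example(events=None, when=None):
    events = events if events is not None else Event.objects.all()  # assumed model
    month = Month(events, when or timezone.now())
    for day in month.get_days():
        for partial in day.get_occurrence_partials():
            yield day, partial['occurrence'], partial['class']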
|
|
# sqlalchemy/pool.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base constructs for connection pools.
"""
from collections import deque
import time
import weakref
from .. import event
from .. import exc
from .. import interfaces
from .. import log
from .. import util
from ..util import threading
reset_rollback = util.symbol("reset_rollback")
reset_commit = util.symbol("reset_commit")
reset_none = util.symbol("reset_none")
class _ConnDialect(object):
"""partial implementation of :class:`.Dialect`
which provides DBAPI connection methods.
When a :class:`.Pool` is combined with an :class:`.Engine`,
the :class:`.Engine` replaces this with its own
:class:`.Dialect`.
"""
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
def do_ping(self, dbapi_connection):
raise NotImplementedError(
"The ping feature requires that a dialect is "
"passed to the connection pool."
)
class Pool(log.Identified):
"""Abstract base class for connection pools."""
_dialect = _ConnDialect()
@util.deprecated_params(
listeners=(
"0.7",
":class:`.PoolListener` is deprecated in favor of the "
":class:`.PoolEvents` listener interface. The "
":paramref:`.Pool.listeners` parameter will be removed in a "
"future release.",
)
)
def __init__(
self,
creator,
recycle=-1,
echo=None,
logging_name=None,
reset_on_return=True,
listeners=None,
events=None,
dialect=None,
pre_ping=False,
_dispatch=None,
):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
parameters.
:param recycle: If set to a value other than -1, number of
seconds between connection recycling, which means upon
checkout, if this timeout is surpassed the connection will be
closed and replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
          which defaults to ``sys.stdout`` for output. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
The :paramref:`.Pool.echo` parameter can also be set from the
:func:`.create_engine` call by using the
:paramref:`.create_engine.echo_pool` parameter.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param reset_on_return: Determine steps to take on
connections as they are returned to the pool.
reset_on_return can have any of these values:
* ``"rollback"`` - call rollback() on the connection,
to release locks and transaction resources.
This is the default value. The vast majority
of use cases should leave this value set.
* ``True`` - same as 'rollback', this is here for
backwards compatibility.
* ``"commit"`` - call commit() on the connection,
to release locks and transaction resources.
A commit here may be desirable for databases that
cache query plans if a commit is emitted,
such as Microsoft SQL Server. However, this
value is more dangerous than 'rollback' because
any data changes present on the transaction
are committed unconditionally.
* ``None`` - don't do anything on the connection.
This setting should generally only be made on a database
that has no transaction support at all,
namely MySQL MyISAM; when used on this backend, performance
can be improved as the "rollback" call is still expensive on
MySQL. It is **strongly recommended** that this setting not be
used for transaction-supporting databases in conjunction with
a persistent pool such as :class:`.QueuePool`, as it opens
the possibility for connections still in a transaction to be
idle in the pool. The setting may be appropriate in the
case of :class:`.NullPool` or special circumstances where
the connection pool in use is not being used to maintain connection
lifecycle.
* ``False`` - same as None, this is here for
backwards compatibility.
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to :func:`.event.listen`
upon construction. Provided here so that event listeners
can be assigned via :func:`.create_engine` before dialect-level
listeners are applied.
:param listeners: A list of :class:`.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool.
:param dialect: a :class:`.Dialect` that will handle the job
of calling rollback(), close(), or commit() on DBAPI connections.
If omitted, a built-in "stub" dialect is used. Applications that
make use of :func:`~.create_engine` should not use this parameter
as it is handled by the engine creation strategy.
.. versionadded:: 1.1 - ``dialect`` is now a public parameter
to the :class:`.Pool`.
:param pre_ping: if True, the pool will emit a "ping" (typically
"SELECT 1", but is dialect-specific) on the connection
upon checkout, to test if the connection is alive or not. If not,
the connection is transparently re-connected and upon success, all
other pooled connections established prior to that timestamp are
invalidated. Requires that a dialect is passed as well to
interpret the disconnection error.
.. versionadded:: 1.2
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
self._recycle = recycle
self._invalidate_time = 0
self._pre_ping = pre_ping
self._reset_on_return = util.symbol.parse_user_argument(
reset_on_return,
{
reset_rollback: ["rollback", True],
reset_none: ["none", None, False],
reset_commit: ["commit"],
},
"reset_on_return",
resolve_symbol_names=False,
)
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if dialect:
self._dialect = dialect
if events:
for fn, target in events:
event.listen(self, target, fn)
if listeners:
for l in listeners:
self.add_listener(l)
@property
def _creator(self):
return self.__dict__["_creator"]
@_creator.setter
def _creator(self, creator):
self.__dict__["_creator"] = creator
self._invoke_creator = self._should_wrap_creator(creator)
def _should_wrap_creator(self, creator):
"""Detect if creator accepts a single argument, or is sent
as a legacy style no-arg function.
"""
try:
argspec = util.get_callable_argspec(self._creator, no_self=True)
except TypeError:
return lambda crec: creator()
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
# look for the exact arg signature that DefaultStrategy
# sends us
if (argspec[0], argspec[3]) == (["connection_record"], (None,)):
return creator
# or just a single positional
elif positionals == 1:
return creator
# all other cases, just wrap and assume legacy "creator" callable
# thing
else:
return lambda crec: creator()
def _close_connection(self, connection):
self.logger.debug("Closing connection %r", connection)
try:
self._dialect.do_close(connection)
except Exception:
self.logger.error(
"Exception closing connection %r", connection, exc_info=True
)
@util.deprecated(
"0.7",
"The :meth:`.Pool.add_listener` method is deprecated and "
"will be removed in a future release. Please use the "
":class:`.PoolEvents` listener interface.",
)
def add_listener(self, listener):
"""Add a :class:`.PoolListener`-like object to this pool.
``listener`` may be an object that implements some or all of
PoolListener, or a dictionary of callables containing implementations
of some or all of the named methods in PoolListener.
"""
interfaces.PoolListener._adapt_listener(self, listener)
def _create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def _invalidate(self, connection, exception=None, _checkin=True):
"""Mark all connections established within the generation
of the given connection as invalidated.
If this pool's last invalidate time is before when the given
connection was created, update the timestamp til now. Otherwise,
no action is performed.
Connections with a start time prior to this pool's invalidation
time will be recycled upon next checkout.
"""
rec = getattr(connection, "_connection_record", None)
if not rec or self._invalidate_time < rec.starttime:
self._invalidate_time = time.time()
if _checkin and getattr(connection, "is_valid", False):
connection.invalidate(exception)
def recreate(self):
"""Return a new :class:`.Pool`, of the same class as this one
and configured with identical creation arguments.
This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open, as it only affects connections that are
idle in the pool.
.. seealso::
:meth:`Pool.recreate`
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
return _ConnectionFairy._checkout(self)
def _return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
self._do_return_conn(record)
def _do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
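# Illustrative sketch (not part of this module): applications normally do not
# construct a Pool directly; the options documented in Pool.__init__ are
# usually passed through create_engine(). The database URL below is an
# assumption for demonstration only.
def _example_engine_with_pool_options():
    from sqlalchemy import create_engine

    return create_engine(
        "postgresql://scott:tiger@localhost/test",  # assumed example URL
        pool_recycle=3600,    # maps to Pool(recycle=3600)
        pool_pre_ping=True,   # maps to Pool(pre_ping=True)
        echo_pool="debug",    # maps to Pool(echo="debug")
    )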
class _ConnectionRecord(object):
"""Internal object which maintains an individual DBAPI connection
referenced by a :class:`.Pool`.
The :class:`._ConnectionRecord` object always exists for any particular
DBAPI connection whether or not that DBAPI connection has been
"checked out". This is in contrast to the :class:`._ConnectionFairy`
which is only a public facade to the DBAPI connection while it is checked
out.
A :class:`._ConnectionRecord` may exist for a span longer than that
of a single DBAPI connection. For example, if the
:meth:`._ConnectionRecord.invalidate`
method is called, the DBAPI connection associated with this
:class:`._ConnectionRecord`
will be discarded, but the :class:`._ConnectionRecord` may be used again,
in which case a new DBAPI connection is produced when the :class:`.Pool`
next uses this record.
The :class:`._ConnectionRecord` is delivered along with connection
pool events, including :meth:`.PoolEvents.connect` and
:meth:`.PoolEvents.checkout`, however :class:`._ConnectionRecord` still
remains an internal object whose API and internals may change.
.. seealso::
:class:`._ConnectionFairy`
"""
def __init__(self, pool, connect=True):
self.__pool = pool
if connect:
self.__connect(first_connect_check=True)
self.finalize_callback = deque()
fairy_ref = None
starttime = None
connection = None
"""A reference to the actual DBAPI connection being tracked.
May be ``None`` if this :class:`._ConnectionRecord` has been marked
as invalidated; a new DBAPI connection may replace it if the owning
pool calls upon this :class:`._ConnectionRecord` to reconnect.
"""
_soft_invalidate_time = 0
@util.memoized_property
def info(self):
"""The ``.info`` dictionary associated with the DBAPI connection.
This dictionary is shared among the :attr:`._ConnectionFairy.info`
and :attr:`.Connection.info` accessors.
.. note::
The lifespan of this dictionary is linked to the
DBAPI connection itself, meaning that it is **discarded** each time
the DBAPI connection is closed and/or invalidated. The
:attr:`._ConnectionRecord.record_info` dictionary remains
persistent throughout the lifespan of the
:class:`._ConnectionRecord` container.
"""
return {}
@util.memoized_property
def record_info(self):
"""An "info' dictionary associated with the connection record
itself.
Unlike the :attr:`._ConnectionRecord.info` dictionary, which is linked
to the lifespan of the DBAPI connection, this dictionary is linked
to the lifespan of the :class:`._ConnectionRecord` container itself
and will remain persistent throughout the life of the
:class:`._ConnectionRecord`.
.. versionadded:: 1.1
"""
return {}
@classmethod
def checkout(cls, pool):
rec = pool._do_get()
try:
dbapi_connection = rec.get_connection()
except Exception as err:
with util.safe_reraise():
rec._checkin_failed(err)
echo = pool._should_log_debug()
fairy = _ConnectionFairy(dbapi_connection, rec, echo)
rec.fairy_ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy
and _finalize_fairy(None, rec, pool, ref, echo),
)
_refs.add(rec)
if echo:
pool.logger.debug(
"Connection %r checked out from pool", dbapi_connection
)
return fairy
def _checkin_failed(self, err):
self.invalidate(e=err)
self.checkin(_no_fairy_ref=True)
def checkin(self, _no_fairy_ref=False):
if self.fairy_ref is None and not _no_fairy_ref:
util.warn("Double checkin attempted on %s" % self)
return
self.fairy_ref = None
connection = self.connection
pool = self.__pool
while self.finalize_callback:
finalizer = self.finalize_callback.pop()
finalizer(connection)
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, self)
pool._return_conn(self)
@property
def in_use(self):
return self.fairy_ref is not None
@property
def last_connect_time(self):
return self.starttime
def close(self):
if self.connection is not None:
self.__close()
def invalidate(self, e=None, soft=False):
"""Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`.
This method is called for all connection invalidations, including
when the :meth:`._ConnectionFairy.invalidate` or
:meth:`.Connection.invalidate` methods are called, as well as when any
so-called "automatic invalidation" condition occurs.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
# already invalidated
if self.connection is None:
return
if soft:
self.__pool.dispatch.soft_invalidate(self.connection, self, e)
else:
self.__pool.dispatch.invalidate(self.connection, self, e)
if e is not None:
self.__pool.logger.info(
"%sInvalidate connection %r (reason: %s:%s)",
"Soft " if soft else "",
self.connection,
e.__class__.__name__,
e,
)
else:
self.__pool.logger.info(
"%sInvalidate connection %r",
"Soft " if soft else "",
self.connection,
)
if soft:
self._soft_invalidate_time = time.time()
else:
self.__close()
self.connection = None
def get_connection(self):
recycle = False
if self.connection is None:
self.info.clear()
self.__connect()
elif (
self.__pool._recycle > -1
and time.time() - self.starttime > self.__pool._recycle
):
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling", self.connection
)
recycle = True
elif self.__pool._invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to pool invalidation; "
+ "recycling",
self.connection,
)
recycle = True
elif self._soft_invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to local soft invalidation; "
+ "recycling",
self.connection,
)
recycle = True
if recycle:
self.__close()
self.info.clear()
self.__connect()
return self.connection
def __close(self):
self.finalize_callback.clear()
if self.__pool.dispatch.close:
self.__pool.dispatch.close(self.connection, self)
self.__pool._close_connection(self.connection)
self.connection = None
def __connect(self, first_connect_check=False):
pool = self.__pool
# ensure any existing connection is removed, so that if
# creator fails, this attribute stays None
self.connection = None
try:
self.starttime = time.time()
connection = pool._invoke_creator(self)
pool.logger.debug("Created new connection %r", connection)
self.connection = connection
except Exception as e:
pool.logger.debug("Error on connect(): %s", e)
raise
else:
if first_connect_check:
pool.dispatch.first_connect.for_modify(
pool.dispatch
).exec_once(self.connection, self)
if pool.dispatch.connect:
pool.dispatch.connect(self.connection, self)
def _finalize_fairy(
connection, connection_record, pool, ref, echo, fairy=None
):
"""Cleanup for a :class:`._ConnectionFairy` whether or not it's already
been garbage collected.
"""
_refs.discard(connection_record)
if ref is not None:
if connection_record.fairy_ref is not ref:
return
assert connection is None
connection = connection_record.connection
if connection is not None:
if connection_record and echo:
pool.logger.debug(
"Connection %r being returned to pool", connection
)
try:
fairy = fairy or _ConnectionFairy(
connection, connection_record, echo
)
assert fairy.connection is connection
fairy._reset(pool)
# Immediately close detached instances
if not connection_record:
if pool.dispatch.close_detached:
pool.dispatch.close_detached(connection)
pool._close_connection(connection)
except BaseException as e:
pool.logger.error(
"Exception during reset or similar", exc_info=True
)
if connection_record:
connection_record.invalidate(e=e)
if not isinstance(e, Exception):
raise
if connection_record and connection_record.fairy_ref is not None:
connection_record.checkin()
_refs = set()
class _ConnectionFairy(object):
"""Proxies a DBAPI connection and provides return-on-dereference
support.
This is an internal object used by the :class:`.Pool` implementation
to provide context management to a DBAPI connection delivered by
that :class:`.Pool`.
The name "fairy" is inspired by the fact that the
:class:`._ConnectionFairy` object's lifespan is transitory, as it lasts
only for the length of a specific DBAPI connection being checked out from
the pool, and additionally that as a transparent proxy, it is mostly
invisible.
.. seealso::
:class:`._ConnectionRecord`
"""
def __init__(self, dbapi_connection, connection_record, echo):
self.connection = dbapi_connection
self._connection_record = connection_record
self._echo = echo
connection = None
"""A reference to the actual DBAPI connection being tracked."""
_connection_record = None
"""A reference to the :class:`._ConnectionRecord` object associated
with the DBAPI connection.
This is currently an internal accessor which is subject to change.
"""
_reset_agent = None
"""Refer to an object with a ``.commit()`` and ``.rollback()`` method;
if non-None, the "reset-on-return" feature will call upon this object
rather than directly against the dialect-level do_rollback() and
do_commit() methods.
In practice, a :class:`.Connection` assigns a :class:`.Transaction` object
to this variable when one is in scope so that the :class:`.Transaction`
takes the job of committing or rolling back on return if
:meth:`.Connection.close` is called while the :class:`.Transaction`
still exists.
This is essentially an "event handler" of sorts but is simplified as an
instance variable both for performance/simplicity as well as that there
can only be one "reset agent" at a time.
"""
@classmethod
def _checkout(cls, pool, threadconns=None, fairy=None):
if not fairy:
fairy = _ConnectionRecord.checkout(pool)
fairy._pool = pool
fairy._counter = 0
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
if fairy.connection is None:
raise exc.InvalidRequestError("This connection is closed")
fairy._counter += 1
if (
not pool.dispatch.checkout and not pool._pre_ping
) or fairy._counter != 1:
return fairy
# Pool listeners can trigger a reconnection on checkout, as well
# as the pre-pinger.
# there are three attempts made here, but note that if the database
# is not accessible from a connection standpoint, those won't proceed
# here.
attempts = 2
while attempts > 0:
try:
if pool._pre_ping:
if fairy._echo:
pool.logger.debug(
"Pool pre-ping on connection %s", fairy.connection
)
result = pool._dialect.do_ping(fairy.connection)
if not result:
if fairy._echo:
pool.logger.debug(
"Pool pre-ping on connection %s failed, "
"will invalidate pool",
fairy.connection,
)
raise exc.InvalidatePoolError()
pool.dispatch.checkout(
fairy.connection, fairy._connection_record, fairy
)
return fairy
except exc.DisconnectionError as e:
if e.invalidate_pool:
pool.logger.info(
"Disconnection detected on checkout, "
"invalidating all pooled connections prior to "
"current timestamp (reason: %r)",
e,
)
fairy._connection_record.invalidate(e)
pool._invalidate(fairy, e, _checkin=False)
else:
pool.logger.info(
"Disconnection detected on checkout, "
"invalidating individual connection %s (reason: %r)",
fairy.connection,
e,
)
fairy._connection_record.invalidate(e)
try:
fairy.connection = (
fairy._connection_record.get_connection()
)
except Exception as err:
with util.safe_reraise():
fairy._connection_record._checkin_failed(err)
attempts -= 1
pool.logger.info("Reconnection attempts exhausted on checkout")
fairy.invalidate()
raise exc.InvalidRequestError("This connection is closed")
def _checkout_existing(self):
return _ConnectionFairy._checkout(self._pool, fairy=self)
def _checkin(self):
_finalize_fairy(
self.connection,
self._connection_record,
self._pool,
None,
self._echo,
fairy=self,
)
self.connection = None
self._connection_record = None
_close = _checkin
def _reset(self, pool):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
if self._echo:
pool.logger.debug(
"Connection %s rollback-on-return%s",
self.connection,
", via agent" if self._reset_agent else "",
)
if self._reset_agent:
self._reset_agent.rollback()
else:
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
if self._echo:
pool.logger.debug(
"Connection %s commit-on-return%s",
self.connection,
", via agent" if self._reset_agent else "",
)
if self._reset_agent:
self._reset_agent.commit()
else:
pool._dialect.do_commit(self)
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
"""Return True if this :class:`._ConnectionFairy` still refers
to an active DBAPI connection."""
return self.connection is not None
@util.memoized_property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.ConnectionFairy`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`._ConnectionFairy`. It is shared
with the :attr:`._ConnectionRecord.info` and :attr:`.Connection.info`
accessors.
The dictionary associated with a particular DBAPI connection is
discarded when the connection itself is discarded.
"""
return self._connection_record.info
@property
def record_info(self):
"""Info dictionary associated with the :class:`._ConnectionRecord
container referred to by this :class:`.ConnectionFairy`.
Unlike the :attr:`._ConnectionFairy.info` dictionary, the lifespan
of this dictionary is persistent across connections that are
disconnected and/or invalidated within the lifespan of a
:class:`._ConnectionRecord`.
.. versionadded:: 1.1
"""
if self._connection_record:
return self._connection_record.record_info
else:
return None
def invalidate(self, e=None, soft=False):
"""Mark this connection as invalidated.
This method can be called directly, and is also called as a result
of the :meth:`.Connection.invalidate` method. When invoked,
the DBAPI connection is immediately closed and discarded from
further use by the pool. The invalidation mechanism proceeds
via the :meth:`._ConnectionRecord.invalidate` internal method.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.connection is None:
util.warn("Can't invalidate an already-closed connection.")
return
if self._connection_record:
self._connection_record.invalidate(e=e, soft=soft)
if not soft:
self.connection = None
self._checkin()
def cursor(self, *args, **kwargs):
"""Return a new DBAPI cursor for the underlying connection.
This method is a proxy for the ``connection.cursor()`` DBAPI
method.
"""
return self.connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.connection, key)
def detach(self):
"""Separate this connection from its Pool.
This means that the connection will no longer be returned to the
pool when closed, and will instead be literally closed. The
containing ConnectionRecord is separated from the DB-API connection,
and will create a new connection when next used.
Note that any overall connection limiting constraints imposed by a
Pool implementation may be violated after a detach, as the detached
connection is removed from the pool's knowledge and control.
"""
if self._connection_record is not None:
rec = self._connection_record
_refs.remove(rec)
rec.fairy_ref = None
rec.connection = None
# TODO: should this be _return_conn?
self._pool._do_return_conn(self._connection_record)
self.info = self.info.copy()
self._connection_record = None
if self._pool.dispatch.detach:
self._pool.dispatch.detach(self.connection, rec)
def close(self):
self._counter -= 1
if self._counter == 0:
self._checkin()
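# Illustrative sketch (not part of this module): the checkout/checkin cycle the
# classes above implement. NullPool and the sqlite3 creator are assumptions,
# chosen only so the example is self-contained.
def _example_checkout_cycle():
    import sqlite3
    from sqlalchemy.pool import NullPool

    pool = NullPool(lambda: sqlite3.connect(":memory:"))
    fairy = pool.connect()    # _ConnectionFairy._checkout(pool)
    cursor = fairy.cursor()   # proxied straight to the DBAPI connection
    cursor.execute("select 1")
    fairy.close()             # _checkin(): reset-on-return, then return to pool
    return pool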
|
|
__author__ = 'Tom Schaul, tom@idsia.ch and Daan Wierstra, daan@idsia.ch'
from scipy import zeros, array, mean, randn, exp, dot, argmax
from learner import Learner
from pybrain.datasets import ReinforcementDataSet, ImportanceDataSet, SequentialDataSet
from pybrain.supervised import BackpropTrainer
from pybrain.utilities import drawIndex
# TODO: greedy runs: start once in every possible starting state!
# TODO: supervised: train-set, test-set, early stopping -> actual convergence!
class RWR(Learner):
""" Reward-weighted regression.
The algorithm is currently limited to discrete-action episodic tasks, subclasses of POMDPTasks.
"""
# parameters
batchSize = 20
# feedback settings
verbose = True
greedyRuns = 20
supervisedPlotting = False
# settings for the supervised training
learningRate = 0.005
momentum = 0.9
maxEpochs = 20
validationProportion = 0.33
continueEpochs = 2
# parameters for the variation that uses a value function
# TODO: split into 2 classes.
valueLearningRate = None
valueMomentum = None
#valueTrainEpochs = 5
resetAllWeights = False
netweights = 0.01
def __init__(self, net, task, valueNetwork = None, **args):
self.net = net
self.task = task
self.setArgs(**args)
if self.valueLearningRate == None:
self.valueLearningRate = self.learningRate
if self.valueMomentum == None:
self.valueMomentum = self.momentum
if self.supervisedPlotting:
from pylab import ion
ion()
# adaptive temperature:
self.tau = 1.
# prepare the datasets to be used
self.weightedDs = ImportanceDataSet(self.task.outdim, self.task.indim)
self.rawDs = ReinforcementDataSet(self.task.outdim, self.task.indim)
self.valueDs = SequentialDataSet(self.task.outdim, 1)
# prepare the supervised trainers
self.bp = BackpropTrainer(self.net, self.weightedDs, self.learningRate,
self.momentum, verbose= False,
batchlearning = True)
# CHECKME: outsource
self.vnet = valueNetwork
if valueNetwork != None:
self.vbp = BackpropTrainer(self.vnet, self.valueDs, self.valueLearningRate,
self.valueMomentum, verbose = self.verbose)
# keep information:
self.totalSteps = 0
self.totalEpisodes = 0
def shapingFunction(self, R):
return exp(self.tau * R)
def updateTau(self, R, U):
self.tau = sum(U)/dot((R - self.task.minReward), U)
def reset(self):
self.weightedDs.clear()
self.valueDs.clear()
self.rawDs.clear()
self.bp.momentumvector *= 0.0
if self.vnet != None:
self.vbp.momentumvector *= 0.0
if self.resetAllWeights:
self.vnet.params[:] = randn(len(self.vnet.params))*self.netweights
def greedyEpisode(self):
""" run one episode with greedy decisions, return the list of rewards recieved."""
rewards = []
self.task.reset()
self.net.reset()
while not self.task.isFinished():
obs = self.task.getObservation()
act = self.net.activate(obs)
chosen = argmax(act)
self.task.performAction(chosen)
reward = self.task.getReward()
rewards.append(reward)
return rewards
def learn(self, batches):
self.greedyAvg = []
self.rewardAvg = []
self.lengthAvg = []
self.initr0Avg = []
for b in range(batches):
if self.verbose:
print
print 'Batch', b+1
self.reset()
self.learnOneBatch()
self.totalEpisodes += self.batchSize
# greedy measure (avg over some greedy runs)
rws = 0.
for dummy in range(self.greedyRuns):
tmp = self.greedyEpisode()
rws += (sum(tmp)/float(len(tmp)))
self.greedyAvg.append(rws/self.greedyRuns)
if self.verbose:
print '::', round(rws/self.greedyRuns, 5), '::'
def learnOneBatch(self):
# collect a batch of runs as experience
r0s = []
lens = []
avgReward = 0.
for dummy in range(self.batchSize):
self.rawDs.newSequence()
self.valueDs.newSequence()
self.task.reset()
self.net.reset()
acts, obss, rewards = [],[],[]
while not self.task.isFinished():
obs = self.task.getObservation()
act = self.net.activate(obs)
chosen = drawIndex(act)
self.task.performAction(chosen)
reward = self.task.getReward()
obss.append(obs)
y = zeros(len(act))
y[chosen] = 1
acts.append(y)
rewards.append(reward)
avgReward += sum(rewards)/float(len(rewards))
# compute the returns from the list of rewards
current = 0
returns = []
for r in reversed(rewards):
current *= self.task.discount
current += r
returns.append(current)
returns.reverse()
for i in range(len(obss)):
self.rawDs.addSample(obss[i], acts[i], returns[i])
self.valueDs.addSample(obss[i], returns[i])
r0s.append(returns[0])
lens.append(len(returns))
r0s = array(r0s)
self.totalSteps += sum(lens)
avgLen = sum(lens)/float(self.batchSize)
avgR0 = mean(r0s)
avgReward /= self.batchSize
if self.verbose:
print '***', round(avgLen, 3), '***', '(avg init exp. return:', round(avgR0, 5), ')',
print 'avg reward', round(avgReward, 5), '(tau:', round(self.tau, 3), ')'
print lens
# storage:
self.rewardAvg.append(avgReward)
self.lengthAvg.append(avgLen)
self.initr0Avg.append(avgR0)
# if self.vnet == None:
# # case 1: no value estimator:
# prepare the dataset for training the acting network
shaped = self.shapingFunction(r0s)
self.updateTau(r0s, shaped)
shaped /= max(shaped)
for i, seq in enumerate(self.rawDs):
self.weightedDs.newSequence()
for sample in seq:
obs, act, dummy = sample
self.weightedDs.addSample(obs, act, shaped[i])
# else:
# # case 2: value estimator:
#
#
# # train the value estimating network
# if self.verbose: print 'Old value error: ', self.vbp.testOnData()
# self.vbp.trainEpochs(self.valueTrainEpochs)
# if self.verbose: print 'New value error: ', self.vbp.testOnData()
#
# # produce the values and analyze
# rminusvs = []
# sizes = []
# for i, seq in enumerate(self.valueDs):
# self.vnet.reset()
# seq = list(seq)
# for sample in seq:
# obs, ret = sample
# val = self.vnet.activate(obs)
# rminusvs.append(ret-val)
# sizes.append(len(seq))
#
# rminusvs = array(rminusvs)
# shapedRminusv = self.shapingFunction(rminusvs)
# # CHECKME: here?
# self.updateTau(rminusvs, shapedRminusv)
# shapedRminusv /= array(sizes)
# shapedRminusv /= max(shapedRminusv)
#
# # prepare the dataset for training the acting network
# rvindex = 0
# for i, seq in enumerate(self.rawDs):
# self.weightedDs.newSequence()
# self.vnet.reset()
# for sample in seq:
# obs, act, ret = sample
# self.weightedDs.addSample(obs, act, shapedRminusv[rvindex])
# rvindex += 1
# train the acting network
tmp1, tmp2 = self.bp.trainUntilConvergence(maxEpochs = self.maxEpochs,
validationProportion = self.validationProportion,
continueEpochs = self.continueEpochs,
verbose = self.verbose)
if self.supervisedPlotting:
from pylab import plot, legend, figure, clf, draw
figure(1)
clf()
plot(tmp1, label = 'train')
plot(tmp2, label = 'valid')
legend()
draw()
return avgLen, avgR0
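# Illustrative sketch (not part of the original module): the exponential reward
# shaping used by learnOneBatch above, in isolation. Returns R are mapped to
# weights u_i = exp(tau * R_i); tau is then re-estimated exactly as in
# updateTau(), i.e. sum(U) / dot(R - minReward, U), before the weights are
# normalized by their maximum for the importance dataset.
def _shaping_example(returns, tau=1., min_reward=0.):
    R = array(returns)
    U = exp(tau * R)                            # shapingFunction
    new_tau = sum(U) / dot(R - min_reward, U)   # updateTau
    weights = U / max(U)                        # normalization in learnOneBatch
    return weights, new_tau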
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Real-time and echo stress tests of network in Crazy Cake.
'''
__author__ = 'Ethan Kennerly'
from client import *
# Real-time
# from decorator import decorator
# @decorator
# XXX decorator stomps docstring?
def digest_and(do, *args, **kwargs):
'''Force eat to complete, wait a second, then do.
>>> gateway_process = configuration.subprocess_gateway(configuration.amf_host, 'embassy.py', verbose)
>>> joris = configuration.globe_class()
>>> joris.setup(configuration.mock_speed, configuration.setup_client)
>>> time.sleep(1.0 / joris._speed)
>>> mouse_down_and_sleep = digest_and(mouse_down_and_sleep)
>>> mouse_down_and_sleep(joris, joris.root.title_mc.start_btn, 1.0 / joris._speed)
'''
def digest_and_do(*args, **kwargs):
globe = args[0]
set_property(globe, globe.root.eat_mc.act_mc, 'currentLabel', 'none')
time.sleep(1.0 / globe._speed)
diff = property_diff(globe, globe.root.eat_mc.act_mc, 'currentLabel', 'none')
if diff:
return diff
else:
return do(*args, **kwargs)
return digest_and_do
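# Illustrative sketch (not part of the original script): the same
# "reset state, wait, then call through" wrapper pattern that digest_and uses,
# written against plain callables so it reads without the client globals.
def reset_and(do, reset, delay=1.0):
    def reset_and_do(*args, **kwargs):
        reset()             # force the pending state (e.g. eating) to finish
        time.sleep(delay)   # give the client a moment to digest the change
        return do(*args, **kwargs)
    return reset_and_do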
def real_time_example():
'''
ethan and joris start
>>> code_unit.inline_examples(
... ethan_joris_start_example.__doc__,
... locals(), globals(),
... verify_examples = False)
by default, turns are required.
ethan may not move.
white, 0, 0
>>> mouse_down_and_sleep(ethan, ethan.root._0_0_mc, 1.0 / ethan._speed)
>>> board_diff(ethan, ethan.root._0_0_mc, 'currentLabel', 'empty_white')
joris moves.
black, 0, 1
Soon, Joris previews a black stone appear there.
>>> mouse_down_and_sleep(joris, joris.root._0_1_mc, 1.0 / joris._speed)
>>> board_diff(joris, joris.root._0_1_mc, 'currentLabel', 'question_black')
>>> mouse_down_and_sleep(joris, joris.root._0_1_mc, 1.0 / joris._speed)
>>> board_diff(joris, joris.root._0_1_mc, 'currentLabel', 'black')
joris may not move again.
>>> mouse_down_and_sleep(joris, joris.root._1_0_mc, 1.0 / joris._speed)
>>> mouse_down_and_sleep(joris, joris.root._1_0_mc, 1.0 / joris._speed)
>>> board_diff(joris, joris.root._1_0_mc, 'currentLabel', 'empty_black')
ethan moves.
white, 0, 0
>>> mouse_down_and_sleep(ethan, ethan.root._0_0_mc, 1.0 / ethan._speed)
>>> board_diff(ethan, ethan.root._0_0_mc, 'currentLabel', 'white')
ethan may not move again.
>>> mouse_down_and_sleep(ethan, ethan.root._0_2_mc, 1.0 / ethan._speed)
>>> board_diff(ethan, ethan.root._0_2_mc, 'currentLabel', 'empty_white')
>>> joris.pb()
OX,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
REAL-TIME
ethan clicks clock button.
>>> mouse_down_and_sleep(ethan, ethan.root.clock_mc.enter_mc.enter_btn, 1.0 / ethan._speed)
joris and ethan see a clock between them.
>>> property_diff(joris, joris.root.clock_mc, 'currentLabel', 'time')
>>> property_diff(ethan, ethan.root.clock_mc, 'currentLabel', 'time')
each player sees that it is their turn.
>>> property_diff(joris, joris.root.turn_mc, 'currentLabel', 'black')
>>> property_diff(ethan, ethan.root.turn_mc, 'currentLabel', 'white')
joris previews.
>>> mouse_down_and_sleep(joris, joris.root._1_0_mc, 1.0 / joris._speed)
>>> if not joris.ambassador.sends[-1].get('eat_mc').get('act_mc').get('currentLabel') == 'none':
... joris.ambassador.sends[-1].get('eat_mc')
>>> board_diff(ethan, ethan.root._1_0_mc, 'currentLabel', 'empty_white')
>>> board_diff(joris, joris.root._1_0_mc, 'currentLabel', 'question_black')
ethan moves again.
>>> property_diff(ethan, ethan.root.eat_mc.act_mc, 'currentLabel', 'none')
>>> mouse_down_and_sleep(ethan, ethan.root._0_2_mc, 3.0 / ethan._speed)
>>> property_diff(joris, joris.root.turn_mc, 'currentLabel', 'black')
>>> property_diff(ethan, ethan.root.turn_mc, 'currentLabel', 'white')
>>> board_diff(ethan, ethan.root._0_2_mc, 'currentLabel', 'white')
>>> board_diff(joris, joris.root._0_2_mc, 'currentLabel', 'white')
ethan's move reverts joris' preview of a capture and shows white's new move.
>>> board_diff(ethan, ethan.root._1_0_mc, 'currentLabel', 'empty_white')
>>> board_diff(joris, joris.root._1_0_mc, 'currentLabel', 'empty_black')
>>> board_diff(joris, joris.root._0_0_mc, 'currentLabel', 'white')
>>> board_diff(joris, joris.root._0_2_mc, 'currentLabel', 'white')
during this time, ethan starts eating, (which in flash lasts for 30 seconds)
While eating, he cannot move again.
because client may become corrupt,
relying on client for eat animation state is insecure and unfaithful.
>>> mouse_down_and_sleep(ethan, ethan.root._0_3_mc, 3.0 / ethan._speed)
>>> board_diff(ethan, ethan.root._0_3_mc, 'currentLabel', 'empty_white')
>>> board_diff(joris, joris.root._0_3_mc, 'currentLabel', 'empty_black')
>>> property_diff(ethan, ethan.root.eat_mc.act_mc, 'currentLabel', 'eat')
>>> property_diff(ethan, ethan.root.help_mc, 'currentLabel', 'eat')
>>> property_diff(ethan, ethan.root.eat_mc, 'x', ethan.root._0_2_mc.x)
joris previews
>>> mouse_down_and_sleep(joris, joris.root._6_2_mc, 1.0 / joris._speed)
ethan tries to move.
>>> property_diff(ethan, ethan.root.eat_mc.act_mc, 'currentLabel', 'eat')
>>> mouse_down_and_sleep(ethan, ethan.root._0_3_mc, 5.0 / ethan._speed)
>>> board_diff(ethan, ethan.root._0_3_mc, 'currentLabel', 'empty_white')
in flash, eating expires after about 30 seconds.
joris moves. ethan finishes eating.
only when eat has stopped, is news of eat sent. joris gets extra stone.
>>> mouse_down_and_sleep(joris, joris.root._6_2_mc, 1.0 / joris._speed)
>>> print 'hack to give time to update'; time.sleep(1.0/joris._speed)
>>> if not joris.ambassador.sends[-1].get('eat_mc').get('act_mc').get('currentLabel') == 'none':
... joris.ambassador.sends[-1].get('eat_mc')
>>> property_diff(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'eat')
>>> property_diff(joris, joris.root.turn_mc, 'currentLabel', 'black')
>>> property_diff(ethan, ethan.root.turn_mc, 'currentLabel', 'white')
>>> board_diff(joris, joris.root._6_2_mc, 'currentLabel', 'black')
>>> board_diff(ethan, ethan.root._6_2_mc, 'currentLabel', 'black')
>>> property_diff(joris, joris.root.extra_stone_gift_mc, 'currentLabel', '_1')
!^_^ if both are eating, then the player who started eating first can play.
!^_^ this is equivalent to: whenever partner moves, you finish eating.
>>> property_diff(ethan, ethan.root.eat_mc.act_mc, 'currentLabel', 'none')
joris previews.
30 seconds passes. ethan decides not to move.
only when eat has stopped, is news of eat sent.
black has moved.
black is eating.
black previews.
black sees eating expire.
>>> mouse_down_and_sleep(joris, joris.root._2_0_mc, 1.0 / joris._speed)
>>> time.sleep(30.0 / joris._speed / 32.0)
>>> joris.ambassador.sends[-1].get('eat_mc')
>>> set_property(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'none')
>>> property_diff(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'none')
white moves.
black sees white moved.
in pdb, I detected no inconsistency, here:
>>> mouse_down_and_sleep(ethan, ethan.root._0_3_mc, 1.0 / ethan._speed)
>>> property_diff(joris, joris.root.turn_mc, 'currentLabel', 'black')
>>> property_diff(ethan, ethan.root.turn_mc, 'currentLabel', 'white')
>>> board_diff(ethan, ethan.root._0_3_mc, 'currentLabel', 'white')
>>> board_diff(joris, joris.root._0_3_mc, 'currentLabel', 'white')
black sees eating remains expired.
black moves and eating starts at new move. black gets a hide gift.
>>> property_diff(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'none')
>>> mouse_down_and_sleep(joris, joris.root._6_6_mc, 1.0 / joris._speed)
>>> board_diff(joris, joris.root._6_6_mc, 'currentLabel', 'question_black')
>>> mouse_down_and_sleep(joris, joris.root._6_6_mc, 1.0 / joris._speed)
>>> property_diff(joris, joris.root.turn_mc, 'currentLabel', 'black')
>>> property_diff(ethan, ethan.root.turn_mc, 'currentLabel', 'white')
>>> joris.ambassador.sends[-1].get('eat_mc').get('act_mc').get('currentLabel')
'none'
>>> board_diff(joris, joris.root._6_6_mc, 'currentLabel', 'black')
>>> board_diff(ethan, ethan.root._6_6_mc, 'currentLabel', 'black')
>>> property_diff(joris, joris.root.eat_mc, 'x', joris.root._6_6_mc.x)
>>> property_diff(joris, joris.root.eat_mc, 'y', joris.root._6_6_mc.y)
>>> property_diff(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'eat')
>>> property_diff(joris, joris.root.extra_stone_gift_mc, 'currentLabel', '_1')
>>> property_diff(joris, joris.root.hide_gift_mc, 'currentLabel', '_1')
Because joris moved and now both players are waiting, ethan finishes eating.
>>> property_diff(ethan, ethan.root.eat_mc.act_mc, 'currentLabel', 'none')
ethan moves.
>>> ## mouse_down_and_sleep(ethan, ethan.root._0_4_mc, 1.0 / ethan._speed)
>>> ## set_property(ethan, ethan.root.eat_mc.act_mc, 'currentLabel', 'none')
>>> mouse_down_and_sleep(ethan, ethan.root._0_4_mc, 1.0 / ethan._speed)
>>> property_diff(joris, joris.root.turn_mc, 'currentLabel', 'black')
>>> property_diff(ethan, ethan.root.turn_mc, 'currentLabel', 'white')
>>> board_diff(ethan, ethan.root._0_4_mc, 'currentLabel', 'white')
>>> board_diff(joris, joris.root._0_4_mc, 'currentLabel', 'white')
>>> property_diff(ethan, ethan.root.eat_mc.act_mc, 'currentLabel', 'eat')
Joris could eat immediately.
>>> property_diff(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'none')
30 seconds passes. ethan moves.
>>> time.sleep(30.0 / joris._speed / 32.0)
>>> set_property(ethan, ethan.root.eat_mc.act_mc, 'currentLabel', 'none')
>>> mouse_down_and_sleep(ethan, ethan.root._0_5_mc, 1.0 / ethan._speed)
>>> board_diff(ethan, ethan.root._0_5_mc, 'currentLabel', 'white')
>>> board_diff(joris, joris.root._0_5_mc, 'currentLabel', 'white')
>>> joris.pb()
OXOOOO,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,X,,,X,,
,,,,,,,,,
,,,,,,,,,
>>> ethan.pb()
OXOOOO,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,X,,,X,,
,,,,,,,,,
,,,,,,,,,
click two cake. see two cake on cursor. eating. click to take. see 'still eating'. eating expires.
>>> ## property_diff(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'eat')
>>> mouse_down_and_sleep(joris, joris.root.extra_stone_gift_mc.use_mc, 1.0 / joris._speed)
>>> property_diff(joris, joris.root.cursor_mc.extra_stone_mc, 'currentLabel', '_1')
>>> ## property_diff(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'eat')
>>> ## mouse_down_and_sleep(joris, joris.root._3_0_mc, 1.0 / joris._speed)
>>> ## mouse_down_and_sleep(joris, joris.root._3_0_mc, 1.0 / joris._speed)
>>> ## board_diff(joris, joris.root._3_0_mc, 'currentLabel', 'empty_black')
>>> ## property_diff(joris, joris.root.cursor_mc.extra_stone_mc, 'currentLabel', '_1')
>>> ## set_property(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'none')
take. do not see eating. try to take again. take.
>>> mouse_down_and_sleep(joris, joris.root._3_0_mc, 1.0 / joris._speed)
>>> mouse_down_and_sleep(joris, joris.root._3_0_mc, 1.0 / joris._speed)
>>> board_diff(joris, joris.root._3_0_mc, 'currentLabel', 'black')
>>> board_diff(ethan, ethan.root._3_0_mc, 'currentLabel', 'black')
>>> property_diff(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'none')
>>> mouse_down_and_sleep(joris, joris.root._4_0_mc, 1.0 / joris._speed)
>>> mouse_down_and_sleep(joris, joris.root._4_0_mc, 1.0 / joris._speed)
>>> board_diff(joris, joris.root._4_0_mc, 'currentLabel', 'black')
>>> board_diff(ethan, ethan.root._4_0_mc, 'currentLabel', 'black')
>>> property_diff(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'eat')
>>> property_diff(joris, joris.root.eat_mc, 'x', joris.root._4_0_mc.x)
>>> property_diff(joris, joris.root.eat_mc, 'y', joris.root._4_0_mc.y)
!^_^ black is done eating. black hides.
>>> set_property(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'none')
>>> property_diff(joris, joris.root.cursor_mc, 'currentLabel', 'black')
>>> mouse_down_and_sleep(joris, joris.root.hide_gift_mc.use_mc, 1.0 / joris._speed)
>>> property_diff(joris, joris.root.cursor_mc, 'currentLabel', 'hide_black')
>>> mouse_down_and_sleep(joris, joris.root._6_4_mc, 1.0 / joris._speed)
>>> mouse_down_and_sleep(joris, joris.root._6_4_mc, 1.0 / joris._speed)
>>> board_diff(joris, joris.root._6_4_mc, 'currentLabel', 'hide_black')
>>> board_diff(ethan, ethan.root._6_4_mc, 'currentLabel', 'empty_white')
After hiding, joris' cursor reverts.
>>> property_diff(joris, joris.root.cursor_mc, 'currentLabel', 'black')
after white moves, white is eating. white clicks black hidden.
white sees that he is still eating and does not see the hidden black.
>>> mouse_down_and_sleep(ethan, ethan.root._4_4_mc, 1.0 / ethan._speed)
>>> property_diff(ethan, ethan.root.eat_mc.act_mc, 'currentLabel', 'eat')
>>> mouse_down_and_sleep(ethan, ethan.root._6_4_mc, 1.0 / ethan._speed)
>>> property_diff(ethan, ethan.root.help_mc, 'currentLabel', 'eat')
>>> board_diff(ethan, ethan.root._6_4_mc, 'currentLabel', 'empty_white')
>>> board_diff(joris, joris.root._6_4_mc, 'currentLabel', 'hide_black')
>>> property_diff(ethan, ethan.root.eat_mc.act_mc, 'currentLabel', 'eat')
>>> property_diff(ethan, ethan.root.eat_mc, 'x', ethan.root._4_4_mc.x)
>>> property_diff(ethan, ethan.root.eat_mc, 'y', ethan.root._4_4_mc.y)
'''
def iterate_stress(ethan, joris, delay, r, i,
mouse_down_and_sleep, set_property):
set_property(joris, joris.root.eat_mc.act_mc, 'currentLabel', 'none')
time.sleep(delay)
set_property(ethan, ethan.root.eat_mc.act_mc, 'currentLabel', 'none')
time.sleep(delay)
joris_intersection_mc = joris.intersection_mc_array[r][i]
ethan_intersection_mc = ethan.intersection_mc_array[8-r][8-i]
mouse_down_and_sleep(joris, joris_intersection_mc, delay )
mouse_down_and_sleep(joris, joris_intersection_mc, delay )
mouse_down_and_sleep(ethan, ethan_intersection_mc, delay )
# joris.pb()
# ethan.pb()
def real_time_stress_example():
'''
ethan and joris start
>>> code_unit.inline_examples(
... ethan_joris_start_example.__doc__,
... locals(), globals(),
... verify_examples = False)
by default, turns are required.
joris moves.
black, 4, 4
Soon, Joris previews a black stone appear there.
>>> mouse_down_and_sleep(joris, joris.root._4_4_mc, 1.0 / joris._speed)
>>> board_diff(joris, joris.root._4_4_mc, 'currentLabel', 'question_black')
>>> mouse_down_and_sleep(joris, joris.root._4_4_mc, 1.0 / joris._speed)
>>> board_diff(joris, joris.root._4_4_mc, 'currentLabel', 'black')
XXX must be ethan's turn to start clock.
ethan clicks clock button.
>>> mouse_down_and_sleep(ethan, ethan.root.clock_mc.enter_mc.enter_btn, 1.0 / ethan._speed)
joris and ethan see a clock between them.
>>> property_diff(joris, joris.root.clock_mc, 'currentLabel', 'time')
>>> property_diff(ethan, ethan.root.clock_mc, 'currentLabel', 'time')
each player sees that it is their turn.
>>> property_diff(joris, joris.root.turn_mc, 'currentLabel', 'black')
>>> property_diff(ethan, ethan.root.turn_mc, 'currentLabel', 'white')
joris and ethan fill the board, nearly simultaneously from opposite corners
joris previews.
>>> ## delay = 1.0 / joris._speed; columns = 3 # pass
>>> ## delay = 1.0 / joris._speed; columns = 9 # pass mock, client, flash
    Half of the mock and client runs fail, with a complaint of 'help_mc:eat';
    the flash server and master receive everything, while the slave receives none or only the last.
>>> delay = 1.0 / 2 / joris._speed; columns = 9
>>> ## delay = 1.0 / 4 / joris._speed; columns = 9 # fail half
>>> logging.info('delay = %s; columns = %s' % (delay, columns))
>>> r = 0
>>> for c in range(columns):
... iterate_stress(ethan, joris, delay, r, c, mouse_down_and_sleep, set_property)
>>>
>>> time.sleep(delay)
>>> time.sleep(delay)
>>> joris.pb()
XXXXXXXXX
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,X,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
OOOOOOOOO
>>> ethan.pb()
XXXXXXXXX
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,X,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
OOOOOOOOO
'''
# Echo to stress network
def echo(globe, news):
'''Echo by sending back same.
>>> gateway_process = configuration.subprocess_gateway(configuration.amf_host, 'embassy.py', verbose)
XXX if more than one line indented, IndentationError
>>> if 'ethan' in globals() or 'ethan' in locals():
... mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
>>> if 'ethan' not in globals() and 'ethan' not in locals():
... ethan = globe_class(); ethan.setup(configuration.mock_speed, configuration.setup_client)
>>> set_property(ethan, ethan.root.title_mc.username_txt, 'text', 'ethan')
>>> time.sleep(1.0 / ethan._speed)
>>> set_property(ethan, ethan.root.title_mc.password_txt, 'text', 'kennerly')
>>> time.sleep(1.0 / ethan._speed)
>>> mouse_down_and_sleep(ethan, ethan.root.title_mc.start_btn, 1.0 / ethan._speed)
>>> set_property(ethan, ethan.root.gateway_mc.ready_time_txt, 'text', 'echo')
Need a second to go into echo mode.
>>> mouse_down_and_sleep(ethan, ethan.root.title_mc.start_btn, 1.0 / ethan._speed)
>>> set_property(ethan, ethan.root, 'currentLabel', 'table')
>>> time.sleep(1.0 / ethan._speed)
pass flash
>>> columns = 9
Log does not copy. Large news at full screen. 04/09/2010 Fri 11:01
Copy tests that slave receives messages from master. Slave will not return or echo. Copy large test passes. 04/09/2010 Fri 15:15
have slave mouse down, or just mark on the slave if slave is an echo.
>>> ## inject = slave_log_large_news
>>> inject = slave_copy
>>> ## inject = slave_copy_large_news
>>> ## inject = mouse_down_and_sleep
>>> ## inject = slave_echo_once
>>> print inject
>>> echo_many(inject, ethan, 1.0 / 8, 0, columns)
flash receives most,
>>> echo_many(inject, ethan, 1.0 /16, 1, columns)
fail one in mock and python client
>>> echo_many(inject, ethan, 1.0 /32, 2, columns)
>>> echo_many(inject, ethan, 1.0 /64, 3, columns)
>>> time.sleep(1.0/4)
>>> ethan.pb()
$$$$$$$$$
$$$$$$$$$
$$$$$$$$$
$$$$$$$$$
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
Now ask server to echo large news
>>> set_property(ethan, ethan.root.gateway_mc.ready_time_txt, 'text', 'echo_large')
>>> time.sleep(1.0 / ethan._speed)
>>> set_property(ethan, ethan.root.title_mc.username_txt, 'text', 'ethan')
>>> time.sleep(1.0 / ethan._speed)
>>> set_property(ethan, ethan.root.title_mc.password_txt, 'text', 'kennerly')
>>> time.sleep(1.0 / ethan._speed)
Need a second to go into echo mode.
>>> mouse_down_and_sleep(ethan, ethan.root.title_mc.start_btn, 1.0 / ethan._speed)
>>> set_property(ethan, ethan.root, 'currentLabel', 'table')
>>> time.sleep(1.0 / ethan._speed)
pass mock and flash, except flash master does not get the board back.
>>> echo_many(inject, ethan, 1.0/ 4, 5, columns)
>>> echo_many(inject, ethan, 1.0/ 8, 6, columns)
in flash, most but not all receive
>>> echo_many(inject, ethan, 1.0/16, 7, columns)
>>> echo_many(inject, ethan, 1.0/32, 8, columns)
>>> time.sleep(1.0/4)
>>> ethan.pb()
$$$$$$$$$
$$$$$$$$$
$$$$$$$$$
$$$$$$$$$
,,,,,,,,,
$$$$$$$$$
$$$$$$$$$
$$$$$$$$$
$$$$$$$$$
'''
globe.log_news('echo', news)
# logging.info('echo: %s' % news)
globe.publish(news)
def echo_many(mouse_down_and_sleep, globe, delay, r, columns):
logging.info('echo_many delay = %s; r = %s; columns = %s' % (delay, r, columns))
for c in range(columns):
mouse_down_and_sleep(globe, globe.intersection_mc_array[r][c], delay)
def echo_test(ethan):
'''Repeat echo several times. Watch for failure.
>>> gateway_process = configuration.subprocess_gateway(configuration.amf_host, 'embassy.py', verbose)
>>> ethan = configuration.globe_class()
>>> ethan.setup(configuration.mock_speed, configuration.setup_client)
>>> set_property(ethan, ethan.root.title_mc.username_txt, 'text', 'ethan')
>>> time.sleep(1.0 / ethan._speed)
>>> set_property(ethan, ethan.root.title_mc.password_txt, 'text', 'kennerly')
>>> time.sleep(1.0 / ethan._speed)
>>> mouse_down_and_sleep(ethan, ethan.root.title_mc.start_btn, 1.0 / ethan._speed)
>>> mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
>>> echo_test(ethan)
>>> mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
>>> echo_test(ethan)
>>> mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
>>> echo_test(ethan)
>>> mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
>>> echo_test(ethan)
>>> mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
>>> echo_test(ethan)
>>> mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
>>> echo_test(ethan)
>>> mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
>>> echo_test(ethan)
>>> mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
>>> echo_test(ethan)
>>> mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
>>> echo_test(ethan)
>>> mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
>>> echo_test(ethan)
'''
## script = doctest.script_from_examples(echo.__doc__)
## trim setup
# Echo by sending back same.
#
set_property(ethan, ethan.root.title_mc.username_txt, 'text', 'ethan')
time.sleep(1.0 / ethan._speed)
set_property(ethan, ethan.root.title_mc.password_txt, 'text', 'kennerly')
time.sleep(1.0 / ethan._speed)
mouse_down_and_sleep(ethan, ethan.root.title_mc.start_btn, 1.0 / ethan._speed)
mouse_down_and_sleep(ethan, ethan.root.lobby_mc.enter_mc, 1.0 / ethan._speed)
set_property(ethan, ethan.root.gateway_mc.ready_time_txt, 'text', 'echo')
#
# Need a second to go into echo mode.
mouse_down_and_sleep(ethan, ethan.root.title_mc.start_btn, 1.0 / ethan._speed)
set_property(ethan, ethan.root, 'currentLabel', 'table')
time.sleep(1.0 / ethan._speed)
#
# pass flash
columns = 9
#
# Log does not copy. Large news at full screen. 04/09/2010 Fri 11:01
# Copy tests that slave receives messages from master. Slave will not return or echo. Copy large test passes. 04/09/2010 Fri 15:15
# have slave mouse down, or just mark on the slave if slave is an echo.
inject = slave_copy
print inject
echo_many(inject, ethan, 1.0 / 8, 0, columns)
#
# flash receives most,
echo_many(inject, ethan, 1.0 /16, 1, columns)
#
# fail one in mock and python client
echo_many(inject, ethan, 1.0 /32, 2, columns)
echo_many(inject, ethan, 1.0 /64, 3, columns)
time.sleep(1.0/4)
ethan.pb()
# Expected:
## $$$$$$$$$
## $$$$$$$$$
## $$$$$$$$$
## $$$$$$$$$
## ,,,,,,,,,
## ,,,,,,,,,
## ,,,,,,,,,
## ,,,,,,,,,
## ,,,,,,,,,
#
# Now ask server to echo large news
set_property(ethan, ethan.root.gateway_mc.ready_time_txt, 'text', 'echo_large')
time.sleep(1.0 / ethan._speed)
set_property(ethan, ethan.root.title_mc.username_txt, 'text', 'ethan')
time.sleep(1.0 / ethan._speed)
set_property(ethan, ethan.root.title_mc.password_txt, 'text', 'kennerly')
time.sleep(1.0 / ethan._speed)
#
# Need a second to go into echo mode.
mouse_down_and_sleep(ethan, ethan.root.title_mc.start_btn, 1.0 / ethan._speed)
set_property(ethan, ethan.root, 'currentLabel', 'table')
time.sleep(1.0 / ethan._speed)
#
# pass mock and flash, except flash master does not get the board back.
echo_many(inject, ethan, 1.0/ 4, 5, columns)
echo_many(inject, ethan, 1.0/ 8, 6, columns)
#
# in flash, most but not all receive
echo_many(inject, ethan, 1.0/16, 7, columns)
echo_many(inject, ethan, 1.0/32, 8, columns)
time.sleep(1.0/4)
ethan.pb()
# Expected:
## $$$$$$$$$
## $$$$$$$$$
## $$$$$$$$$
## $$$$$$$$$
## ,,,,,,,,,
## $$$$$$$$$
## $$$$$$$$$
## $$$$$$$$$
## $$$$$$$$$
time.sleep(2.0)
def echo_large(globe, news):
    '''Echo with large news; see the echo test above.
'''
globe.log_news('echo_large', news)
# logging.info('echo_large: %s' % news)
large_news = load('lifeanddeath.large_news.py')
news = upgrade(large_news, news)
globe.publish(news)
def remote_echo():
    '''Test echo remotely in Flash.
>>> code_unit.inline_examples(
... setup_remote_control_snippet.__doc__,
... locals(), globals(),
... verify_examples = False)
>>> code_unit.inline_examples(
... echo.__doc__,
... locals(), globals(),
... verify_examples = True)
'''
def stress_black(globe, start_interval = 16.0):
'''Stress test black by sending large news to 9 intersections in a row.
>>> gateway_process = configuration.subprocess_gateway(configuration.amf_host, 'embassy.py', verbose)
>>> laurens = configuration.globe_class()
>>> laurens.setup(configuration.mock_speed, configuration.setup_client)
>>> set_property(laurens, laurens.root.title_mc.username_txt, 'text', 'laurens')
>>> time.sleep(1.0 / laurens._speed)
>>> set_property(laurens, laurens.root.title_mc.password_txt, 'text', 'l')
>>> time.sleep(1.0 / laurens._speed)
>>> mouse_down_and_sleep(laurens, laurens.root.title_mc.start_btn, 1.0 / laurens._speed)
>>> set_property(laurens, laurens.root.gateway_mc.ready_time_txt, 'text', 'stress')
>>> mouse_down_and_sleep(laurens, laurens.root.title_mc.start_btn, 64.0 / laurens._speed)
>>> laurens.pb()
XXXXXXXXX
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
,,,,,,,,,
>>> len(laurens.ambassador.receives)
12
>>> laurens.ambassador.receives[-1].keys()
'''
import copy
logging.info('stress_black start interval=%s' % start_interval)
interval = 2 * start_interval
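    # With the default start_interval of 16.0, the loop below publishes to nine
    # intersections spaced 16, 8, 4, 2, 1, 0.5, 0.25, 0.125 and 0.0625 seconds
    # apart (each divided by globe._speed): the interval is halved before every
    # publish, starting from 2 * start_interval.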
large_news = load('lifeanddeath.large_news.py')
for r in range(1):
for c in range(9):
interval = 0.5 * interval
news = copy.deepcopy(large_news)
intersection_name = get_intersection_name(r, c)
news[intersection_name] = {'currentLabel': 'black'}
stress_log = 'stress_black publish=%s: interval=%s' % (intersection_name, interval)
logging.info(stress_log)
globe.publish(news)
time.sleep(interval / globe._speed)
logging.info('stress_black end interval=%s' % interval)
time.sleep(start_interval)
globe.pb()
def moonhyoung_computer_pass_example():
'''Moonhyoung sees computer pass. Then Moonhyoung moves.
    Gotcha: when replaying white, use the white_computer option, not a partnered player.
>>> code_unit.inline_examples(
... ethan_lukasz_begin_example.__doc__,
... locals(), globals(),
... verify_examples = False)
>>> moonhyoung = black
>>> computer_moonhyoung = white
>>> sloth = 1.0 / moonhyoung._speed
>>> moonhyoung.root.lobby_mc.main_mc._04_mc.dispatchEvent(mouseDown)
>>> time.sleep(sloth * 1.897133)
>>> moonhyoung.root.lobby_mc._04_mc.dominate_3_3_mc.dispatchEvent(mouseDown)
>>> time.sleep(sloth * 3.567127)
>>> mouse_down_and_sleep(moonhyoung, moonhyoung.root.game_over_mc.start_mc, wait)
>>> moonhyoung.root.game_over_mc.white_computer_mc.currentLabel
'computer'
>>> moonhyoung.root.game_over_mc.white_computer_mc.enter_mc.dispatchEvent(mouseDown)
>>> time.sleep(sloth * 2.757399)
TODO: do not sequence white computer option
>>> time.sleep(3 * wait)
>>> moonhyoung.root.game_over_mc.white_computer_mc.currentLabel
'none'
>>> mouse_down_and_sleep(moonhyoung, moonhyoung.root._1_1_mc, wait)
>>> mouse_down_and_sleep(computer_moonhyoung, computer_moonhyoung.root._2_2_mc, wait)
>>> time.sleep(sloth * 7.485410)
>>> mouse_down_and_sleep(moonhyoung, moonhyoung.root._2_1_mc, wait)
>>> mouse_down_and_sleep(computer_moonhyoung, computer_moonhyoung.root._0_0_mc, wait)
>>> time.sleep(sloth * 8.023928)
>>> mouse_down_and_sleep(moonhyoung, moonhyoung.root._2_0_mc, wait)
>>> mouse_down_and_sleep(computer_moonhyoung, computer_moonhyoung.root._0_2_mc, wait)
>>> time.sleep(sloth * 2.384054)
>>> moonhyoung.root.game_over_mc.white_computer_mc.enter_mc.dispatchEvent(mouseDown)
>>> time.sleep(wait)
>>> moonhyoung.root.game_over_mc.white_computer_mc.currentLabel
'computer'
>>> moonhyoung.pb()
OXO
,X,
XXO
>>> mouse_down_and_sleep(moonhyoung, moonhyoung.root._1_0_mc, wait)
>>> ## computer_moonhyoung.root.pass_mc.dispatchEvent(mouseDown)
>>> time.sleep(sloth * 2.082763)
>>> moonhyoung.pb()
,XO
XX,
XXO
>>> mouse_down_and_sleep(moonhyoung, moonhyoung.root._1_2_mc, wait)
>>> ## computer_moonhyoung.root.pass_mc.dispatchEvent(mouseDown)
>>> time.sleep(sloth * 2.869093)
>>> moonhyoung.pb()
,X,
XXX
XX,
>>> mouse_down_and_sleep(moonhyoung, moonhyoung.root._0_2_mc, wait)
>>> ## computer_moonhyoung.root.pass_mc.dispatchEvent(mouseDown)
>>> moonhyoung.pb()
,XX
XXX
XX,
'''
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
# from PyPretreatMolutil import Standardizer
# from PyPretreatMolutil import validate_smiles
# from PyPretreatMolutil import standardize_smiles
# from PyPretreatMolutil import Validator
# Core Library modules
import logging
# Third party modules
from rdkit import Chem
# First party modules
from PyBioMed.PyPretreat.PyPretreatMolutil import *
log = logging.getLogger(__name__)
map_dict = {
"1": "disconnect_metals",
"2": "normalize",
"3": "addhs",
"4": "rmhs",
"5": "reionize",
"6": "uncharge",
"7": "largest_fragment",
"8": "canonicalize_tautomer",
}
# NORMALIZATIONS = NORMALIZATIONS
class StandardizeMol(object):
"""
The main class for performing standardization of molecules and deriving parent molecules.
The primary usage is via the :meth:`~molvs.standardize.Standardizer.standardize` method::
s = Standardizer()
mol1 = Chem.MolFromSmiles('C1=CC=CC=C1')
mol2 = s.standardize(mol1)
There are separate methods to derive fragment, charge, tautomer, isotope and stereo parent molecules.
"""
def __init__(
self,
normalizations=NORMALIZATIONS,
acid_base_pairs=ACID_BASE_PAIRS,
tautomer_transforms=TAUTOMER_TRANSFORMS,
tautomer_scores=TAUTOMER_SCORES,
max_restarts=MAX_RESTARTS,
max_tautomers=MAX_TAUTOMERS,
prefer_organic=PREFER_ORGANIC,
):
"""Initialize a Standardizer with optional custom parameters.
:param normalizations: A list of Normalizations to apply (default: :data:`~molvs.normalize.NORMALIZATIONS`).
:param acid_base_pairs: A list of AcidBasePairs for competitive reionization (default:
:data:`~molvs.charge.ACID_BASE_PAIRS`).
:param tautomer_transforms: A list of TautomerTransforms to apply (default:
:data:`~molvs.tautomer.TAUTOMER_TRANSFORMS`).
:param tautomer_scores: A list of TautomerScores used to determine canonical tautomer (default:
:data:`~molvs.tautomer.TAUTOMER_SCORES`).
:param max_restarts: The maximum number of times to attempt to apply the series of normalizations (default 200).
:param max_tautomers: The maximum number of tautomers to enumerate (default 1000).
:param prefer_organic: Whether to prioritize organic fragments when choosing fragment parent (default False).
"""
log.debug("Initializing Standardizer")
self.normalizations = normalizations
self.acid_base_pairs = acid_base_pairs
self.tautomer_transforms = tautomer_transforms
self.tautomer_scores = tautomer_scores
self.max_restarts = max_restarts
self.max_tautomers = max_tautomers
self.prefer_organic = prefer_organic
def __call__(self, mol):
"""Calling a Standardizer instance like a function is the same as calling its
:meth:`~molvs.standardize.Standardizer.standardize` method."""
return self.standardize(mol)
def addhs(self, mol):
from rdkit.Chem import AddHs
return AddHs(mol)
def rmhs(self, mol):
from rdkit.Chem import RemoveHs
return RemoveHs(mol)
@memoized_property
def disconnect_metals(self):
"""
:returns: A callable :class:`~molvs.metal.MetalDisconnector` instance.
"""
return MetalDisconnector()
@memoized_property
def normalize(self):
"""
:returns: A callable :class:`~molvs.normalize.Normalizer` instance.
"""
return Normalizer(
normalizations=self.normalizations, max_restarts=self.max_restarts
)
@memoized_property
def reionize(self):
"""
:returns: A callable :class:`~molvs.charge.Reionizer` instance.
"""
return Reionizer(acid_base_pairs=self.acid_base_pairs)
@memoized_property
def uncharge(self):
"""
:returns: A callable :class:`~molvs.charge.Uncharger` instance.
"""
return Uncharger()
@memoized_property
def largest_fragment(self):
"""
:returns: A callable :class:`~molvs.fragment.LargestFragmentChooser` instance.
"""
return LargestFragmentChooser(prefer_organic=self.prefer_organic)
@memoized_property
def canonicalize_tautomer(self):
"""
:returns: A callable :class:`~molvs.tautomer.TautomerCanonicalizer` instance.
"""
return TautomerCanonicalizer(
transforms=self.tautomer_transforms,
scores=self.tautomer_scores,
max_tautomers=self.max_tautomers,
)
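# --- Illustrative sketch (not part of the original PyBioMed code) -----------
# map_dict above maps option numbers to StandardizeMol method names, so a caller
# can select standardization steps by number and dispatch with getattr. The
# helper below is hypothetical and shown only to make that dispatch explicit.
def apply_standardize_steps(mol, step_numbers):
    """Apply the StandardizeMol steps selected by step_numbers, in order."""
    sm = StandardizeMol()
    for number in step_numbers:
        # Look up the step by its number ("1".."8") and apply it to the molecule.
        mol = getattr(sm, map_dict[number])(mol)
    return mol
# Example: apply_standardize_steps(mol, ["1", "2", "6"]) disconnects metals,
# normalizes and then uncharges the molecule.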
def StandardMol(mol):
"""
The function for performing standardization of molecules and deriving parent molecules.
The function contains derive fragment, charge, tautomer, isotope and stereo parent molecules.
The primary usage is::
mol1 = Chem.MolFromSmiles('C1=CC=CC=C1')
mol2 = s.standardize(mol1)
"""
s = Standardizer()
mol = s.disconnect_metals(mol)
mol = s.normalize(mol)
mol = s.uncharge(mol)
mol = s.largest_fragment(mol)
mol = s.canonicalize_tautomer(mol)
mol = s.reionize(mol)
mol = s.addhs(mol)
mol = s.rmhs(mol)
return mol
def StandardSmi(smi):
"""
The function for performing standardization of molecules and deriving parent molecules.
The function contains derive fragment, charge, tautomer, isotope and stereo parent molecules.
The primary usage is::
smi = StandardSmi('C[n+]1c([N-](C))cccc1')
"""
mol = Chem.MolFromSmiles(smi)
mol = StandardMol(mol)
smi = Chem.MolToSmiles(mol, isomericSmiles=True)
return smi
def ValidatorMol(mol):
"""
Return log messages for a given SMILES string using the default validations.
Note: This is a convenience function for quickly validating a single SMILES string.
:param string smiles: The SMILES for the molecule.
:returns: A list of log messages.
:rtype: list of strings.
"""
return Validator().validate(mol)
def ValidatorSmi(smi):
"""
Return log messages for a given SMILES string using the default validations.
Note: This is a convenience function for quickly validating a single SMILES string.
:param string smiles: The SMILES for the molecule.
:returns: A list of log messages.
:rtype: list of strings.
"""
return validate_smiles(smi)
if __name__ == "__main__":
smiles = ["O=C([O-])c1ccccc1", "C[n+]1c([N-](C))cccc1", "[2H]C(Cl)(Cl)Cl"]
mol = Chem.MolFromSmiles("[Na]OC(=O)c1ccc(C[S+2]([O-])([O-]))cc1")
sm = StandardizeMol()
mol = sm.addhs(mol)
mol = sm.disconnect_metals(mol)
mol = sm.largest_fragment(mol)
mol = sm.normalize(mol)
mol = sm.uncharge(mol)
mol = sm.canonicalize_tautomer(mol)
mol = sm.reionize(mol)
mol = sm.rmhs(mol)
mol = sm.addhs(mol)
print(Chem.MolToSmiles(mol, isomericSmiles=True))
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import six
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import parser
from tensorflow.tools.docs import pretty_docs
from tensorflow.tools.docs import py_guide_parser
def _is_free_function(py_object, full_name, index):
"""Check if input is a free function (and not a class- or static method)."""
if not tf_inspect.isfunction(py_object):
return False
# Static methods are functions to tf_inspect (in 2.7), so check if the parent
# is a class. If there is no parent, it's not a function.
if '.' not in full_name:
return False
parent_name = full_name.rsplit('.', 1)[0]
if tf_inspect.isclass(index[parent_name]):
return False
return True
def write_docs(output_dir, parser_config, yaml_toc, root_title='TensorFlow'):
"""Write previously extracted docs to disk.
Write a docs page for each symbol included in the indices of parser_config to
a tree of docs at `output_dir`.
Symbols with multiple aliases will have only one page written about
them, which is referenced for all aliases.
Args:
output_dir: Directory to write documentation markdown files to. Will be
created if it doesn't exist.
parser_config: A `parser.ParserConfig` object, containing all the necessary
indices.
yaml_toc: Set to `True` to generate a "_toc.yaml" file.
root_title: The title name for the root level index.md.
Raises:
ValueError: if `output_dir` is not an absolute path
"""
# Make output_dir.
if not os.path.isabs(output_dir):
raise ValueError("'output_dir' must be an absolute path.\n"
" output_dir='%s'" % output_dir)
try:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
except OSError as e:
print('Creating output dir "%s" failed: %s' % (output_dir, e))
raise
  # These dictionaries are used for table-of-contents generation below.
  # After the for-loop below they will contain:
  # - module name (string): classes and functions the module contains (list)
module_children = {}
  # - symbol name (string): pathname (string)
symbol_to_file = {}
# Parse and write Markdown pages, resolving cross-links (@{symbol}).
for full_name, py_object in six.iteritems(parser_config.index):
parser_config.reference_resolver.current_doc_full_name = full_name
if full_name in parser_config.duplicate_of:
continue
# Methods and some routines are documented only as part of their class.
if not (tf_inspect.ismodule(py_object) or tf_inspect.isclass(py_object) or
_is_free_function(py_object, full_name, parser_config.index)):
continue
sitepath = os.path.join('api_docs/python',
parser.documentation_path(full_name)[:-3])
# For TOC, we need to store a mapping from full_name to the file
# we're generating
symbol_to_file[full_name] = sitepath
# For a module, remember the module for the table-of-contents
if tf_inspect.ismodule(py_object):
if full_name in parser_config.tree:
module_children.setdefault(full_name, [])
# For something else that's documented,
# figure out what module it lives in
else:
subname = str(full_name)
while True:
subname = subname[:subname.rindex('.')]
if tf_inspect.ismodule(parser_config.index[subname]):
module_children.setdefault(subname, []).append(full_name)
break
print('Writing docs for %s (%r).' % (full_name, py_object))
# Generate docs for `py_object`, resolving references.
page_info = parser.docs_for_object(full_name, py_object, parser_config)
path = os.path.join(output_dir, parser.documentation_path(full_name))
directory = os.path.dirname(path)
try:
if not os.path.exists(directory):
os.makedirs(directory)
with open(path, 'w') as f:
f.write(pretty_docs.build_md_page(page_info))
except OSError as e:
print('Cannot write documentation for %s to %s: %s' % (full_name,
directory, e))
raise
if yaml_toc:
# Generate table of contents
# Put modules in alphabetical order, case-insensitive
modules = sorted(module_children.keys(), key=lambda a: a.upper())
leftnav_path = os.path.join(output_dir, '_toc.yaml')
with open(leftnav_path, 'w') as f:
# Generate header
f.write('# Automatically generated file; please do not edit\ntoc:\n')
for module in modules:
indent_num = module.count('.')
# Don't list `tf.submodule` inside `tf`
indent_num = max(indent_num, 1)
indent = ' '*indent_num
if indent_num > 1:
          # tf.contrib.bayesflow.entropy will be under
          # tf.contrib -> bayesflow -> entropy
title = module.split('.')[-1]
else:
title = module
header = [
'- title: ' + title,
' section:',
' - title: Overview',
' path: /TARGET_DOC_ROOT/VERSION/' + symbol_to_file[module]]
header = ''.join([indent+line+'\n' for line in header])
f.write(header)
symbols_in_module = module_children.get(module, [])
# Sort case-insensitive, if equal sort case sensitive (upper first)
symbols_in_module.sort(key=lambda a: (a.upper(), a))
for full_name in symbols_in_module:
item = [
' - title: ' + full_name[len(module) + 1:],
' path: /TARGET_DOC_ROOT/VERSION/' + symbol_to_file[full_name]]
item = ''.join([indent+line+'\n' for line in item])
f.write(item)
# Write a global index containing all full names with links.
with open(os.path.join(output_dir, 'index.md'), 'w') as f:
f.write(
parser.generate_global_index(root_title, parser_config.index,
parser_config.reference_resolver))
def add_dict_to_dict(add_from, add_to):
for key in add_from:
if key in add_to:
add_to[key].extend(add_from[key])
else:
add_to[key] = add_from[key]
# Exclude some libraries in contrib from the documentation altogether.
def _get_default_private_map():
return {'tf.test': ['mock']}
# Exclude members of some libraries.
def _get_default_do_not_descend_map():
# TODO(wicke): Shrink this list once the modules get sealed.
return {
'tf': ['cli', 'lib', 'wrappers'],
'tf.contrib': [
'compiler',
'grid_rnn',
# Block contrib.keras to de-clutter the docs
'keras',
'labeled_tensor',
'ndlstm',
'quantization',
'session_bundle',
'slim',
'solvers',
'specs',
'tensor_forest',
'tensorboard',
'testing',
'tfprof',
],
'tf.contrib.bayesflow': [
'special_math', 'stochastic_gradient_estimators',
'stochastic_variables'
],
'tf.contrib.ffmpeg': ['ffmpeg_ops'],
'tf.contrib.graph_editor': [
'edit', 'match', 'reroute', 'subgraph', 'transform', 'select', 'util'
],
'tf.contrib.keras': ['api', 'python'],
'tf.contrib.layers': ['feature_column', 'summaries'],
'tf.contrib.learn': [
'datasets',
'head',
'graph_actions',
'io',
'models',
'monitors',
'ops',
'preprocessing',
'utils',
],
'tf.contrib.util': ['loader'],
}
def extract(py_modules, private_map, do_not_descend_map):
"""Extract docs from tf namespace and write them to disk."""
# Traverse the first module.
visitor = doc_generator_visitor.DocGeneratorVisitor(py_modules[0][0])
api_visitor = public_api.PublicAPIVisitor(visitor)
api_visitor.set_root_name(py_modules[0][0])
add_dict_to_dict(private_map, api_visitor.private_map)
add_dict_to_dict(do_not_descend_map, api_visitor.do_not_descend_map)
traverse.traverse(py_modules[0][1], api_visitor)
# Traverse all py_modules after the first:
for module_name, module in py_modules[1:]:
visitor.set_root_name(module_name)
api_visitor.set_root_name(module_name)
traverse.traverse(module, api_visitor)
return visitor
class _GetMarkdownTitle(py_guide_parser.PyGuideParser):
"""Extract the title from a .md file."""
def __init__(self):
self.title = None
py_guide_parser.PyGuideParser.__init__(self)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
class _DocInfo(object):
"""A simple struct for holding a doc's url and title."""
def __init__(self, url, title):
self.url = url
self.title = title
def build_doc_index(src_dir):
"""Build an index from a keyword designating a doc to _DocInfo objects."""
doc_index = {}
if not os.path.isabs(src_dir):
raise ValueError("'src_dir' must be an absolute path.\n"
" src_dir='%s'" % src_dir)
if not os.path.exists(src_dir):
raise ValueError("'src_dir' path must exist.\n"
" src_dir='%s'" % src_dir)
for dirpath, _, filenames in os.walk(src_dir):
suffix = os.path.relpath(path=dirpath, start=src_dir)
for base_name in filenames:
if not base_name.endswith('.md'):
continue
title_parser = _GetMarkdownTitle()
title_parser.process(os.path.join(dirpath, base_name))
key_parts = os.path.join(suffix, base_name[:-3]).split('/')
if key_parts[-1] == 'index':
key_parts = key_parts[:-1]
doc_info = _DocInfo(os.path.join(suffix, base_name), title_parser.title)
doc_index[key_parts[-1]] = doc_info
if len(key_parts) > 1:
doc_index['/'.join(key_parts[-2:])] = doc_info
return doc_index
class _GuideRef(object):
def __init__(self, base_name, title, section_title, section_tag):
self.url = 'api_guides/python/' + (('%s#%s' % (base_name, section_tag))
if section_tag else base_name)
self.link_text = (('%s > %s' % (title, section_title))
if section_title else title)
def make_md_link(self, url_prefix):
return '[%s](%s%s)' % (self.link_text, url_prefix, self.url)
class _GenerateGuideIndex(py_guide_parser.PyGuideParser):
"""Turn guide files into an index from symbol name to a list of _GuideRefs."""
def __init__(self):
self.index = {}
py_guide_parser.PyGuideParser.__init__(self)
def process(self, full_path, base_name):
"""Index a file, reading from `full_path`, with `base_name` as the link."""
self.full_path = full_path
self.base_name = base_name
self.title = None
self.section_title = None
self.section_tag = None
py_guide_parser.PyGuideParser.process(self, full_path)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
def process_section(self, _, section_title, tag):
self.section_title = section_title
self.section_tag = tag
def process_line(self, _, line):
"""Index @{symbol} references as in the current file & section."""
for match in parser.SYMBOL_REFERENCE_RE.finditer(line):
val = self.index.get(match.group(1), [])
val.append(
_GuideRef(self.base_name, self.title, self.section_title,
self.section_tag))
self.index[match.group(1)] = val
def _build_guide_index(guide_src_dir):
"""Return dict: symbol name -> _GuideRef from the files in `guide_src_dir`."""
index_generator = _GenerateGuideIndex()
if os.path.exists(guide_src_dir):
for full_path, base_name in py_guide_parser.md_files_in_dir(guide_src_dir):
index_generator.process(full_path, base_name)
return index_generator.index
class _UpdateTags(py_guide_parser.PyGuideParser):
"""Rewrites a Python guide so that each section has an explicit tag."""
def process_section(self, line_number, section_title, tag):
self.replace_line(line_number, '<h2 id="%s">%s</h2>' % (tag, section_title))
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
def _other_docs(src_dir, output_dir, reference_resolver):
"""Convert all the files in `src_dir` and write results to `output_dir`."""
header = '<!-- DO NOT EDIT! Automatically generated file. -->\n'
# Iterate through all the source files and process them.
tag_updater = _UpdateTags()
for dirpath, _, filenames in os.walk(src_dir):
# How to get from `dirpath` to api_docs/python/
relative_path_to_root = os.path.relpath(
path=os.path.join(src_dir, 'api_docs/python'), start=dirpath)
# Make the directory under output_dir.
new_dir = os.path.join(output_dir,
os.path.relpath(path=dirpath, start=src_dir))
try:
if not os.path.exists(new_dir):
os.makedirs(new_dir)
except OSError as e:
print('Creating output dir "%s" failed: %s' % (new_dir, e))
raise
for base_name in filenames:
if base_name in EXCLUDED:
print('Skipping excluded file %s...' % base_name)
continue
full_in_path = os.path.join(dirpath, base_name)
reference_resolver.current_doc_full_name = full_in_path
suffix = os.path.relpath(path=full_in_path, start=src_dir)
full_out_path = os.path.join(output_dir, suffix)
if not base_name.endswith('.md'):
print('Copying non-md file %s...' % suffix)
open(full_out_path, 'w').write(open(full_in_path).read())
continue
if dirpath.endswith('/api_guides/python'):
print('Processing Python guide %s...' % base_name)
md_string = tag_updater.process(full_in_path)
else:
print('Processing doc %s...' % suffix)
md_string = open(full_in_path).read()
output = reference_resolver.replace_references(md_string,
relative_path_to_root)
with open(full_out_path, 'w') as f:
f.write(header + output)
print('Done.')
class DocGenerator(object):
"""Main entry point for generating docs."""
def __init__(self):
if sys.version_info >= (3, 0):
sys.exit('Doc generation is not supported from python3.')
self.argument_parser = argparse.ArgumentParser()
self._py_modules = None
self._private_map = _get_default_private_map()
self._do_not_descend_map = _get_default_do_not_descend_map()
self.yaml_toc = True
def add_output_dir_argument(self):
self.argument_parser.add_argument(
'--output_dir',
type=str,
default=None,
required=True,
help='Directory to write docs to.')
def add_src_dir_argument(self):
self.argument_parser.add_argument(
'--src_dir',
type=str,
default=None,
required=True,
help='Directory with the source docs.')
def add_base_dir_argument(self, default_base_dir):
self.argument_parser.add_argument(
'--base_dir',
type=str,
default=default_base_dir,
help='Base directory to strip from file names referenced in docs.')
def parse_known_args(self):
flags, _ = self.argument_parser.parse_known_args()
return flags
def add_to_private_map(self, d):
add_dict_to_dict(d, self._private_map)
def add_to_do_not_descend_map(self, d):
add_dict_to_dict(d, self._do_not_descend_map)
def set_private_map(self, d):
self._private_map = d
def set_do_not_descend_map(self, d):
self._do_not_descend_map = d
def set_py_modules(self, py_modules):
self._py_modules = py_modules
def py_module_names(self):
if self._py_modules is None:
raise RuntimeError(
'Must call set_py_modules() before running py_module_names().')
return [name for (name, _) in self._py_modules]
def make_reference_resolver(self, visitor, doc_index):
return parser.ReferenceResolver.from_visitor(
visitor, doc_index, py_module_names=self.py_module_names())
def make_parser_config(self, visitor, reference_resolver, guide_index,
base_dir):
return parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates=visitor.duplicates,
duplicate_of=visitor.duplicate_of,
tree=visitor.tree,
index=visitor.index,
reverse_index=visitor.reverse_index,
guide_index=guide_index,
base_dir=base_dir)
def run_extraction(self):
return extract(self._py_modules, self._private_map,
self._do_not_descend_map)
def build(self, flags):
"""Actually build the docs."""
doc_index = build_doc_index(flags.src_dir)
visitor = self.run_extraction()
reference_resolver = self.make_reference_resolver(visitor, doc_index)
root_title = getattr(flags, 'root_title', 'TensorFlow')
guide_index = _build_guide_index(
os.path.join(flags.src_dir, 'api_guides/python'))
parser_config = self.make_parser_config(visitor, reference_resolver,
guide_index, flags.base_dir)
output_dir = os.path.join(flags.output_dir, 'api_docs/python')
write_docs(
output_dir,
parser_config,
yaml_toc=self.yaml_toc,
root_title=root_title)
_other_docs(flags.src_dir, flags.output_dir, reference_resolver)
parser_config.reference_resolver.log_errors()
return parser_config.reference_resolver.num_errors()
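# --- Usage sketch (illustrative, not from the original module) --------------
# A minimal driver showing how the pieces above are typically wired together.
# The module list below ('tf' -> the top-level tensorflow package) and the
# base_dir choice are assumptions for illustration; real callers pass their
# own (name, module) pairs and directories.
if __name__ == '__main__':
  import tensorflow as tf

  doc_generator = DocGenerator()
  doc_generator.add_output_dir_argument()
  doc_generator.add_src_dir_argument()
  doc_generator.add_base_dir_argument(os.path.dirname(tf.__file__))
  flags = doc_generator.parse_known_args()
  doc_generator.set_py_modules([('tf', tf)])
  # build() writes the API docs and returns the number of broken references.
  sys.exit(doc_generator.build(flags))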
|
|
'''
BSD Licence
Copyright (c) 2012, Science & Technology Facilities Council (STFC)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Science & Technology Facilities Council (STFC)
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Created on 6 May 2012
@author: Maurizio Nagni
'''
from ceda_markup.markup import createMarkup, createSimpleMarkup
from ceda_markup import extend_element
ATOM_NAMESPACE = 'http://www.w3.org/2005/Atom'
ATOM_PREFIX = 'atom'
ATOM_ROOT_TAG = 'feed'
def createAtomDocument(iid, title, updated, subtitle = None, rights = None):
'''
Returns an ElementTree.Element representing an Atom document
@param iid: a string
@param title: a string
@param updated: a string
@param subtitle: a string
@param rights: a string
@return: a new ElementTree.Element instance
'''
atom = createAtom()
_id = createID(iid, atom)
atom.append(_id)
_title = createTitle(title, atom)
atom.append(_title)
_updated = createUpdated(updated, atom)
atom.append(_updated)
if subtitle is not None:
_subtitle = createSubTitle(subtitle, atom)
atom.append(_subtitle)
if rights is not None:
_rights = createRigths(rights, atom)
atom.append(_rights)
return atom
def createEntry(iid, title, updated, \
author = None, content = None, link = None, \
published = None, root = None,
ns = ATOM_NAMESPACE):
'''
Constructor
@param iid: an atom.ID instance
@param title: an atom.Title instance
@param updated: an atom.Update instance
@param author: one or more atom.Author instances
@param content: an atom.Content instance
@param link: one or more atom.Link instances
@param published: an atom.Published instance
    @param root: the document root element where to attach the prefix:namespace for this element
'''
markup = createMarkup('entry', ATOM_PREFIX, ns, root)
markup.append(iid)
markup.append(title)
markup.append(updated)
if author is not None:
if isinstance(author, list):
extend_element(markup, author)
else:
markup.append(author)
if content is not None:
markup.append(content)
if link is not None:
markup.append(link)
if published is not None:
markup.append(published)
return markup
def createAtom(root = None, tagName = ATOM_ROOT_TAG, ns = ATOM_NAMESPACE):
'''
Returns an ElementTree.Element representing an Atom tag
@param root: the root tag of the document containing this element
@param tagName: the tagName
@param ns: the tag namespace
@return: a new ElementTree.Element instance
'''
return createMarkup(tagName, ATOM_PREFIX, ns, root)
def createID(iid, root = None, ns = ATOM_NAMESPACE):
'''
Returns an Atom.id instance as ElementTree
    @param iid: a unique identifier, possibly a URI
@param root: the root tag of the document containing this element
@param ns: the tag namespace
@return: a new ElementTree.Element instance
'''
return createSimpleMarkup(str(iid), root, 'id', ns, ATOM_PREFIX)
def createTitle(title, root = None, ns = ATOM_NAMESPACE):
'''
Returns an Atom.title instance as ElementTree
@param title: the title's text
@param root: the root tag of the document containing this element
@param ns: the tag namespace
@return: a new ElementTree.Element instance
'''
return createSimpleMarkup(title, root, 'title', ns, ATOM_PREFIX)
def createSubTitle(subtitle, root = None, ns = ATOM_NAMESPACE):
'''
Returns an Atom.subtitle instance as ElementTree
    @param subtitle: the subtitle's text
@param root: the root tag of the document containing this element
@param ns: the tag namespace
@return: a new ElementTree.Element instance
'''
return createSimpleMarkup(str(subtitle), root, 'subtitle', ns, ATOM_PREFIX)
def createRigths(rigths, root = None, ns = ATOM_NAMESPACE):
'''
    Returns an Atom.rights instance as ElementTree
    @param rigths: the rights text
@param root: the root tag of the document containing this element
@param ns: the tag namespace
@return: a new ElementTree.Element instance
'''
return createSimpleMarkup(str(rigths), root, 'rigths', ns, ATOM_PREFIX)
def createUpdated(updated, root = None, ns = ATOM_NAMESPACE):
'''
Returns an Atom.updated instance as ElementTree
@param updated: is a Date construct indicating the most
recent instant in time when an entry or feed was modified in a way
the publisher considers significant.
@param root: the root tag of the document containing this element
@param ns: the tag namespace
@return: a new ElementTree.Element instance
'''
return createSimpleMarkup(str(updated), root, 'updated', ns, ATOM_PREFIX)
def createPublished(published, root = None, ns = ATOM_NAMESPACE):
'''
@param published: is a Date construct indicating an
instant in time associated with an event early in the life cycle of
the entry
@param root: the root tag of the document containing this element
@param ns: the tag namespace
@return: a new ElementTree.Element instance
'''
return createSimpleMarkup(str(published), root, 'published', ns, ATOM_PREFIX)
ATOM_LINK_REL_SELF = 'self'
ATOM_LINK_REL_FIRST = 'first'
ATOM_LINK_REL_LAST = 'last'
ATOM_LINK_REL_SEARCH = 'search'
ATOM_LINK_REL = [ATOM_LINK_REL_SELF, ATOM_LINK_REL_FIRST, ATOM_LINK_REL_LAST, ATOM_LINK_REL_SEARCH]
def createLink(iri, rel = None, itype = None, root = None, ns = ATOM_NAMESPACE):
'''
@param iri: contains the link's IRI
@param rel: a string like 'self', 'first', 'last', ...
@param itype: an advisory media type as 'application/atom+xml'
@param root: the root tag of the document containing this element
@param ns: the tag namespace
@return: a new ElementTree.Element instance
'''
markup = createMarkup('link', ATOM_PREFIX, ns, root)
markup.set('href', iri)
if rel is not None:
markup.set('rel', rel)
if itype is not None:
markup.set('type', itype)
return markup
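# --- Usage sketch (illustrative, not part of ceda_markup) -------------------
# A minimal example of assembling an Atom feed with one entry using the helpers
# above. The identifiers, dates and URLs are hypothetical sample values.
if __name__ == '__main__':
    from xml.etree.ElementTree import tostring

    feed = createAtomDocument('urn:example:feed', 'Example Feed',
                              '2012-05-06T18:30:02Z', subtitle = 'A sample feed')
    entry = createEntry(createID('urn:example:entry-1', feed),
                        createTitle('First entry', feed),
                        createUpdated('2012-05-06T18:30:02Z', feed),
                        link = createLink('http://example.org/entry/1',
                                          ATOM_LINK_REL_SELF,
                                          'application/atom+xml', feed),
                        root = feed)
    # The entry element is attached to the feed and serialized with ElementTree.
    feed.append(entry)
    print(tostring(feed))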
|
|
import json
import pytest
from munch import DefaultMunch, Munch, munchify, unmunchify
def test_base():
b = Munch()
b.hello = 'world'
assert b.hello == 'world'
b['hello'] += "!"
assert b.hello == 'world!'
b.foo = Munch(lol=True)
assert b.foo.lol is True
assert b.foo is b['foo']
assert sorted(b.keys()) == ['foo', 'hello']
b.update({'ponies': 'are pretty!'}, hello=42)
assert b == Munch({'ponies': 'are pretty!', 'foo': Munch({'lol': True}), 'hello': 42})
assert sorted([(k, b[k]) for k in b]) == [('foo', Munch({'lol': True})), ('hello', 42), ('ponies', 'are pretty!')]
assert "The {knights} who say {ni}!".format(**Munch(knights='lolcats', ni='can haz')) == 'The lolcats who say can haz!'
def test_contains():
b = Munch(ponies='are pretty!')
assert 'ponies' in b
assert ('foo' in b) is False
b['foo'] = 42
assert 'foo' in b
b.hello = 'hai'
assert 'hello' in b
b[None] = 123
assert None in b
b[False] = 456
assert False in b
def test_getattr():
b = Munch(bar='baz', lol={})
with pytest.raises(AttributeError):
b.foo
assert b.bar == 'baz'
assert getattr(b, 'bar') == 'baz'
assert b['bar'] == 'baz'
assert b.lol is b['lol']
assert b.lol is getattr(b, 'lol')
def test_setattr():
b = Munch(foo='bar', this_is='useful when subclassing')
assert hasattr(b.values, '__call__')
b.values = 'uh oh'
assert b.values == 'uh oh'
with pytest.raises(KeyError):
b['values']
def test_delattr():
b = Munch(lol=42)
del b.lol
with pytest.raises(KeyError):
b['lol']
with pytest.raises(AttributeError):
b.lol
def test_toDict():
b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
assert sorted(b.toDict().items()) == [('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]
def test_dict_property():
b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
assert sorted(b.__dict__.items()) == [('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]
def test_repr():
b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
assert repr(b).startswith("Munch({'")
assert "'ponies': 'are pretty!'" in repr(b)
assert "'hello': 42" in repr(b)
assert "'foo': Munch({'lol': True})" in repr(b)
assert "'hello': 42" in repr(b)
with_spaces = Munch({1: 2, 'a b': 9, 'c': Munch({'simple': 5})})
assert repr(with_spaces).startswith("Munch({")
assert "'a b': 9" in repr(with_spaces)
assert "1: 2" in repr(with_spaces)
assert "'c': Munch({'simple': 5})" in repr(with_spaces)
assert eval(repr(with_spaces)) == Munch({'a b': 9, 1: 2, 'c': Munch({'simple': 5})})
def test_dir():
m = Munch(a=1, b=2)
assert dir(m) == ['a', 'b']
def test_fromDict():
b = Munch.fromDict({'urmom': {'sez': {'what': 'what'}}})
assert b.urmom.sez.what == 'what'
def test_copy():
m = Munch(urmom=Munch(sez=Munch(what='what')))
c = m.copy()
assert c is not m
assert c.urmom is not m.urmom
assert c.urmom.sez is not m.urmom.sez
assert c.urmom.sez.what == 'what'
assert c == m
def test_munchify():
b = munchify({'urmom': {'sez': {'what': 'what'}}})
assert b.urmom.sez.what == 'what'
b = munchify({'lol': ('cats', {'hah': 'i win again'}), 'hello': [{'french': 'salut', 'german': 'hallo'}]})
assert b.hello[0].french == 'salut'
assert b.lol[1].hah == 'i win again'
def test_unmunchify():
b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
assert sorted(unmunchify(b).items()) == [('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]
b = Munch(foo=['bar', Munch(lol=True)], hello=42, ponies=('are pretty!', Munch(lies='are trouble!')))
assert sorted(unmunchify(b).items()) == [('foo', ['bar', {'lol': True}]), ('hello', 42), ('ponies', ('are pretty!', {'lies': 'are trouble!'}))]
def test_toJSON():
b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
assert json.dumps(b) == b.toJSON()
@pytest.mark.parametrize("attrname", dir(Munch))
def test_reserved_attributes(attrname):
# Make sure that the default attributes on the Munch instance are
# accessible.
taken_munch = Munch(**{attrname: 'abc123'})
    # Make sure that the attribute is seen as contained in the filled collection...
assert attrname in taken_munch
# ...and that it is available using key access...
assert taken_munch[attrname] == 'abc123'
# ...but that it is not available using attribute access.
attr = getattr(taken_munch, attrname)
assert attr != 'abc123'
empty_munch = Munch()
    # Make sure that the attribute is not seen as contained in the empty
# collection...
assert attrname not in empty_munch
# ...and that the attr is of the correct original type.
attr = getattr(empty_munch, attrname)
if attrname == '__doc__':
assert isinstance(attr, str)
elif attrname in ('__hash__', '__weakref__'):
assert attr is None
elif attrname == '__module__':
assert attr == 'munch'
elif attrname == '__dict__':
assert attr == {}
else:
assert callable(attr)
def test_getattr_default():
b = DefaultMunch(bar='baz', lol={})
assert b.foo is None
assert b['foo'] is None
assert b.bar == 'baz'
assert getattr(b, 'bar') == 'baz'
assert b['bar'] == 'baz'
assert b.lol is b['lol']
assert b.lol is getattr(b, 'lol')
undefined = object()
b = DefaultMunch(undefined, bar='baz', lol={})
assert b.foo is undefined
assert b['foo'] is undefined
def test_setattr_default():
b = DefaultMunch(foo='bar', this_is='useful when subclassing')
assert hasattr(b.values, '__call__')
b.values = 'uh oh'
assert b.values == 'uh oh'
assert b['values'] is None
assert b.__default__ is None
assert '__default__' not in b
def test_delattr_default():
b = DefaultMunch(lol=42)
del b.lol
assert b.lol is None
assert b['lol'] is None
def test_fromDict_default():
undefined = object()
b = DefaultMunch.fromDict({'urmom': {'sez': {'what': 'what'}}}, undefined)
assert b.urmom.sez.what == 'what'
assert b.urmom.sez.foo is undefined
def test_copy_default():
undefined = object()
m = DefaultMunch.fromDict({'urmom': {'sez': {'what': 'what'}}}, undefined)
c = m.copy()
assert c is not m
assert c.urmom is not m.urmom
assert c.urmom.sez is not m.urmom.sez
assert c.urmom.sez.what == 'what'
assert c == m
assert c.urmom.sez.foo is undefined
assert c.urmom.sez.__undefined__ is undefined
def test_munchify_default():
undefined = object()
b = munchify(
{'urmom': {'sez': {'what': 'what'}}},
lambda d: DefaultMunch(undefined, d))
assert b.urmom.sez.what == 'what'
assert b.urdad is undefined
assert b.urmom.sez.ni is undefined
def test_repr_default():
b = DefaultMunch(foo=DefaultMunch(lol=True), ponies='are pretty!')
assert repr(b).startswith("DefaultMunch(None, {'")
assert "'ponies': 'are pretty!'" in repr(b)
|
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import json
import re
import traceback
from datetime import datetime
from typing import Any, Dict
from urllib.parse import quote_plus
import requests
import urllib3
import copy
# Disable insecure warnings
urllib3.disable_warnings()
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
NO_RESULT_MSG = 'No results found for the command.'
''' CLIENT CLASS '''
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
# Sends request to API Endpoint URL using _http_request() method.
def http_request(self, method, url_suffix, data=None, headers=None, json_data=None, params=None, full_url=None,
resp_type='response'):
return self._http_request(
method=method,
url_suffix=url_suffix,
data=data,
headers=headers,
resp_type=resp_type,
json_data=json_data,
params=params,
full_url=full_url
)
# Validate limit value
def validate_limit_sta(self, limit):
limit = arg_to_number(arg=limit)
if limit not in range(1, 10001):
raise Exception("Limit must be between 1 to 10000.")
return limit
# Validate date value
def validate_date_sta(self, datestring):
        pattern = re.compile(r"[0-9]{4}-[0-9]{2}-[0-9]{2}T([0-9]{2}:){2}[0-9]{2}\.[0-9]{3}Z")
if re.match(pattern, datestring):
try:
datetime.fromisoformat(datestring.replace('Z', '+00:00'))
return datestring
            except ValueError as e:
raise Exception(f"Please enter a valid date. \nError:\n{str(e)}")
raise Exception("Date must be in format yyyy-mm-ddTHH:mm:ss.fffZ \nExample: 2021-05-16T02:30:00.234Z")
# Validate mandatory argument.
def validate_mandatory_argument_sta(self, fields={}):
for key, value in fields.items():
if not value or value == "":
raise Exception(f"Please provide the value for {key}.")
# Get paginated results from API endpoint URL.
def get_paged_results_sta(self, uri, query_params=None, limit=None):
response = self.http_request(
method="GET",
url_suffix=uri,
params=query_params
)
request_count = 1
quotient = 0
if limit:
items_count = len(response.json()['page']['items'])
quotient = limit // items_count
reminder = limit % items_count
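            # The requested limit is split into `quotient` full pages (at the API's
            # default page size) plus a final partial page of `reminder` items; the
            # loop below follows "next" links and stops after that partial page.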
paged_results = response.json()['page']['items']
while "next" in response.json()['links'] and len(response.json()['page']['items']) > 0:
if request_count >= 10:
raise Exception("You have reached the maximum number of request attempts."
" Please use the limit argument to get the required result.")
next_page = response.json()['links']["next"]
if quotient == request_count:
query_params = (
('pageIndex', quotient),
('pageSize', reminder),
)
response = self.http_request(
method="GET",
url_suffix=uri,
params=query_params
)
paged_results += response.json()['page']['items']
break
response = self.http_request(
method="GET",
full_url=next_page,
url_suffix='',
params=query_params
)
request_count = request_count + 1
paged_results += response.json()['page']['items']
return paged_results
# Get list of all the users in the tenant.
def get_userlist_sta(self, limit=None):
uri = '/users'
if limit:
limit = self.validate_limit_sta(limit)
if limit <= 100:
query_params = (
('pageIndex', 0),
('pageSize', limit),
)
return self.http_request(
method='GET',
url_suffix=uri,
params=query_params,
).json()['page']['items']
else:
return self.get_paged_results_sta(uri=uri, limit=limit)
else:
return self.get_paged_results_sta(uri=uri)
# Get profile information of a specific user.
def get_user_info_sta(self, userName):
self.validate_mandatory_argument_sta(fields={"username": userName})
return self.http_request(
method='GET',
url_suffix=urljoin('/users/', quote_plus(userName)),
).json()
# Get information of a group in a tenant.
def get_group_info_sta(self, groupName):
self.validate_mandatory_argument_sta(fields={"group": groupName})
response = self.http_request(
method='GET',
url_suffix='/groups',
)
paged_results = response.json()['page']['items']
while "next" in response.json()['links'] and len(response.json()['page']['items']) > 0:
next_page = response.json()['links']["next"]
response = self.http_request(
method="GET",
full_url=next_page,
url_suffix='',
)
paged_results += response.json()['page']['items']
for group in paged_results:
if group['name'] == groupName:
return group
raise Exception(f'The group {groupName} was not found.')
# Create a new user in the tenant.
def create_user_sta(self, args):
data = {
"userName": args.get('userName'),
"firstName": args.get('first_name'),
"lastName": args.get('last_name'),
"email": args.get('email'),
"mobileNumber": args.get('mobile_number'),
"alias1": args.get('alias1'),
"alias2": args.get('alias2'),
"custom1": args.get('custom1'),
"custom2": args.get('custom2'),
"custom3": args.get('custom3'),
"address": args.get('address'),
"city": args.get('city'),
"state": args.get('state'),
"country": args.get('country'),
"postalCode": args.get('postal_code'),
"isSynchronized": args.get('synchronized')
}
return self.http_request(
method='POST',
url_suffix='/users',
data=json.dumps(data)
).json()
# Update profile of a specific user.
def update_user_sta(self, args):
data = {}
if args.get('userName_new') is not None:
data['userName'] = args.get('userName_new')
if args.get('first_name') is not None:
data['firstName'] = args.get('first_name')
if args.get('last_name') is not None:
data['lastName'] = args.get('last_name')
if args.get('email') is not None:
data['email'] = args.get('email')
if args.get('mobile_number') is not None:
data['mobileNumber'] = args.get('mobile_number')
if args.get('alias1') is not None:
data['alias1'] = args.get('alias1')
if args.get('alias2') is not None:
data['alias2'] = args.get('alias2')
if args.get('address') is not None:
data['address'] = args.get('address')
if args.get('city') is not None:
data['city'] = args.get('city')
if args.get('state') is not None:
data['state'] = args.get('state')
if args.get('country') is not None:
data['country'] = args.get('country')
if args.get('postal_code') is not None:
data['postalCode'] = args.get('postal_code')
return self.http_request(
method='PATCH',
url_suffix=urljoin('/users/', quote_plus(args.get('userName'))),
data=json.dumps(data)
).json()
# Get user ID from username.
def get_user_id_sta(self, userName):
return self.get_user_info_sta(userName=userName)['id']
# Get group ID from groupname.
def get_group_id_sta(self, groupName):
return self.get_group_info_sta(groupName=groupName)['id']
# Delete user from the tenant.
def delete_user_sta(self, userName):
user_id = self.get_user_id_sta(userName=userName)
self.http_request(
method='DELETE',
url_suffix=urljoin('/users/', quote_plus(userName)),
)
return {"id": user_id, "userName": userName, "Deleted": True}
# Get all the groups associated with a specific user.
def get_user_groups_sta(self, userName, limit=None):
user_id = self.get_user_id_sta(userName=userName)
uri = urljoin(urljoin('/users/', quote_plus(user_id)), '/groups')
if limit:
limit = self.validate_limit_sta(limit)
if limit <= 100:
query_params = (
('pageIndex', 0),
('pageSize', limit),
)
return self.http_request(
method='GET',
url_suffix=uri,
params=query_params,
).json()['page']['items']
else:
return self.get_paged_results_sta(uri=uri, limit=limit)
else:
return self.get_paged_results_sta(uri=uri)
    # Returns output data for a user's groups.
def user_groups_data(self, userName, limit=None):
response = self.get_user_groups_sta(userName=userName, limit=limit)
data = self.get_user_info_sta(userName=userName)
data['groups'] = response
return response, data
# Get list of groups in the tenant.
def get_group_list_sta(self, limit=None):
uri = '/groups'
if limit:
limit = self.validate_limit_sta(limit)
if limit <= 100:
query_params = (
('pageIndex', 0),
('pageSize', limit),
)
return self.http_request(
method='GET',
url_suffix=uri,
params=query_params,
).json()['page']['items']
else:
return self.get_paged_results_sta(uri=uri, limit=limit)
else:
return self.get_paged_results_sta(uri=uri)
# Get list of all the users in a group.
def get_group_members_sta(self, groupName, limit=None):
group_id = self.get_group_id_sta(groupName=groupName)
uri = urljoin(urljoin('/groups/', quote_plus(group_id)), '/members')
if limit:
limit = self.validate_limit_sta(limit)
if limit <= 100:
query_params = (
('pageIndex', 0),
('pageSize', limit),
)
return self.http_request(
method='GET',
url_suffix=uri,
params=query_params,
).json()['page']['items']
else:
return self.get_paged_results_sta(uri=uri, limit=limit)
else:
return self.get_paged_results_sta(uri=uri)
# Returns output data for group members.
def group_members_data(self, groupName, limit=None):
response = self.get_group_members_sta(groupName=groupName, limit=limit)
data = self.get_group_info_sta(groupName=groupName)
data['users'] = response
return response, data
# Create a group in the tenant.
def create_group_sta(self, args):
data = {
"name": args.get('groupName'),
"description": args.get('description'),
"isSynchronized": args.get('synchronized'),
}
return self.http_request(
method='POST',
url_suffix='/groups',
data=json.dumps(data)
).json()
# Delete a group from the tenant.
def delete_group_sta(self, groupName):
group_id = self.get_group_id_sta(groupName=groupName)
self.http_request(
method='DELETE',
url_suffix=urljoin('/groups/', quote_plus(group_id)),
)
return {"id": group_id, "groupName": groupName, "Deleted": True}
# Update information of a specific group.
def update_group_sta(self, args):
group_id = self.get_group_id_sta(groupName=args.get('groupName'))
data = {}
if args.get('groupName_new') is not None:
data['name'] = args.get('groupName_new')
if args.get('description') is not None:
data['description'] = args.get('description')
return self.http_request(
method='PATCH',
url_suffix=urljoin('/groups/', quote_plus(group_id)),
data=json.dumps(data)
).json()
    # Check if a user exists in a group.
def user_exist_group_sta(self, userName, groupName):
user_id = self.get_user_id_sta(userName=userName)
group_id = self.get_group_id_sta(groupName=groupName)
user_groups = self.http_request(
method='GET',
url_suffix=urljoin(urljoin('/users/', quote_plus(user_id)), '/groups'),
).json()['page']['items']
if user_groups is not None:
for group in user_groups:
if group['id'] == group_id:
return True
return False
# Add user in a group.
def add_user_group_sta(self, userName, groupName):
if self.user_exist_group_sta(userName=userName, groupName=groupName) is False:
user_id = self.get_user_id_sta(userName=userName)
group_id = self.get_group_id_sta(groupName=groupName)
data = {
"id": user_id,
"type": "User",
}
self.http_request(
method='POST',
url_suffix=urljoin(urljoin('/groups/', quote_plus(group_id)), '/members'),
data=json.dumps(data),
)
return {"user_id": user_id, "userName": userName, "group_id": group_id, "groupName": groupName,
"status": True}
else:
raise Exception(f"Username - {userName} is already a member of the group - {groupName}.")
# Remove user from a group.
def remove_user_group_sta(self, userName, groupName):
if self.user_exist_group_sta(userName=userName, groupName=groupName) is True:
user_id = self.get_user_id_sta(userName=userName)
group_id = self.get_group_id_sta(groupName=groupName)
self.http_request(
method='DELETE',
url_suffix=urljoin(urljoin(urljoin('/groups/', quote_plus(group_id)), '/members/'),
quote_plus(user_id)),
)
return {"user_id": user_id, "userName": userName, "group_id": group_id, "groupName": groupName,
"status": False}
else:
raise Exception(f"Username - {userName} is not a member of the group - {groupName}.")
# Creates a log's attribute dictionary from API's response data.
def logs_attributes_sta(self, response):
logs_attributes = {'timeStamp': response['timeStamp'], 'userName': response['context']['principalId'],
'logType': response['details']['type'], 'ip': response['context']['originatingAddress'],
"credentialType": "", "resultText": "", "actionText": "", "applicationName": "",
"policyName": "", "state": "", "operationType": "", "operationObjectType": "",
"operationObjectName": "", "message": "", "serial": ""}
if 'credentialType' in response['details']:
logs_attributes['credentialType'] = response['details']['credentialType']
elif 'credentials' in response['details']:
logs_attributes['credentialType'] = response['details']['credentials'][0]['type']
if 'resultText' in response['details'].keys():
logs_attributes['resultText'] = response['details']['resultText']
if 'actionText' in response['details'].keys():
logs_attributes['actionText'] = response['details']['actionText']
if 'applicationName' in response['context'].keys():
logs_attributes['applicationName'] = response['context']['applicationName']
if 'policyName' in response['context'].keys():
logs_attributes['policyName'] = response['context']['policyName']
if 'state' in response['details'].keys():
logs_attributes['state'] = response['details']['state']
if 'operationType' in response['details'].keys():
logs_attributes['operationType'] = response['details']['operationType']
if 'operationObjectType' in response['details'].keys():
logs_attributes['operationObjectType'] = response['details']['operationObjectType']
if 'operationObjectName' in response['details'].keys():
logs_attributes['operationObjectName'] = response['details']['operationObjectName']
if 'message' in response['details'].keys():
logs_attributes['message'] = response['details']['message']
elif 'description' in response['details'].keys():
logs_attributes['message'] = response['details']['description']
if 'serial' in response['details'].keys():
logs_attributes['serial'] = response['details']['serial']
return logs_attributes
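# Editorial note (sketch only, not applied to the logic above): the optional 'details' and
# 'context' lookups could equivalently use dict.get with a default, e.g.
#     logs_attributes['resultText'] = response['details'].get('resultText', "")
# Only 'credentialType' needs the extra elif branch, because it may come from either the
# 'credentialType' or the 'credentials' key.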
# Filters the required data from the total items according to the limit and userName arguments.
def logs_data_filter_sta(self, total_items, userName=None, limit=None, count=1, logs_items=None):
if logs_items is None:
logs_items = []
if userName:
for response in total_items:
if 'principalId' in response['context'].keys():
if response['context']['principalId'] == userName:
if limit:
if limit >= count:
count = count + 1
else:
break
logs_items.append(self.logs_attributes_sta(response=response))
else:
for response in total_items:
if 'principalId' in response['context'].keys():
if limit:
if limit >= count:
count = count + 1
else:
break
logs_items.append(self.logs_attributes_sta(response=response))
return logs_items, count
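# Descriptive note: logs_data_filter_sta returns a tuple (logs_items, count) - logs_items is the
# list of flattened log-attribute dicts built by logs_attributes_sta, and count is the running
# number of accepted records so that the limit can be enforced across paginated responses.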
# Get user's logs.
def get_logs_sta(self, userName=None, since=None, until=None, limit=None):
uri = '/logs'
query_params = {}
if userName:
self.get_user_info_sta(userName=userName)
if since:
query_params['since'] = self.validate_date_sta(datestring=since)
if until:
query_params['until'] = self.validate_date_sta(datestring=until)
if since and until:
if until <= since:
raise Exception("Until argument's date and time must be greater than since.")
if not since and until:
raise Exception("Use until argument only while using since.")
query_params = tuple(query_params.items())
response = self.http_request(
method='GET',
url_suffix=uri,
params=query_params,
)
if since and not limit:
limit = 10000
if limit:
limit = self.validate_limit_sta(limit)
request_count = 1
paged_results, count = self.logs_data_filter_sta(response.json()['page']['items'], userName=userName,
limit=limit)
while "next" in response.json()['links'] and len(response.json()['page']['items']) > 0 and limit >= count:
if request_count >= 10:
if userName:
break
raise Exception("You have reached the maximum number of request attempts."
" Please use either the since or until argument to get the required result.")
next_page = response.json()['links']["next"]
response = self.http_request(
method="GET",
full_url=next_page,
url_suffix='',
)
request_count = request_count + 1
paged_results, count = self.logs_data_filter_sta(response.json()['page']['items'], userName=userName,
limit=limit, count=count, logs_items=paged_results)
else:
paged_results = self.logs_data_filter_sta(response.json()['page']['items'], userName)[0]
return paged_results
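# Pagination summary (descriptive note): get_logs_sta follows response['links']['next'] until the
# limit is reached, a page comes back empty, or 10 requests have been made. With a userName filter
# the loop simply stops after 10 requests; without one it raises and asks the caller to narrow the
# window via since/until. A since value without a limit defaults the limit to 10000. Example call
# with placeholder arguments:
#     client.get_logs_sta(userName='<userName>', limit=50)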
# Validate tenant and permissions.
def validate_tenant_sta(self):
return self.http_request(
method='GET',
url_suffix='/authorized'
)
# Get list of all the applications in the tenant.
def get_application_list_sta(self, limit=None):
uri = '/applications'
if limit:
limit = self.validate_limit_sta(limit)
if limit <= 100:
query_params = (
('pageIndex', 0),
('pageSize', limit),
)
return self.http_request(
method='GET',
url_suffix=uri,
params=query_params,
).json()['page']['items']
else:
return self.get_paged_results_sta(uri=uri, limit=limit)
else:
return self.get_paged_results_sta(uri=uri)
# Returns basic information of an application if it exists in the tenant.
def get_basic_application_info_sta(self, applicationName):
self.validate_mandatory_argument_sta(fields={"applicationName": applicationName})
response = self.http_request(
method='GET',
url_suffix='/applications',
)
paged_results = response.json()['page']['items']
while "next" in response.json()['links'] and len(response.json()['page']['items']) > 0:
next_page = response.json()['links']["next"]
response = self.http_request(
method="GET",
full_url=next_page,
url_suffix='',
)
paged_results += response.json()['page']['items']
for application in paged_results:
if application['name'] == applicationName:
return application
raise Exception(f'The application - {applicationName} was not found.')
# Get application id of an application.
def get_application_id_sta(self, applicationName):
return self.get_basic_application_info_sta(applicationName=applicationName)['id']
# Get information for a specific application.
def get_application_info_sta(self, applicationName):
application_id = self.get_application_id_sta(applicationName=applicationName)
response = self.http_request(
method="GET",
url_suffix=urljoin('/applications/', quote_plus(application_id))
).json()
context_data = {
'id': response['id'],
'name': response['name'],
'status': response['status'],
'applicationType': response['applicationType'],
'templateName': response['templateName'],
'assignment': response['assignment'],
'schemaVersionNumber': response['schemaVersionNumber'],
'lastModified': response['lastModified']
}
readable_output = dict(context_data)
if 'everyone' in readable_output['assignment']:
if readable_output['assignment']['everyone'] is True:
readable_output['assignment'] = 'All'
else:
readable_output['assignment'] = 'None'
elif 'groups' in readable_output['assignment']:
readable_output['assignment'] = ', '.join(readable_output['assignment']['groups'])
return readable_output, context_data
# Get the list of applications assigned to a user.
def get_user_applications_sta(self, userName, limit=None):
user_id = self.get_user_id_sta(userName=userName)
uri = urljoin(urljoin('/users/', quote_plus(user_id)), '/applications')
if limit:
limit = self.validate_limit_sta(limit)
if limit <= 1000:
query_params = (
('pageIndex', 0),
('pageSize', limit),
)
return self.http_request(
method='GET',
url_suffix=uri,
params=query_params,
).json()['page']['items']
else:
return self.get_paged_results_sta(uri=uri, limit=limit)
else:
return self.get_paged_results_sta(uri=uri)
# Returns output data for a user's applications.
def user_applications_data(self, userName, limit=None):
response = self.get_user_applications_sta(userName=userName, limit=limit)
data = self.get_user_info_sta(userName=userName)
data['applications'] = response
return response, data
# Get the sessions for a specific user.
def get_user_sessions_sta(self, userName):
user_id = self.get_user_id_sta(userName=userName)
uri = urljoin(urljoin('/users/', quote_plus(user_id)), '/sessions')
return self.http_request(
method='GET',
url_suffix=uri,
).json()['sessions']
# Returns output data for a user's sessions.
def user_sessions_data(self, userName):
data = self.get_user_info_sta(userName=userName)
response = self.get_user_sessions_sta(userName=userName)
data['sessions'] = copy.deepcopy(response)
if response:
numb = 0
for session in response:
session['start'] = datetime.fromtimestamp(session['start']).strftime('%Y-%m-%dT%H:%M:%S.000Z')
session['expiry'] = datetime.fromtimestamp(session['expiry']).strftime('%Y-%m-%dT%H:%M:%S.000Z')
applications = []
for application in session['applications']:
applications.append(application['name'])
if applications:
response[numb]['applications'] = ', '.join(applications)
else:
response[numb]['applications'] = 'No applications.'
numb = numb + 1
return response, data
# Delete all the IDP sessions for a specific user.
def delete_sessions_sta(self, userName):
user_id = self.get_user_id_sta(userName=userName)
self.http_request(
method='DELETE',
url_suffix=urljoin(urljoin('/users/', quote_plus(user_id)), '/sessions'),
)
data = {
"id": user_id,
"userName": userName,
"sessions": {
"Deleted": True
}
}
return data
''' COMMAND FUNCTIONS '''
def test_module(client, args):
"""
Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.
Args:
client: SafeNet Trusted Access client to use.
Returns:
'ok' if test passed, anything else will fail the test.
"""
try:
client._http_request(method='GET', url_suffix='/users')
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: Ensure that the API key is correct.'
else:
raise e
return 'ok'
def get_userlist_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-get-user-list command. Get list of all the users in the tenant. """
response = client.get_userlist_sta(limit=args.get('limit'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ["id", "schemaVersionNumber", "userName", "firstName", "lastName", "email", "mobileNumber",
"alias1", "alias2", "alias3", "alias4", "address", "city", "state", "country", "postalCode",
"isSynchronized"]
return CommandResults(
readable_output=tableToMarkdown("List of users in the tenant :", response, headers=header_sequence,
headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.USER',
outputs_key_field=['id'],
outputs=response
)
def get_user_info_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-get-user-info command. Get profile information of a specific user."""
response = client.get_user_info_sta(userName=args.get('userName'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ["id", "schemaVersionNumber", "userName", "firstName", "lastName", "email", "mobileNumber",
"alias1", "alias2", "alias3", "alias4", "custom1", "custom2", "custom3", "address", "city",
"state", "country", "postalCode", "isSynchronized"]
return CommandResults(
readable_output=tableToMarkdown(f"Information for user - {args.get('userName')} :",
response, headers=header_sequence, headerTransform=pascalToSpace,
removeNull=True),
outputs_prefix='STA.USER',
outputs_key_field=['id'],
outputs=response
)
def create_user_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-create-user command. Create a new user in the tenant. """
response = client.create_user_sta(args=args)
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ["id", "schemaVersionNumber", "userName", "firstName", "lastName", "email", "mobileNumber",
"alias1", "alias2", "custom1", "custom2", "custom3", "address", "city", "state", "country",
"postalCode", "isSynchronized"]
return CommandResults(
readable_output=tableToMarkdown("STA user successfully created :", response, headers=header_sequence,
headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.USER',
outputs_key_field=['id'],
outputs=response
)
def update_user_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-update-user-info command. Update profile of a specific user. """
response = client.update_user_sta(args=args)
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ["id", "schemaVersionNumber", "userName", "firstName", "lastName", "email", "mobileNumber",
"alias1", "alias2", "custom1", "custom2", "custom3", "address", "city", "state", "country",
"postalCode", "isSynchronized"]
return CommandResults(
readable_output=tableToMarkdown("STA user successfully updated:", response,
headers=header_sequence, headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.USER',
outputs_key_field=['id'],
outputs=response
)
def delete_user_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-delete-user command. Delete user from the tenant. """
response = client.delete_user_sta(userName=args.get('userName'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
return CommandResults(
readable_output=f"## STA user - {args.get('userName')} successfully deleted.",
outputs_prefix='STA.USER',
outputs_key_field=['id'],
outputs=response
)
def get_user_groups_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-get-user-groups command. Get all the groups associated with a specific user. """
response, output_data = client.user_groups_data(userName=args.get('userName'), limit=args.get('limit'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ['id', 'schemaVersionNumber', 'name', 'description', 'isSynchronized']
return CommandResults(
readable_output=tableToMarkdown(
f"Groups associated with user - {args.get('userName')} : ", response, headers=header_sequence,
headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.USER',
outputs_key_field=['id'],
outputs=output_data
)
def get_group_list_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-get-group-list command. Get list of all the groups in the tenant. """
response = client.get_group_list_sta(limit=args.get('limit'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ['id', 'schemaVersionNumber', 'name', 'description', 'isSynchronized']
return CommandResults(
readable_output=tableToMarkdown("STA groups in the tenant : ", response,
headers=header_sequence, headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.GROUP',
outputs_key_field=['id'],
outputs=response
)
def get_group_info_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-get-group-info command. Get information of a specific group. """
response = client.get_group_info_sta(groupName=args.get('groupName'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ['id', 'schemaVersionNumber', 'name', 'description', 'isSynchronized']
return CommandResults(
readable_output=tableToMarkdown(f"Group - {args.get('groupName')} :", response,
headers=header_sequence, headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.GROUP',
outputs_key_field=['id'],
outputs=response
)
def get_group_members_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-get-group-members command. Get list of users in a specific group. """
response, output_data = client.group_members_data(groupName=args.get('groupName'), limit=args.get('limit'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ['id', 'name', 'type']
return CommandResults(
readable_output=tableToMarkdown(f"Members of group - {args.get('groupName')} : ", response,
headers=header_sequence, headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.GROUP',
outputs_key_field=['id'],
outputs=output_data
)
def create_group_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-create-group command. Create a new group in the tenant. """
response = client.create_group_sta(args=args)
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ['id', 'schemaVersionNumber', 'name', 'description', 'isSynchronized']
return CommandResults(
readable_output=tableToMarkdown(
f"STA group - {args.get('groupName')} successfully created:", response, headers=header_sequence,
headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.GROUP',
outputs_key_field=['id'],
outputs=response
)
def delete_group_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-delete-group command. Delete group from the tenant. """
response = client.delete_group_sta(groupName=args.get('groupName'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
return CommandResults(
readable_output=f"## STA group - {args.get('groupName')} successfully deleted.",
outputs_prefix='STA.GROUP',
outputs_key_field=['id'],
outputs=response
)
def update_group_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-update-group-info command. Update information of a specific group. """
response = client.update_group_sta(args=args)
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ['id', 'schemaVersionNumber', 'name', 'description', 'isSynchronized']
return CommandResults(
readable_output=tableToMarkdown("STA user successfully updated :", response, headers=header_sequence,
headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.GROUP',
outputs_key_field=['id'],
outputs=response
)
def user_exist_group_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-user-exist-group command. Checks if a user is a member of a specific group."""
response = client.user_exist_group_sta(userName=args.get('userName'), groupName=args.get('groupName'))
if response is True:
return CommandResults(
readable_output=f"## Yes, user - {args.get('userName')} is a member of group - {args.get('groupName')}.",
outputs_prefix='STA.EXIST.USER.GROUP',
outputs=response
)
else:
return CommandResults(
readable_output=f"## No, user - {args.get('userName')} is not a member of group - {args.get('groupName')}.",
outputs_prefix='STA.EXIST.USER.GROUP',
outputs=response
)
def add_user_group_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-add-user-group command. Add user to a specific group. """
response = client.add_user_group_sta(userName=args.get('userName'), groupName=args.get('groupName'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
return CommandResults(
readable_output=f"## User - {args.get('userName')} successfully added to the group - {args.get('groupName')}.",
outputs_prefix='STA.UPDATE.USER.GROUP',
outputs_key_field=['user_id', 'group_id'],
outputs=response
)
def remove_user_group_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-remove-user-group command. Remove user from a specific group. """
response = client.remove_user_group_sta(userName=args.get('userName'), groupName=args.get('groupName'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
return CommandResults(
readable_output=f"## User - {args.get('userName')} successfully removed from the group - {args.get('groupName')}.",
outputs_prefix='STA.UPDATE.USER.GROUP',
outputs_key_field=['user_id', 'group_id'],
outputs=response
)
def get_logs_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-get-logs command. Get user's logs. """
response = client.get_logs_sta(userName=args.get('userName'), since=args.get('since'),
until=args.get('until'), limit=args.get('limit'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ['timeStamp', 'userName', 'logType', 'credentialType', 'actionText', 'resultText', 'message',
'applicationName', 'policyName', 'state', 'operationType', 'operationObjectType',
'operationObjectName', 'serial', 'ip']
return CommandResults(
readable_output=tableToMarkdown("Logs : ", response, headers=header_sequence,
headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.LOGS',
outputs=response
)
def validate_tenant_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-validate-tenant command. Validate key and permissions. """
client.validate_tenant_sta()
return CommandResults(
readable_output="## The requested tenant is accessible.",
outputs_prefix='STA.VALIDATE.TENANT',
outputs=True
)
def get_application_list_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-get-application-list command. Get list of all the applications in the tenant. """
response = client.get_application_list_sta(limit=args.get('limit'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ["id", "name", "status"]
return CommandResults(
readable_output=tableToMarkdown("List of applications in the tenant :", response, headers=header_sequence,
headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.APPLICATION',
outputs_key_field=['id'],
outputs=response
)
def get_application_info_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-get-application-info command. Get profile information of a specific application."""
readable_output, context_data = client.get_application_info_sta(applicationName=args.get('applicationName'))
if not context_data:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ["id", "name", "status", "applicationType", "templateName", "assignment", "schemaVersionNumber",
"lastModified"]
return CommandResults(
readable_output=tableToMarkdown(f"Information of application - {args.get('applicationName')} :",
readable_output, headers=header_sequence, headerTransform=pascalToSpace,
removeNull=True),
outputs_prefix='STA.APPLICATION',
outputs_key_field=['id'],
outputs=context_data
)
def get_user_applications_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-get-user-applications. Get all the applications associated with a specific user. """
response, output_data = client.user_applications_data(userName=args.get('userName'), limit=args.get('limit'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
header_sequence = ["id", "name", "status"]
return CommandResults(
readable_output=tableToMarkdown(
f"Applications associated with user - {args.get('userName')} : ", response, headers=header_sequence,
headerTransform=pascalToSpace, removeNull=True),
outputs_prefix='STA.USER',
outputs_key_field=['id'],
outputs=output_data
)
def get_user_sessions_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-get-user-sessions command. Get all the sessions associated with a specific user. """
response, output_data = client.user_sessions_data(userName=args.get('userName'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
session_header = ["id", "start", "expiry", "applications"]
session_data = tableToMarkdown(
f"Sessions associated with user - {args.get('userName')} : ", response, headers=session_header,
headerTransform=pascalToSpace, removeNull=True)
return CommandResults(
readable_output=session_data,
outputs_prefix='STA.USER',
outputs_key_field=['id'],
outputs=output_data
)
def delete_user_sessions_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
""" Function for sta-delete-user-sessions command. Delete all the IDP sessions associated with a specific user. """
response = client.delete_sessions_sta(userName=args.get('userName'))
if not response:
return CommandResults(
readable_output=NO_RESULT_MSG,
)
return CommandResults(
readable_output=f"## IDP Sessions for the user - {args.get('userName')} successfully deleted.",
outputs_prefix='STA.USER',
outputs_key_field=['id'],
outputs=response
)
''' MAIN FUNCTION '''
def main() -> None:
"""
main function, parses params and runs command functions
"""
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
base_url = urljoin(urljoin(demisto.params()['url'], 'api/v1/tenants/'), demisto.params()['tenant_code'])
api_key = demisto.params().get('api_key')
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'accept': 'application/json',
'Object-Id-Format': 'base64',
'Content-Type': 'application/json',
'apikey': api_key
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy,
ok_codes=(200, 201, 204))
commands = {
'test-module': test_module,
'sta-get-user-list': get_userlist_sta_command,
'sta-get-user-info': get_user_info_sta_command,
'sta-create-user': create_user_sta_command,
'sta-update-user-info': update_user_sta_command,
'sta-delete-user': delete_user_sta_command,
'sta-get-user-groups': get_user_groups_sta_command,
'sta-get-group-list': get_group_list_sta_command,
'sta-get-group-info': get_group_info_sta_command,
'sta-get-group-members': get_group_members_sta_command,
'sta-create-group': create_group_sta_command,
'sta-delete-group': delete_group_sta_command,
'sta-update-group-info': update_group_sta_command,
'sta-user-exist-group': user_exist_group_sta_command,
'sta-add-user-group': add_user_group_sta_command,
'sta-remove-user-group': remove_user_group_sta_command,
'sta-get-logs': get_logs_sta_command,
'sta-validate-tenant': validate_tenant_sta_command,
'sta-get-application-list': get_application_list_sta_command,
'sta-get-application-info': get_application_info_sta_command,
'sta-get-user-applications': get_user_applications_sta_command,
'sta-get-user-sessions': get_user_sessions_sta_command,
'sta-delete-user-sessions': delete_user_sessions_sta_command
}
command = demisto.command()
if command in commands:
return_results(commands[command](client, demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
|
__date__ = '4/15/2014'
__author__ = 'ABREZNIC'
"""
The MIT License (MIT)
Copyright (c) 2016 Texas Department of Transportation
Author: Adam Breznicky
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os, shutil, arcpy
import datetime
# global variables/date variables
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
curDate = curMonth + "_" + curDay + "_" + curYear
runday = now.strftime("%B") + " " + curDay + ", " + curYear
weekly_maps_folder = os.path.dirname(__file__)
house = weekly_maps_folder + os.sep + curYear + os.sep + curDate
exported_cri_sharepoint_excel_name = "CountyRoadCertification_SharepointExport.xlsx"
arcpy.env.workspace = "in_memory"
sharepoint_table = {}
print "working out of: " + weekly_maps_folder
def build_sharepoint_dict():
exported_table = weekly_maps_folder + os.sep + exported_cri_sharepoint_excel_name
print "converting excel sheet"
arcpy.ExcelToTable_conversion(exported_table, "table")
print "iterating excel sheet"
cursor = arcpy.da.SearchCursor("table", ["County", "Update_Yea", "Status", "Needs_Upda", "Inital_Mar", "Needs_Fiel", "Data_Colle", "Comanche_U"])
for row in cursor:
if row[1] == curYear:
sharepoint_table[row[0]] = row
del cursor
del row
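# Descriptive note on the shape of sharepoint_table: rows whose Update_Yea matches the current
# year are stored keyed by county name, i.e.
#     {county: (County, Update_Yea, Status, Needs_Upda, Inital_Mar, Needs_Fiel, Data_Colle, Comanche_U)}
# tracking() reads index 2 (Status); status() reads indexes 3, 5, 6 and 7.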
def make_directories():
print "making directory to put the maps in"
if os.path.exists(weekly_maps_folder + os.sep + curYear):
if os.path.exists(weekly_maps_folder + os.sep + curYear + os.sep + curDate):
shutil.rmtree(weekly_maps_folder + os.sep + curYear + os.sep + curDate)
os.makedirs(weekly_maps_folder + os.sep + curYear + os.sep + curDate)
else:
os.makedirs(weekly_maps_folder + os.sep + curYear + os.sep + curDate)
else:
os.makedirs(weekly_maps_folder + os.sep + curYear)
os.makedirs(weekly_maps_folder + os.sep + curYear + os.sep + curDate)
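# Editorial sketch (not applied here): because os.makedirs creates intermediate folders, the
# nested checks above are equivalent to
#     if os.path.exists(house):
#         shutil.rmtree(house)
#     os.makedirs(house)  # also creates the year folder when it does not exist yet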
def tracking():
arcpy.AddMessage("Generating Tracking Map...")
statusMap = arcpy.mapping.MapDocument(weekly_maps_folder + "\\CRI_TRACKING.mxd")
dataFrame = arcpy.mapping.ListDataFrames(statusMap)[0]
newextent = dataFrame.extent
print "compiling the changes"
dict = {}
counter = 0
comper = 0
for record in sharepoint_table.keys():
county = record
stat = sharepoint_table[record][2]
counter += 1
if stat == "Electronic Update (GIS)" or stat == "Electronic Update (Road Logs)":
dict[county] = "Electronic Update"
comper += 1
elif stat == "Paper Update":
dict[county] = stat
comper += 1
else:
dict[county] = stat
#north county:
if int(curYear) % 2 == 0:
print "working the north side"
newextent.XMin, newextent.YMin = 582786.47000423, 826927.373313854
newextent.XMax, newextent.YMax = 1687689.94133357, 1600359.80324447
dataFrame.extent = newextent
print "updating changes"
cursor = arcpy.da.UpdateCursor(weekly_maps_folder + "\\Tracking_Shapefiles\\NorthCounties.shp", ["status", "CNTY_NM"])
for row in cursor:
row[0] = "No Response"
cursor.updateRow(row)
cnty = row[1]
if cnty in dict.keys():
row[0] = dict[cnty]
cursor.updateRow(row)
del cursor
print "doing math"
differguy = float(comper) / float(counter) * 100
integ = str(differguy).split(".")[0]
deci = str(differguy).split(".")[1][:2]
numsz = integ + "." + deci
differguy2 = float(counter) / float(139) * 100
integ2 = str(differguy2).split(".")[0]
deci2 = str(differguy2).split(".")[1][:2]
numsz2 = integ2 + "." + deci2
for lyr in arcpy.mapping.ListLayers(statusMap):
if lyr.name == "NorthCounties":
lyr.visible = True
if lyr.name == "SouthCounties":
lyr.visible = False
arcpy.AddMessage("Layers visualized.")
for textElement in arcpy.mapping.ListLayoutElements(statusMap, "TEXT_ELEMENT"):
if textElement.name == "topYEAR":
textElement.text = curYear
if textElement.name == "bottomDate":
textElement.text = now.strftime("%B") + " " + curDay + ", " + curYear
if textElement.name == "copyright":
textElement.text = "Copyright " + curYear
if textElement.name == "finalDate":
lastYears = int(curYear) - 1
textElement.text = str(lastYears) + "."
if textElement.name == "responder":
textElement.text = numsz2 + "% Have Responded (" + str(counter) + " of 139)"
textElement.elementPositionX = 7.2
textElement.elementPositionY = 5.8
if textElement.name == "updater":
textElement.text = numsz + "% of Responses Require Update (" + str(comper) + " of " + str(counter) + ")"
textElement.elementPositionX = 6.3
textElement.elementPositionY = 5.5
arcpy.AddMessage("Text elements updated.")
legend = arcpy.mapping.ListLayoutElements(statusMap, "LEGEND_ELEMENT")[0]
legend.elementPositionX = 7
legend.elementPositionY = 6.2
arcpy.AddMessage("Legend moved.")
arcpy.RefreshActiveView()
arcpy.mapping.ExportToPDF(statusMap, house + os.sep + "TrackingMap" + curDate + ".pdf")
#
# south county
elif int(curYear) % 2 != 0:
print "working the south side"
newextent.XMin, newextent.YMin = 364911.216382526, 350798.309516114
newextent.XMax, newextent.YMax = 1628319.75219708, 1235184.28458639
dataFrame.extent = newextent
print "updating changes"
cursor = arcpy.da.UpdateCursor(weekly_maps_folder + "\\Tracking_Shapefiles\\SouthCounties.shp", ["status", "CNTY_NM"])
for row in cursor:
row[0] = "No Response"
cursor.updateRow(row)
cnty = row[1]
if cnty in dict.keys():
row[0] = dict[cnty]
cursor.updateRow(row)
del cursor
print "doing math"
differguy = float(comper) / float(counter) * 100
integ = str(differguy).split(".")[0]
deci = str(differguy).split(".")[1][:2]
numsz = integ + "." + deci
differguy2 = float(counter) / float(115) * 100
integ2 = str(differguy2).split(".")[0]
deci2 = str(differguy2).split(".")[1][:2]
numsz2 = integ2 + "." + deci2
for lyr in arcpy.mapping.ListLayers(statusMap):
if lyr.name == "NorthCounties":
lyr.visible = False
if lyr.name == "SouthCounties":
lyr.visible = True
arcpy.AddMessage("Layers visualized.")
for textElement in arcpy.mapping.ListLayoutElements(statusMap, "TEXT_ELEMENT"):
if textElement.name == "topYEAR":
textElement.text = curYear
if textElement.name == "bottomDate":
textElement.text = now.strftime("%B") + " " + curDay + ", " + curYear
if textElement.name == "copyright":
textElement.text = "Copyright " + curYear
if textElement.name == "finalDate":
lastYears = int(curYear) - 1
textElement.text = str(lastYears) + "."
if textElement.name == "responder":
textElement.text = numsz2 + "% Have Responded (" + str(counter) + " of 115)"
textElement.elementPositionX = 1.04
textElement.elementPositionY = 1.46
if textElement.name == "updater":
textElement.text = numsz + "% of Responses Require Update (" + str(comper) + " of " + str(counter) + ")"
textElement.elementPositionX = 1.04
textElement.elementPositionY = 1.14
arcpy.AddMessage("Text elements updated.")
legend = arcpy.mapping.ListLayoutElements(statusMap, "LEGEND_ELEMENT")[0]
legend.elementPositionX = 1.04
legend.elementPositionY = 1.88
arcpy.AddMessage("Legend moved.")
arcpy.RefreshActiveView()
arcpy.mapping.ExportToPDF(statusMap, house + os.sep + "TrackingMap" + curDate + ".pdf")
arcpy.AddMessage("Tracking Map Complete.")
def status():
arcpy.AddMessage("Generating Status Map...")
statusMap = arcpy.mapping.MapDocument(weekly_maps_folder + "\\CRI_STATUS.mxd")
dataFrame = arcpy.mapping.ListDataFrames(statusMap)[0]
newextent = dataFrame.extent
print "compiling the changes"
dict = {}
counter = 0
comper = 0
for record in sharepoint_table.keys():
county = record
NU = sharepoint_table[record][3]
NF = sharepoint_table[record][5]
DC = sharepoint_table[record][6]
CU = sharepoint_table[record][7]
if CU == 1:
dict[county] = "Markup Complete"
counter += 1
comper += 1
elif DC == 1:
dict[county] = "Field Inventory Complete"
counter += 1
elif NF == 1:
dict[county] = "Pending Field Work"
counter += 1
elif NU == 1:
dict[county] = "Markup Required"
counter += 1
#north county:
if int(curYear) % 2 == 0:
print "working the north side"
newextent.XMin, newextent.YMin = 582786.47000423, 826927.373313854
newextent.XMax, newextent.YMax = 1687689.94133357, 1600359.80324447
dataFrame.extent = newextent
print "updating changes"
cursor = arcpy.da.UpdateCursor(weekly_maps_folder + "\\Status_Shapefiles\\NorthCounties.shp", ["mrk_status", "CNTY_NM"])
for row in cursor:
row[0] = ""
cursor.updateRow(row)
cnty = row[1]
if cnty in dict.keys():
row[0] = dict[cnty]
cursor.updateRow(row)
del cursor
if counter == 0:
counter += 1
print "doing math"
differguy = float(comper) / float(counter) * 100
integ = str(differguy).split(".")[0]
deci = str(differguy).split(".")[1][:2]
numsz = integ + "." + deci
for lyr in arcpy.mapping.ListLayers(statusMap):
if lyr.name == "NorthCounties":
lyr.visible = True
if lyr.name == "SouthCounties":
lyr.visible = False
arcpy.AddMessage("Layers visualized.")
for textElement in arcpy.mapping.ListLayoutElements(statusMap, "TEXT_ELEMENT"):
if textElement.name == "topYEAR":
textElement.text = curYear
if textElement.name == "bottomDate":
textElement.text = now.strftime("%B") + " " + curDay + ", " + curYear
if textElement.name == "copyright":
textElement.text = "Copyright " + curYear
if textElement.name == "finalDate":
lastYears = int(curYear) - 1
textElement.text = str(lastYears) + "."
if textElement.name == "complete":
textElement.text = numsz + "% Complete (" + str(comper) + " of " + str(counter) + ")"
textElement.elementPositionX = 7.6
textElement.elementPositionY = 5.9
arcpy.AddMessage("Text elements updated.")
legend = arcpy.mapping.ListLayoutElements(statusMap, "LEGEND_ELEMENT")[0]
legend.elementPositionX = 7.4
legend.elementPositionY = 6.4
arcpy.AddMessage("Legend moved.")
arcpy.RefreshActiveView()
arcpy.mapping.ExportToPDF(statusMap, house + os.sep + "StatusMap" + curDate + ".pdf")
#
# south county
elif int(curYear) % 2 != 0:
print "working the south side"
newextent.XMin, newextent.YMin = 364911.216382526, 350798.309516114
newextent.XMax, newextent.YMax = 1628319.75219708, 1235184.28458639
dataFrame.extent = newextent
print "updating changes"
cursor = arcpy.da.UpdateCursor(weekly_maps_folder + "\\Status_Shapefiles\\SouthCounties.shp", ["mrk_status", "CNTY_NM"])
for row in cursor:
row[0] = ""
cursor.updateRow(row)
cnty = row[1]
if cnty in dict.keys():
row[0] = dict[cnty]
cursor.updateRow(row)
del cursor
if counter == 0:
counter += 1
print "doing math"
differguy = float(comper) / float(counter) * 100
integ = str(differguy).split(".")[0]
deci = str(differguy).split(".")[1][:2]
numsz = integ + "." + deci
for lyr in arcpy.mapping.ListLayers(statusMap):
if lyr.name == "NorthCounties":
lyr.visible = False
if lyr.name == "SouthCounties":
lyr.visible = True
arcpy.AddMessage("Layers visualized.")
for textElement in arcpy.mapping.ListLayoutElements(statusMap, "TEXT_ELEMENT"):
if textElement.name == "topYEAR":
textElement.text = curYear
if textElement.name == "bottomDate":
textElement.text = now.strftime("%B") + " " + curDay + ", " + curYear
if textElement.name == "copyright":
textElement.text = "Copyright " + curYear
if textElement.name == "finalDate":
lastYears = int(curYear) - 1
textElement.text = str(lastYears) + "."
if textElement.name == "complete":
textElement.text = numsz + "% Complete (" + str(comper) + " of " + str(counter) + ")"
textElement.elementPositionX = 1.41
textElement.elementPositionY = 1.46
arcpy.AddMessage("Text elements updated.")
legend = arcpy.mapping.ListLayoutElements(statusMap, "LEGEND_ELEMENT")[0]
legend.elementPositionX = 1.25
legend.elementPositionY = 1.9
arcpy.AddMessage("Legend moved.")
arcpy.RefreshActiveView()
arcpy.mapping.ExportToPDF(statusMap, house + os.sep + "StatusMap" + curDate + ".pdf")
arcpy.AddMessage("Status Map Complete.")
def tcopy():
print "doing the T-drive copy shuffle"
cri = "T:\\DATAMGT\\MAPPING\\Data Collection\\Core Projects\\CountyRoad"
if os.path.exists(cri + os.sep + curYear):
if os.path.exists(cri + os.sep + curYear + os.sep + "_Progress Maps"):
if os.path.exists(cri + os.sep + curYear + os.sep + "_Progress Maps" + os.sep + curDate):
shutil.rmtree(cri + os.sep + curYear + os.sep + "_Progress Maps" + os.sep + curDate)
os.makedirs(cri + os.sep + curYear + os.sep + "_Progress Maps" + os.sep + curDate)
else:
os.makedirs(cri + os.sep + curYear + os.sep + "_Progress Maps")
os.makedirs(cri + os.sep + curYear + os.sep + "_Progress Maps" + os.sep + curDate)
else:
os.makedirs(cri + os.sep + curYear)
os.makedirs(cri + os.sep + curYear + os.sep + "_Progress Maps")
os.makedirs(cri + os.sep + curYear + os.sep + "_Progress Maps" + os.sep + curDate)
tdrive = cri + os.sep + curYear + os.sep + "_Progress Maps" + os.sep + curDate
shutil.copy(weekly_maps_folder + os.sep + curYear + os.sep + curDate + os.sep + "TrackingMap" + curDate + ".pdf", tdrive)
shutil.copy(weekly_maps_folder + os.sep + curYear + os.sep + curDate + os.sep + "StatusMap" + curDate + ".pdf", tdrive)
print "Maps copied to T-drive."
make_directories()
build_sharepoint_dict()
tracking()
status()
tcopy()
print "that's all folks!"
|
|
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2019 Intel Corporation
from scapy.all import *
import unittest
import pkttest
#{ipv4{ipv4}} test
SRC_ADDR_IPV4_1 = "192.168.1.1"
DST_ADDR_IPV4_1 = "192.168.2.1"
#{ipv6{ipv6}} test
SRC_ADDR_IPV6_1 = "1111:0000:0000:0000:0000:0000:0000:0001"
DST_ADDR_IPV6_1 = "2222:0000:0000:0000:0000:0000:0000:0001"
#{ipv4{ipv6}} test
SRC_ADDR_IPV4_2 = "192.168.11.1"
DST_ADDR_IPV4_2 = "192.168.12.1"
SRC_ADDR_IPV6_2 = "1111:0000:0000:0000:0000:0000:0001:0001"
DST_ADDR_IPV6_2 = "2222:0000:0000:0000:0000:0000:0001:0001"
#{ipv6{ipv4}} test
SRC_ADDR_IPV4_3 = "192.168.21.1"
DST_ADDR_IPV4_3 = "192.168.22.1"
SRC_ADDR_IPV6_3 = "1111:0000:0000:0000:0000:0001:0001:0001"
DST_ADDR_IPV6_3 = "2222:0000:0000:0000:0000:0001:0001:0001"
def config():
return """
#outer-ipv4 inner-ipv4 tunnel mode test
sp ipv4 out esp protect 5 pri 1 \\
src {0}/32 \\
dst {1}/32 \\
sport 0:65535 dport 0:65535
sp ipv4 in esp protect 6 pri 1 \\
src {1}/32 \\
dst {0}/32 \\
sport 0:65535 dport 0:65535
sa out 5 cipher_algo null auth_algo null mode ipv4-tunnel \\
src {0} dst {1}
sa in 6 cipher_algo null auth_algo null mode ipv4-tunnel \\
src {1} dst {0}
rt ipv4 dst {0}/32 port 1
rt ipv4 dst {1}/32 port 0
#outer-ipv6 inner-ipv6 tunnel mode test
sp ipv6 out esp protect 7 pri 1 \\
src {2}/128 \\
dst {3}/128 \\
sport 0:65535 dport 0:65535
sp ipv6 in esp protect 8 pri 1 \\
src {3}/128 \\
dst {2}/128 \\
sport 0:65535 dport 0:65535
sa out 7 cipher_algo null auth_algo null mode ipv6-tunnel \\
src {2} dst {3}
sa in 8 cipher_algo null auth_algo null mode ipv6-tunnel \\
src {3} dst {2}
rt ipv6 dst {2}/128 port 1
rt ipv6 dst {3}/128 port 0
#outer-ipv4 inner-ipv6 tunnel mode test
sp ipv6 out esp protect 9 pri 1 \\
src {4}/128 \\
dst {5}/128 \\
sport 0:65535 dport 0:65535
sp ipv6 in esp protect 10 pri 1 \\
src {5}/128 \\
dst {4}/128 \\
sport 0:65535 dport 0:65535
sa out 9 cipher_algo null auth_algo null mode ipv4-tunnel \\
src {6} dst {7}
sa in 10 cipher_algo null auth_algo null mode ipv4-tunnel \\
src {7} dst {6}
rt ipv6 dst {4}/128 port 1
rt ipv4 dst {7}/32 port 0
#outer-ipv6 inner-ipv4 tunnel mode test
sp ipv4 out esp protect 11 pri 1 \\
src {8}/32 \\
dst {9}/32 \\
sport 0:65535 dport 0:65535
sp ipv4 in esp protect 12 pri 1 \\
src {9}/32 \\
dst {8}/32 \\
sport 0:65535 dport 0:65535
sa out 11 cipher_algo null auth_algo null mode ipv6-tunnel \\
src {10} dst {11}
sa in 12 cipher_algo null auth_algo null mode ipv6-tunnel \\
src {11} dst {10}
rt ipv4 dst {8}/32 port 1
rt ipv6 dst {11}/128 port 0
""".format(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1,
SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1,
SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2, SRC_ADDR_IPV4_2, DST_ADDR_IPV4_2,
SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3, SRC_ADDR_IPV6_3, DST_ADDR_IPV6_3)
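# Quick reference (editorial note) for the configuration generated above: 'sp' entries are
# security policies, 'sa' entries are security associations and 'rt' entries are routes.
# SPI pairs: 5/6 ipv4 inner over ipv4 outer, 7/8 ipv6 inner over ipv6 outer,
# 9/10 ipv6 inner over ipv4 outer, 11/12 ipv4 inner over ipv6 outer.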
ECN_ECT0 = 0x02
ECN_ECT1 = 0x01
ECN_CE = 0x03
DSCP_1 = 0x04
DSCP_3F = 0xFC
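# Bit-layout note: in the IPv4 TOS / IPv6 Traffic Class byte, DSCP occupies the upper six bits and
# ECN the lower two (RFC 2474 / RFC 3168). ECT(0) = 0b10, ECT(1) = 0b01, CE = 0b11.
# DSCP_1 = 0x04 is DSCP codepoint 1 shifted left by two bits; DSCP_3F = 0xFC is codepoint 0x3F.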
class TestTunnelHeaderReconstruct(unittest.TestCase):
def setUp(self):
self.px = pkttest.PacketXfer()
th = IP(src=DST_ADDR_IPV4_1, dst=SRC_ADDR_IPV4_1)
self.sa_ipv4v4 = SecurityAssociation(ESP, spi=6, tunnel_header = th)
th = IPv6(src=DST_ADDR_IPV6_1, dst=SRC_ADDR_IPV6_1)
self.sa_ipv6v6 = SecurityAssociation(ESP, spi=8, tunnel_header = th)
th = IP(src=DST_ADDR_IPV4_2, dst=SRC_ADDR_IPV4_2)
self.sa_ipv4v6 = SecurityAssociation(ESP, spi=10, tunnel_header = th)
th = IPv6(src=DST_ADDR_IPV6_3, dst=SRC_ADDR_IPV6_3)
self.sa_ipv6v4 = SecurityAssociation(ESP, spi=12, tunnel_header = th)
def gen_pkt_plain_ipv4(self, src, dst, tos):
pkt = IP(src=src, dst=dst, tos=tos)
pkt /= UDP(sport=123,dport=456)/Raw(load="abc")
return pkt
def gen_pkt_plain_ipv6(self, src, dst, tc):
pkt = IPv6(src=src, dst=dst, tc=tc)
pkt /= UDP(sport=123,dport=456)/Raw(load="abc")
return pkt
def gen_pkt_tun_ipv4v4(self, tos_outter, tos_inner):
pkt = self.gen_pkt_plain_ipv4(DST_ADDR_IPV4_1, SRC_ADDR_IPV4_1,
tos_inner)
pkt = self.sa_ipv4v4.encrypt(pkt)
self.assertEqual(pkt[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(pkt[ESP].spi, 6)
pkt[IP].tos = tos_outter
return pkt
def gen_pkt_tun_ipv6v6(self, tc_outter, tc_inner):
pkt = self.gen_pkt_plain_ipv6(DST_ADDR_IPV6_1, SRC_ADDR_IPV6_1,
tc_inner)
pkt = self.sa_ipv6v6.encrypt(pkt)
self.assertEqual(pkt[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(pkt[ESP].spi, 8)
pkt[IPv6].tc = tc_outter
return pkt
def gen_pkt_tun_ipv4v6(self, tos_outter, tc_inner):
pkt = self.gen_pkt_plain_ipv6(DST_ADDR_IPV6_2, SRC_ADDR_IPV6_2,
tc_inner)
pkt = self.sa_ipv4v6.encrypt(pkt)
self.assertEqual(pkt[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(pkt[ESP].spi, 10)
pkt[IP].tos = tos_outter
return pkt
def gen_pkt_tun_ipv6v4(self, tc_outter, tos_inner):
pkt = self.gen_pkt_plain_ipv4(DST_ADDR_IPV4_3, SRC_ADDR_IPV4_3,
tos_inner)
pkt = self.sa_ipv6v4.encrypt(pkt)
self.assertEqual(pkt[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(pkt[ESP].spi, 12)
pkt[IPv6].tc = tc_outter
return pkt
#RFC4301 5.1.2.1 & 5.1.2.2, outbound packets shall be copied ECN field
def test_outb_ipv4v4_ecn(self):
pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1,
ECN_ECT1)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 5)
self.assertEqual(resp[IP].tos, ECN_ECT1)
pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1,
ECN_ECT0)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 5)
self.assertEqual(resp[IP].tos, ECN_ECT0)
pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1,
ECN_CE)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 5)
self.assertEqual(resp[IP].tos, ECN_CE)
def test_outb_ipv6v6_ecn(self):
pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1,
ECN_ECT1)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(resp[IPv6].tc, ECN_ECT1)
pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1,
ECN_ECT0)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 7)
self.assertEqual(resp[IPv6].tc, ECN_ECT0)
pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1,
ECN_CE)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 7)
self.assertEqual(resp[IPv6].tc, ECN_CE)
def test_outb_ipv4v6_ecn(self):
pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2,
ECN_ECT1)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(resp[IP].tos, ECN_ECT1)
pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2,
ECN_ECT0)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(resp[IP].tos, ECN_ECT0)
pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2,
ECN_CE)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(resp[IP].tos, ECN_CE)
def test_outb_ipv6v4_ecn(self):
pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3,
ECN_ECT1)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(resp[IPv6].tc, ECN_ECT1)
pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3,
ECN_ECT0)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(resp[IPv6].tc, ECN_ECT0)
pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3,
ECN_CE)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(resp[IPv6].tc, ECN_CE)
#RFC4301 5.1.2.1 & 5.1.2.2, if outbound packets ECN is CE (0x3), inbound packets
#ECN is overwritten to CE, otherwise no change
#Outter header not CE, Inner header should be no change
def test_inb_ipv4v4_ecn_inner_no_change(self):
pkt = self.gen_pkt_tun_ipv4v4(ECN_ECT1, ECN_ECT0)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, ECN_ECT0)
pkt = self.gen_pkt_tun_ipv4v4(ECN_ECT0, ECN_ECT1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, ECN_ECT1)
pkt = self.gen_pkt_tun_ipv4v4(ECN_ECT1, ECN_CE)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, ECN_CE)
def test_inb_ipv6v6_ecn_inner_no_change(self):
pkt = self.gen_pkt_tun_ipv6v6(ECN_ECT1, ECN_ECT0)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, ECN_ECT0)
pkt = self.gen_pkt_tun_ipv6v6(ECN_ECT0, ECN_ECT1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, ECN_ECT1)
pkt = self.gen_pkt_tun_ipv6v6(ECN_ECT1, ECN_CE)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, ECN_CE)
def test_inb_ipv4v6_ecn_inner_no_change(self):
pkt = self.gen_pkt_tun_ipv4v6(ECN_ECT1, ECN_ECT0)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, ECN_ECT0)
pkt = self.gen_pkt_tun_ipv4v6(ECN_ECT0, ECN_ECT1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, ECN_ECT1)
pkt = self.gen_pkt_tun_ipv4v6(ECN_ECT1, ECN_CE)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, ECN_CE)
def test_inb_ipv6v4_ecn_inner_no_change(self):
pkt = self.gen_pkt_tun_ipv6v4(ECN_ECT1, ECN_ECT0)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, ECN_ECT0)
pkt = self.gen_pkt_tun_ipv6v4(ECN_ECT0, ECN_ECT1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, ECN_ECT1)
pkt = self.gen_pkt_tun_ipv6v4(ECN_ECT1, ECN_CE)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, ECN_CE)
#Outer header CE: inner header should be changed to CE
def test_inb_ipv4v4_ecn_inner_change(self):
pkt = self.gen_pkt_tun_ipv4v4(ECN_CE, ECN_ECT0)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, ECN_CE)
pkt = self.gen_pkt_tun_ipv4v4(ECN_CE, ECN_ECT1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, ECN_CE)
def test_inb_ipv6v6_ecn_inner_change(self):
pkt = self.gen_pkt_tun_ipv6v6(ECN_CE, ECN_ECT0)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, ECN_CE)
pkt = self.gen_pkt_tun_ipv6v6(ECN_CE, ECN_ECT1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, ECN_CE)
def test_inb_ipv4v6_ecn_inner_change(self):
pkt = self.gen_pkt_tun_ipv4v6(ECN_CE, ECN_ECT0)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, ECN_CE)
pkt = self.gen_pkt_tun_ipv4v6(ECN_CE, ECN_ECT1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, ECN_CE)
def test_inb_ipv6v4_ecn_inner_change(self):
pkt = self.gen_pkt_tun_ipv6v4(ECN_CE, ECN_ECT0)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, ECN_CE)
pkt = self.gen_pkt_tun_ipv6v4(ECN_CE, ECN_ECT1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, ECN_CE)
#RFC4301 5.1.2.1.5 Outer DS field should be copied from Inner DS field
def test_outb_ipv4v4_dscp(self):
pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1,
DSCP_1)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 5)
self.assertEqual(resp[IP].tos, DSCP_1)
pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1,
DSCP_3F)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 5)
self.assertEqual(resp[IP].tos, DSCP_3F)
def test_outb_ipv6v6_dscp(self):
pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1,
DSCP_1)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 7)
self.assertEqual(resp[IPv6].tc, DSCP_1)
pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1,
DSCP_3F)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 7)
self.assertEqual(resp[IPv6].tc, DSCP_3F)
def test_outb_ipv4v6_dscp(self):
pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2,
DSCP_1)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 9)
self.assertEqual(resp[IP].tos, DSCP_1)
pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2,
DSCP_3F)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 9)
self.assertEqual(resp[IP].tos, DSCP_3F)
def test_outb_ipv6v4_dscp(self):
pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3,
DSCP_1)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 11)
self.assertEqual(resp[IPv6].tc, DSCP_1)
pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3,
DSCP_3F)
resp = self.px.xfer_unprotected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
self.assertEqual(resp[ESP].spi, 11)
self.assertEqual(resp[IPv6].tc, DSCP_3F)
#RFC4301 5.1.2.1.5 Inner DS field should not be affected by Outer DS field
def test_inb_ipv4v4_dscp(self):
pkt = self.gen_pkt_tun_ipv4v4(DSCP_3F, DSCP_1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, DSCP_1)
pkt = self.gen_pkt_tun_ipv4v4(DSCP_1, DSCP_3F)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, DSCP_3F)
def test_inb_ipv6v6_dscp(self):
pkt = self.gen_pkt_tun_ipv6v6(DSCP_3F, DSCP_1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, DSCP_1)
pkt = self.gen_pkt_tun_ipv6v6(DSCP_1, DSCP_3F)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, DSCP_3F)
def test_inb_ipv4v6_dscp(self):
pkt = self.gen_pkt_tun_ipv4v6(DSCP_3F, DSCP_1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, DSCP_1)
pkt = self.gen_pkt_tun_ipv4v6(DSCP_1, DSCP_3F)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
self.assertEqual(resp[IPv6].tc, DSCP_3F)
def test_inb_ipv6v4_dscp(self):
pkt = self.gen_pkt_tun_ipv6v4(DSCP_3F, DSCP_1)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, DSCP_1)
pkt = self.gen_pkt_tun_ipv6v4(DSCP_1, DSCP_3F)
resp = self.px.xfer_protected(pkt)
self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
self.assertEqual(resp[IP].tos, DSCP_3F)
pkttest.pkttest()
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 27 10:39:31 2016
@author: Wang Ziang
"""
import numpy as np
array = np.array
import math
from dSolve_2 import *
import matplotlib.pyplot as plt
from dSolveAndPlot import dSolutionFastPlot as dfp
from dSolveAndPlot import detailedDSolutionPlotAndHoldOn as ddp
from dSolveAndPlot import *
from scipy.optimize import minimize
from scipy.optimize import bisect
from scipy.optimize import newton
'''
fd = drivingForceAmplitude
omgd = drivingForceFrequency
gOverL = g / L (gravity over pendulum length)
q = dampingParameter
'''
deg = math.pi / 180.
def forcedDampedLinearPendulum(omgd, fd, q, gOverL):
def foo(t,X):
[omega, theta] = X
return array([- gOverL*theta - q*omega + fd*math.sin(omgd*t), omega])
return foo
def forcedDampedNonlinearPendulum(omgd, fd = 1.2, q = 0.5 , gOverL = 1.):
def foo(t,X):
[omega, theta] = X
return array([- gOverL*math.sin(theta) - q*omega + fd*math.sin(omgd*t), omega])
return foo
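# Model note: the state vector is X = [omega, theta] and the driven, damped pendulum obeys
#     d(omega)/dt = -(g/L)*sin(theta) - q*omega + fd*sin(omgd*t)
#     d(theta)/dt = omega
# The "linear" variant above replaces sin(theta) with theta (small-angle approximation).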
'''
def simpleLinearPendulum(gOverL):
return forcedDampedLinearPendulum(0, 0, gOverL, 0)
'''
def simpleLinearPendulum(gOverL = 9.8):
def foo(t,X):
[omega, theta] = X
return array([-gOverL*theta, omega])
return foo
def oneDimNewtonPhaseSpaceSolutionPrintAndHoldOn(ans1, \
legendC = "", xLabel = "theta/1", yLabel = "omaga/ 1/s", titleLabel = "Phase Diagram"):
easyPlotXYHoldON(getXiList(ans1,2),getXiList(ans1,1),legendC, xLabel,yLabel, titleLabel)
def modSolution(solution):
"""
mod (wrap) the theta component of the solution into the interval [-pi, pi)
pre: solution = [ [ti, array([omegai, thetai])], ... ]
post: returns the solution with each thetai wrapped into [-pi, pi) (the state arrays are modified in place)
"""
sol = solution[:];
xi = getXiList(sol, 2);
mxi = np.remainder( (array(xi) + math.pi).tolist(), 2 * math.pi) - math.pi
for i in range(len(sol)):
sol[i][1][1] = mxi[i]
return sol
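# Wrapping note: np.remainder(x + pi, 2*pi) - pi maps any angle into [-pi, pi),
# e.g. x = 3.5 rad -> 3.5 - 2*pi ~ -2.78 rad.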
def specialPlotForm(solution, nanForm = [np.nan, array([np.nan, np.nan])] ):
sol = solution[:];
xi = getXiList(sol, 2);
# print xi
pos = np.where(np.abs(np.diff(xi))>6.)[0]+1
# print pos
# print nanForm
# print sol[0]
for i in pos[::-1]:
sol.insert(i, nanForm)
return sol
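# Note on specialPlotForm (added comment): a jump of more than 6 rad between
# consecutive theta samples is treated as a wrap-around introduced by modSolution,
# and a [nan, array([nan, nan])] state is inserted there so matplotlib breaks the
# curve instead of drawing a spurious segment across the phase diagram.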
'''
#different initial
stepSize = 0.01
tMax = 80.
ans1 = dSolve(forcedDampedNonlinearPendulum(2./3.), array([0., 0.20]), tMax, stepSize, method = "RK4")
ans1Omega = reduceStateList(ans1, [0])
ans1Theta = reduceStateList(ans1, [1])
ans2 = dSolve(forcedDampedNonlinearPendulum(2./3.), array([0., 0.21]), tMax, stepSize, method = "RK4")
ans2Omega = reduceStateList(ans2, [0])
ans2Theta = reduceStateList(ans2, [1])
# Omega
ddp(ans2Omega,[ "init = (omaga=0.0, theta=0.20)"], xLabel = "t/s", yLabel = "Omega/ 1/s")
ddp(ans1Omega,[ "init = (omaga=0.0, theta=0.20)"], xLabel = "t/s", yLabel = "Omega/ 1/s")
done()
# Theta
ddp(ans2Theta,["init = (omaga=0.0, theta=0.20)"], xLabel = "t/s", yLabel = "Theta/ 1")
ddp(ans1Theta,["init = (omaga=0.0, theta=0.20)"], xLabel = "t/s", yLabel = "Theta/ 1")
done()
# mod-theta
stepSize = 0.01
tMax = 80.
ans1 = modSolution( dSolve(forcedDampedNonlinearPendulum(2./3.), array([0., 0.20]), tMax, stepSize, method = "RK4") )
ans1Omega = reduceStateList(ans1, [0])
ans1Theta = reduceStateList(ans1, [1])
ans2 = modSolution( dSolve(forcedDampedNonlinearPendulum(2./3.), array([0., 0.21]), tMax, stepSize, method = "RK4") )
ans2Omega = reduceStateList(ans2, [0])
ans2Theta = reduceStateList(ans2, [1])
ddp(ans2Theta,["init = (omaga=0.0, theta=0.20)"], xLabel = "t/s", yLabel = "Theta/ 1")
ddp(ans1Theta,["init = (omaga=0.0, theta=0.20)"], xLabel = "t/s", yLabel = "Theta/ 1")
done()
'''
def fEqQ(float1, float2):
    # approximate float equality with an absolute tolerance of 1e-2
    return abs(float2 - float1) < 1e-2
def poincareSection(solution, omgd, phase = 0., tmin = 50):
    def mulOfTQ(t, omgd):
        n = (t - phase/omgd) / (2*math.pi/omgd)
        return fEqQ(round(n), n)
    n = len(solution)
    sol = solution  # [int(round(0.2*n)):-1]
    sectionPoints = list(filter(lambda state: mulOfTQ(getT(state), omgd) and getT(state) > tmin, sol))
    print(phase)
    return sectionPoints
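# Note on poincareSection (added comment): a state is kept when its time satisfies
# t = phase/omgd + k*(2*pi/omgd) for some integer k (within the fEqQ tolerance) and
# t > tmin, i.e. the trajectory is sampled stroboscopically once per driving period
# after the transient, which is exactly what a Poincare section of the driven
# pendulum requires.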
'''
# Omega
ddp(ans2Omega,["ForwardEuler"], xLabel = "t/s", yLabel = "Omega/ 1/s")
ddp(ans1Omega,["RK4"], xLabel = "t/s", yLabel = "Omega/ 1/s")
done()
# Theta
ddp(ans2Theta,["ForwardEuler"], xLabel = "t/s", yLabel = "Theta/ 1")
ddp(ans1Theta,["RK4"], xLabel = "t/s", yLabel = "Theta/ 1")
done()
stepSize = 0.01
tMax = 180.
ans1 = modSolution( dSolve(forcedDampedNonlinearPendulum(2./3.), array([0, 0.2]), tMax, stepSize, method = "RK4") )
ans1Omega = reduceStateList(ans1, [0])
ans1Theta = reduceStateList(ans1, [1])
ans2 = modSolution( dSolve(forcedDampedNonlinearPendulum(2./3.), array([0, 0.21]), tMax, stepSize, method = "RK4") )
ans2Omega = reduceStateList(ans2, [0])
ans2Theta = reduceStateList(ans2, [1])
# phase diag
ans1 = specialPlotForm(ans1)
ans2 = specialPlotForm(ans2)
easyPlotXYHoldON(getXiList(ans2,2),getXiList(ans2,1), "init = (omaga=0.0, theta=0.20)","Theta/ 1","Omega/ 1/s", "Phase Diagram")
easyPlotXYHoldON(getXiList(ans1,2),getXiList(ans1,1), "init = (omaga=0.0, theta=0.21)","Theta/ 1","Omega/ 1/s", "Phase Diagram")
done()
'''
'''
omgd = 2./3.
ppp = 50.
stepSize = 2* math.pi/omgd/ ppp
tMax = 100000.
frameN = 400
i = 2.001
ans = dSolve(forcedDampedNonlinearPendulum(omgd, fd = 1.2), array([0., 0.20]), tMax, stepSize, method = "RK4")
ans = modSolution(ans)
pSec = poincareSection(modSolution(ans), omgd,i * math.pi / 4.)
easyScatterPlotXYHoldON(getXiList(pSec,2),getXiList(pSec,1), "phase = "+ str(2)+" *Pi/4","Theta/ 1","Omega/ 1/s", "Phase Diagram")
done()
### i = 2.0 wrong!!!!!
# for i in range(5):
# pSec = poincareSection(modSolution(ans), omgd,i * math.pi / 4.)
# easyScatterPlotXYHoldON(getXiList(pSec,2),getXiList(pSec,1), "phase = "+ str(i)+" *Pi/4","Theta/ 1","Omega/ 1/s", "Phase Diagram")
# done()
'''
# p section
# stepSize = 0.05
# tMax = 80.
omgd = 2./3.
ppp = 50.
stepSize = 2* math.pi/omgd/ ppp
tMax = 10000.
frameN = 400
ans = dSolve(forcedDampedNonlinearPendulum(omgd, fd = 1.2), array([0., 0.20]), tMax, stepSize, method = "RK4")
ans = modSolution(ans)
phaseL = np.linspace(0, 2*math.pi, frameN, endpoint = 1).tolist()
dataL = [poincareSection(ans, omgd, phase) for phase in phaseL]
phaseLInPlot = [ph / math.pi for ph in phaseL]
# easyPlotXYHoldON(getXiList(ans,2),getXiList(ans,1), "init = (omaga=0.0, theta=0.21)","Theta/ 1","Omega/ 1/s", "Phase Diagram")
# done()
# pSec = poincareSection(modSolution(ans), omgd,0. *math.pi)
# easyScatterPlotXYHoldON(getXiList(pSec,2),getXiList(pSec,1), "init = (omaga=0.0, theta=0.20)","Theta/ 1","Omega/ 1/s", "Phase Diagram")
# done()
from matplotlib import animation
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
ax = plt.axes(xlim=(-math.pi, math.pi), ylim=(-2.5, 2.5))
line, = ax.plot([], [], 'o', markersize = 1.1 )
ph = ax.text(1.5,2.,'')
# initialization function: plot the background of each frame
def init():
line.set_data([], [])
ph.set_text("")
return line, ph
# animation function. This is called sequentially
def animate(i):
pSec = dataL[i]
x = getXiList(pSec, 2)
y = getXiList(pSec, 1)
line.set_data(x, y)
    ph.set_text('phase = %.3f Pi' % phaseLInPlot[i])
return line, ph
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=frameN, interval=1, blit=True)
# anim.save('p-section.mp4', fps=25)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
plt.show()
|
|
from __future__ import division, absolute_import, print_function
# Code common to build tools
import sys
import warnings
import copy
import binascii
import textwrap
from numpy.distutils.misc_util import mingw32
#-------------------
# Versioning support
#-------------------
# How to change C_API_VERSION ?
# - increase C_API_VERSION value
# - record the hash for the new C API with the cversions.py script
# and add the hash to cversions.txt
# The hash values are used to remind developers when the C API number was not
# updated - generates a MismatchCAPIWarning warning which is turned into an
# exception for released version.
# Binary compatibility version number. This number is increased whenever the
# C-API is changed such that binary compatibility is broken, i.e. whenever a
# recompile of extension modules is needed.
C_ABI_VERSION = 0x01000009
# Minor API version. This number is increased whenever a change is made to the
# C-API -- whether it breaks binary compatibility or not. Some changes, such
# as adding a function pointer to the end of the function table, can be made
# without breaking binary compatibility. In this case, only the C_API_VERSION
# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is
# broken, both C_API_VERSION and C_ABI_VERSION should be increased.
#
# 0x00000008 - 1.7.x
# 0x00000009 - 1.8.x
# 0x00000009 - 1.9.x
# 0x0000000a - 1.10.x
# 0x0000000a - 1.11.x
# 0x0000000a - 1.12.x
# 0x0000000b - 1.13.x
# 0x0000000c - 1.14.x
# 0x0000000c - 1.15.x
# 0x0000000d - 1.16.x
C_API_VERSION = 0x0000000d
class MismatchCAPIWarning(Warning):
pass
def is_released(config):
"""Return True if a released version of numpy is detected."""
from distutils.version import LooseVersion
v = config.get_version('../version.py')
if v is None:
raise ValueError("Could not get version")
pv = LooseVersion(vstring=v).version
if len(pv) > 3:
return False
return True
def get_api_versions(apiversion, codegen_dir):
"""
Return current C API checksum and the recorded checksum.
Return current C API checksum and the recorded checksum for the given
version of the C API version.
"""
# Compute the hash of the current API as defined in the .txt files in
# code_generators
sys.path.insert(0, codegen_dir)
try:
m = __import__('genapi')
numpy_api = __import__('numpy_api')
curapi_hash = m.fullapi_hash(numpy_api.full_api)
apis_hash = m.get_versions_hash()
finally:
del sys.path[0]
return curapi_hash, apis_hash[apiversion]
def check_api_version(apiversion, codegen_dir):
"""Emits a MismatchCAPIWarning if the C API version needs updating."""
curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)
# If different hash, it means that the api .txt files in
# codegen_dir have been updated without the API version being
# updated. Any modification in those .txt files should be reflected
# in the api and eventually abi versions.
# To compute the checksum of the current API, use numpy/core/cversions.py
if not curapi_hash == api_hash:
msg = ("API mismatch detected, the C API version "
"numbers have to be updated. Current C api version is %d, "
"with checksum %s, but recorded checksum for C API version %d "
"in core/codegen_dir/cversions.txt is %s. If functions were "
"added in the C API, you have to update C_API_VERSION in %s."
)
warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
__file__),
MismatchCAPIWarning, stacklevel=2)
# Mandatory functions: if not found, fail the build
MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
"floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
"acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
"rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
"copysign", "nextafter", "ftello", "fseeko",
"strtoll", "strtoull", "cbrt", "strtold_l", "fallocate",
"backtrace", "madvise"]
OPTIONAL_HEADERS = [
# sse headers only enabled automatically on amd64/x32 builds
"xmmintrin.h", # SSE
"emmintrin.h", # SSE2
"immintrin.h", # AVX
"features.h", # for glibc version linux
"xlocale.h", # see GH#8367
"dlfcn.h", # dladdr
"sys/mman.h", #madvise
]
# optional gcc compiler builtins and their call arguments and optional a
# required header and definition name (HAVE_ prepended)
# call arguments are required as the compiler will do strict signature checking
OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
("__builtin_isinf", '5.'),
("__builtin_isfinite", '5.'),
("__builtin_bswap32", '5u'),
("__builtin_bswap64", '5u'),
("__builtin_expect", '5, 0'),
("__builtin_mul_overflow", '5, 5, (int*)5'),
# broken on OSX 10.11, make sure its not optimized away
("volatile int r = __builtin_cpu_supports", '"sse"',
"stdio.h", "__BUILTIN_CPU_SUPPORTS"),
("volatile int r = __builtin_cpu_supports", '"avx512f"',
"stdio.h", "__BUILTIN_CPU_SUPPORTS_AVX512F"),
# MMX only needed for icc, but some clangs don't have it
("_m_from_int64", '0', "emmintrin.h"),
("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
("_mm_prefetch", '(float*)0, _MM_HINT_NTA',
"xmmintrin.h"), # SSE
("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
("__builtin_prefetch", "(float*)0, 0, 3"),
# check that the linker can handle avx
("__asm__ volatile", '"vpand %xmm1, %xmm2, %xmm3"',
"stdio.h", "LINK_AVX"),
("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"',
"stdio.h", "LINK_AVX2"),
("__asm__ volatile", '"vpaddd %zmm1, %zmm2, %zmm3"',
"stdio.h", "LINK_AVX512F"),
("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"),
]
# function attributes
# tested via "int %s %s(void *);" % (attribute, name)
# function name will be converted to HAVE_<upper-case-name> preprocessor macro
OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
'attribute_optimize_unroll_loops'),
('__attribute__((optimize("O3")))',
'attribute_optimize_opt_3'),
('__attribute__((nonnull (1)))',
'attribute_nonnull'),
('__attribute__((target ("avx")))',
'attribute_target_avx'),
('__attribute__((target ("avx2")))',
'attribute_target_avx2'),
('__attribute__((target ("avx512f")))',
'attribute_target_avx512f'),
]
# function attributes with intrinsics
# To ensure your compiler can compile AVX intrinsics with just the attributes:
# gcc 4.8.4 supports the attributes but not the intrinsics.
# tested via "#include<%s> int %s %s(void *){%s; return 0;};" % (header, attribute, name, code)
# function name will be converted to HAVE_<upper-case-name> preprocessor macro
OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2,fma")))',
'attribute_target_avx2_with_intrinsics',
'__m256 temp = _mm256_set1_ps(1.0); temp = \
_mm256_fmadd_ps(temp, temp, temp)',
'immintrin.h'),
('__attribute__((target("avx512f")))',
'attribute_target_avx512f_with_intrinsics',
'__m512 temp = _mm512_set1_ps(1.0)',
'immintrin.h'),
]
# variable attributes tested via "int %s a" % attribute
OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h
OPTIONAL_STDFUNCS_MAYBE = [
"expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign",
"ftello", "fseeko"
]
# C99 functions: float and long double versions
C99_FUNCS = [
"sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil",
"rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1",
"asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2",
"pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign",
"nextafter", "cbrt"
]
C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]
C99_COMPLEX_TYPES = [
'complex double', 'complex float', 'complex long double'
]
C99_COMPLEX_FUNCS = [
"cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan",
"catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow",
"cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh"
]
def fname2def(name):
return "HAVE_%s" % name.upper()
def sym2def(symbol):
define = symbol.replace(' ', '')
return define.upper()
def type2def(symbol):
define = symbol.replace(' ', '_')
return define.upper()
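# Examples of the helpers above (illustration only):
#   fname2def("expm1")         -> "HAVE_EXPM1"
#   sym2def("complex double")  -> "COMPLEXDOUBLE"
#   type2def("complex double") -> "COMPLEX_DOUBLE"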
# Code to detect long double representation taken from MPFR m4 macro
def check_long_double_representation(cmd):
cmd._check_compiler()
body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}
# Disable whole program optimization (the default on vs2015, with python 3.5+)
# which generates intermediary object files and prevents checking the
# float representation.
if sys.platform == "win32" and not mingw32():
try:
cmd.compiler.compile_options.remove("/GL")
except (AttributeError, ValueError):
pass
# Disable multi-file interprocedural optimization in the Intel compiler on Linux
# which generates intermediary object files and prevents checking the
# float representation.
elif (sys.platform != "win32"
and cmd.compiler.compiler_type.startswith('intel')
and '-ipo' in cmd.compiler.cc_exe):
newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '')
cmd.compiler.set_executables(
compiler=newcompiler,
compiler_so=newcompiler,
compiler_cxx=newcompiler,
linker_exe=newcompiler,
linker_so=newcompiler + ' -shared'
)
# We need to use _compile because we need the object filename
src, obj = cmd._compile(body, None, None, 'c')
try:
ltype = long_double_representation(pyod(obj))
return ltype
except ValueError:
# try linking to support CC="gcc -flto" or icc -ipo
# struct needs to be volatile so it isn't optimized away
body = body.replace('struct', 'volatile struct')
body += "int main(void) { return 0; }\n"
src, obj = cmd._compile(body, None, None, 'c')
cmd.temp_files.append("_configtest")
cmd.compiler.link_executable([obj], "_configtest")
ltype = long_double_representation(pyod("_configtest"))
return ltype
finally:
cmd._clean()
LONG_DOUBLE_REPRESENTATION_SRC = r"""
/* "before" is 16 bytes to ensure there's no padding between it and "x".
* We're not expecting any "long double" bigger than 16 bytes or with
* alignment requirements stricter than 16 bytes. */
typedef %(type)s test_type;
struct {
char before[16];
test_type x;
char after[8];
} foo = {
{ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' },
-123456789.0,
{ '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' }
};
"""
def pyod(filename):
"""Python implementation of the od UNIX utility (od -b, more exactly).
Parameters
----------
filename : str
name of the file to get the dump from.
Returns
-------
out : seq
list of lines of od output
Note
----
We only implement enough to get the necessary information for long double
representation, this is not intended as a compatible replacement for od.
"""
def _pyod2():
out = []
with open(filename, 'rb') as fid:
yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()]
for i in range(0, len(yo), 16):
line = ['%07d' % int(oct(i))]
line.extend(['%03d' % c for c in yo[i:i+16]])
out.append(" ".join(line))
return out
def _pyod3():
out = []
with open(filename, 'rb') as fid:
yo2 = [oct(o)[2:] for o in fid.read()]
for i in range(0, len(yo2), 16):
line = ['%07d' % int(oct(i)[2:])]
line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
out.append(" ".join(line))
return out
if sys.version_info[0] < 3:
return _pyod2()
else:
return _pyod3()
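# Example of a pyod output line (illustration only; offset and values are made up):
#   "0000020 000 000 000 000 001 043 105 147 211 253 315 357 031 300 000 000"
# i.e. a zero-padded octal offset followed by up to 16 bytes, each rendered as a
# 3-digit octal value, matching the layout of `od -b`.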
_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
'001', '043', '105', '147', '211', '253', '315', '357']
_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']
_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']
_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]
_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
'031', '300', '000', '000']
_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
'031', '300', '000', '000', '000', '000', '000', '000']
_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
'242', '240', '000', '000', '000', '000']
_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
'000', '000', '000', '000', '000', '000', '000', '000']
_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
_IBM_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +
['000'] * 8)
_IBM_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +
['000'] * 8)
def long_double_representation(lines):
"""Given a binary dump as given by GNU od -b, look for long double
representation."""
# Read contains a list of 32 items, each item is a byte (in octal
# representation, as a string). We 'slide' over the output until read is of
# the form before_seq + content + after_sequence, where content is the long double
# representation:
# - content is 12 bytes: 80 bits Intel representation
# - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision
# - content is 8 bytes: same as double (not implemented yet)
read = [''] * 32
saw = None
for line in lines:
# we skip the first word, as od -b output an index at the beginning of
# each line
for w in line.split()[1:]:
read.pop(0)
read.append(w)
# If the end of read is equal to the after_sequence, read contains
# the long double
if read[-8:] == _AFTER_SEQ:
saw = copy.copy(read)
# if the content was 12 bytes, we only have 32 - 8 - 12 = 12
# "before" bytes. In other words the first 4 "before" bytes went
# past the sliding window.
if read[:12] == _BEFORE_SEQ[4:]:
if read[12:-8] == _INTEL_EXTENDED_12B:
return 'INTEL_EXTENDED_12_BYTES_LE'
if read[12:-8] == _MOTOROLA_EXTENDED_12B:
return 'MOTOROLA_EXTENDED_12_BYTES_BE'
# if the content was 16 bytes, we are left with 32-8-16 = 16
# "before" bytes, so 8 went past the sliding window.
elif read[:8] == _BEFORE_SEQ[8:]:
if read[8:-8] == _INTEL_EXTENDED_16B:
return 'INTEL_EXTENDED_16_BYTES_LE'
elif read[8:-8] == _IEEE_QUAD_PREC_BE:
return 'IEEE_QUAD_BE'
elif read[8:-8] == _IEEE_QUAD_PREC_LE:
return 'IEEE_QUAD_LE'
elif read[8:-8] == _IBM_DOUBLE_DOUBLE_LE:
return 'IBM_DOUBLE_DOUBLE_LE'
elif read[8:-8] == _IBM_DOUBLE_DOUBLE_BE:
return 'IBM_DOUBLE_DOUBLE_BE'
# if the content was 8 bytes, left with 32-8-8 = 16 bytes
elif read[:16] == _BEFORE_SEQ:
if read[16:-8] == _IEEE_DOUBLE_LE:
return 'IEEE_DOUBLE_LE'
elif read[16:-8] == _IEEE_DOUBLE_BE:
return 'IEEE_DOUBLE_BE'
if saw is not None:
raise ValueError("Unrecognized format (%s)" % saw)
else:
# We never detected the after_sequence
raise ValueError("Could not lock sequences (%s)" % saw)
def check_for_right_shift_internal_compiler_error(cmd):
"""
On our arm CI, this fails with an internal compilation error
The failure looks like the following, and can be reproduced on ARM64 GCC 5.4:
<source>: In function 'right_shift':
<source>:4:20: internal compiler error: in expand_shift_1, at expmed.c:2349
ip1[i] = ip1[i] >> in2;
^
Please submit a full bug report,
with preprocessed source if appropriate.
See <http://gcc.gnu.org/bugs.html> for instructions.
Compiler returned: 1
This function returns True if this compiler bug is present, and we need to
turn off optimization for the function
"""
cmd._check_compiler()
has_optimize = cmd.try_compile(textwrap.dedent("""\
__attribute__((optimize("O3"))) void right_shift() {}
"""), None, None)
if not has_optimize:
return False
no_err = cmd.try_compile(textwrap.dedent("""\
typedef long the_type; /* fails also for unsigned and long long */
__attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) {
for (int i = 0; i < n; i++) {
if (in2 < (the_type)sizeof(the_type) * 8) {
ip1[i] = ip1[i] >> in2;
}
}
}
"""), None, None)
return not no_err
|
|
"""Switches for AVM Fritz!Box functions."""
from __future__ import annotations
from collections import OrderedDict
from functools import partial
import logging
from typing import Any
from fritzconnection.core.exceptions import (
FritzActionError,
FritzActionFailedError,
FritzConnectionException,
FritzSecurityError,
FritzServiceError,
)
import xmltodict
from homeassistant.components.network import async_get_source_ip
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity, EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import slugify
from .common import (
FritzBoxBaseEntity,
FritzBoxTools,
FritzData,
FritzDevice,
FritzDeviceBase,
SwitchInfo,
device_filter_out_from_trackers,
)
from .const import (
DATA_FRITZ,
DOMAIN,
SWITCH_TYPE_DEFLECTION,
SWITCH_TYPE_PORTFORWARD,
SWITCH_TYPE_WIFINETWORK,
MeshRoles,
)
_LOGGER = logging.getLogger(__name__)
async def async_service_call_action(
fritzbox_tools: FritzBoxTools,
service_name: str,
service_suffix: str | None,
action_name: str,
**kwargs: Any,
) -> None | dict:
"""Return service details."""
return await fritzbox_tools.hass.async_add_executor_job(
partial(
service_call_action,
fritzbox_tools,
service_name,
service_suffix,
action_name,
**kwargs,
)
)
def service_call_action(
fritzbox_tools: FritzBoxTools,
service_name: str,
service_suffix: str | None,
action_name: str,
**kwargs: Any,
) -> dict | None:
"""Return service details."""
if f"{service_name}{service_suffix}" not in fritzbox_tools.connection.services:
return None
try:
return fritzbox_tools.connection.call_action( # type: ignore[no-any-return]
f"{service_name}:{service_suffix}",
action_name,
**kwargs,
)
except FritzSecurityError:
_LOGGER.error(
"Authorization Error: Please check the provided credentials and verify that you can log into the web interface",
exc_info=True,
)
return None
except (FritzActionError, FritzActionFailedError, FritzServiceError):
_LOGGER.error(
"Service/Action Error: cannot execute service %s",
service_name,
exc_info=True,
)
return None
except FritzConnectionException:
_LOGGER.error(
"Connection Error: Please check the device is properly configured for remote login",
exc_info=True,
)
return None
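# Illustrative call (sketch, not executed here): service name and suffix are joined
# with ":" for the TR-064 call, e.g.
#   service_call_action(fritzbox_tools, "WLANConfiguration", "1", "GetInfo")
# invokes the "WLANConfiguration:1" service's "GetInfo" action and returns its
# response dict, or None if the service is missing or one of the handled
# fritzconnection exceptions was raised.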
def get_deflections(
fritzbox_tools: FritzBoxTools, service_name: str
) -> list[OrderedDict[Any, Any]] | None:
"""Get deflection switch info."""
deflection_list = service_call_action(
fritzbox_tools,
service_name,
"1",
"GetDeflections",
)
if not deflection_list:
return []
items = xmltodict.parse(deflection_list["NewDeflectionList"])["List"]["Item"]
if not isinstance(items, list):
return [items]
return items
def deflection_entities_list(
fritzbox_tools: FritzBoxTools, device_friendly_name: str
) -> list[FritzBoxDeflectionSwitch]:
"""Get list of deflection entities."""
_LOGGER.debug("Setting up %s switches", SWITCH_TYPE_DEFLECTION)
service_name = "X_AVM-DE_OnTel"
deflections_response = service_call_action(
fritzbox_tools, service_name, "1", "GetNumberOfDeflections"
)
if not deflections_response:
_LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_DEFLECTION)
return []
_LOGGER.debug(
"Specific %s response: GetNumberOfDeflections=%s",
SWITCH_TYPE_DEFLECTION,
deflections_response,
)
if deflections_response["NewNumberOfDeflections"] == 0:
_LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_DEFLECTION)
return []
deflection_list = get_deflections(fritzbox_tools, service_name)
if deflection_list is None:
return []
return [
FritzBoxDeflectionSwitch(
fritzbox_tools, device_friendly_name, dict_of_deflection
)
for dict_of_deflection in deflection_list
]
def port_entities_list(
fritzbox_tools: FritzBoxTools, device_friendly_name: str, local_ip: str
) -> list[FritzBoxPortSwitch]:
"""Get list of port forwarding entities."""
_LOGGER.debug("Setting up %s switches", SWITCH_TYPE_PORTFORWARD)
entities_list: list[FritzBoxPortSwitch] = []
service_name = "Layer3Forwarding"
connection_type = service_call_action(
fritzbox_tools, service_name, "1", "GetDefaultConnectionService"
)
if not connection_type:
_LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_PORTFORWARD)
return []
# Return NewDefaultConnectionService sample: "1.WANPPPConnection.1"
con_type: str = connection_type["NewDefaultConnectionService"][2:][:-2]
# Query port forwardings and setup a switch for each forward for the current device
resp = service_call_action(
fritzbox_tools, con_type, "1", "GetPortMappingNumberOfEntries"
)
if not resp:
_LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_DEFLECTION)
return []
port_forwards_count: int = resp["NewPortMappingNumberOfEntries"]
_LOGGER.debug(
"Specific %s response: GetPortMappingNumberOfEntries=%s",
SWITCH_TYPE_PORTFORWARD,
port_forwards_count,
)
_LOGGER.debug("IP source for %s is %s", fritzbox_tools.host, local_ip)
for i in range(port_forwards_count):
portmap = service_call_action(
fritzbox_tools,
con_type,
"1",
"GetGenericPortMappingEntry",
NewPortMappingIndex=i,
)
if not portmap:
_LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_DEFLECTION)
continue
_LOGGER.debug(
"Specific %s response: GetGenericPortMappingEntry=%s",
SWITCH_TYPE_PORTFORWARD,
portmap,
)
# We can only handle port forwards of the given device
if portmap["NewInternalClient"] == local_ip:
port_name = portmap["NewPortMappingDescription"]
for entity in entities_list:
if entity.port_mapping and (
port_name in entity.port_mapping["NewPortMappingDescription"]
):
port_name = f"{port_name} {portmap['NewExternalPort']}"
entities_list.append(
FritzBoxPortSwitch(
fritzbox_tools,
device_friendly_name,
portmap,
port_name,
i,
con_type,
)
)
return entities_list
def wifi_entities_list(
fritzbox_tools: FritzBoxTools, device_friendly_name: str
) -> list[FritzBoxWifiSwitch]:
"""Get list of wifi entities."""
_LOGGER.debug("Setting up %s switches", SWITCH_TYPE_WIFINETWORK)
std_table = {"ax": "Wifi6", "ac": "5Ghz", "n": "2.4Ghz"}
if fritzbox_tools.model == "FRITZ!Box 7390":
std_table = {"n": "5Ghz"}
networks: dict = {}
for i in range(4):
if not ("WLANConfiguration" + str(i)) in fritzbox_tools.connection.services:
continue
network_info = service_call_action(
fritzbox_tools, "WLANConfiguration", str(i), "GetInfo"
)
if network_info:
ssid = network_info["NewSSID"]
_LOGGER.debug("SSID from device: <%s>", ssid)
if (
slugify(
ssid,
)
in [slugify(v) for v in networks.values()]
):
_LOGGER.debug("SSID duplicated, adding suffix")
networks[i] = f'{ssid} {std_table[network_info["NewStandard"]]}'
else:
networks[i] = ssid
_LOGGER.debug("SSID normalized: <%s>", networks[i])
return [
FritzBoxWifiSwitch(fritzbox_tools, device_friendly_name, net, network_name)
for net, network_name in networks.items()
]
def profile_entities_list(
router: FritzBoxTools,
data_fritz: FritzData,
) -> list[FritzBoxProfileSwitch]:
"""Add new tracker entities from the router."""
new_profiles: list[FritzBoxProfileSwitch] = []
if "X_AVM-DE_HostFilter1" not in router.connection.services:
return new_profiles
if router.unique_id not in data_fritz.profile_switches:
data_fritz.profile_switches[router.unique_id] = set()
for mac, device in router.devices.items():
if device_filter_out_from_trackers(
mac, device, data_fritz.profile_switches.values()
):
continue
new_profiles.append(FritzBoxProfileSwitch(router, device))
data_fritz.profile_switches[router.unique_id].add(mac)
return new_profiles
def all_entities_list(
fritzbox_tools: FritzBoxTools,
device_friendly_name: str,
data_fritz: FritzData,
local_ip: str,
) -> list[Entity]:
"""Get a list of all entities."""
if fritzbox_tools.mesh_role == MeshRoles.SLAVE:
return []
return [
*deflection_entities_list(fritzbox_tools, device_friendly_name),
*port_entities_list(fritzbox_tools, device_friendly_name, local_ip),
*wifi_entities_list(fritzbox_tools, device_friendly_name),
*profile_entities_list(fritzbox_tools, data_fritz),
]
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up entry."""
_LOGGER.debug("Setting up switches")
fritzbox_tools: FritzBoxTools = hass.data[DOMAIN][entry.entry_id]
data_fritz: FritzData = hass.data[DATA_FRITZ]
_LOGGER.debug("Fritzbox services: %s", fritzbox_tools.connection.services)
local_ip = await async_get_source_ip(
fritzbox_tools.hass, target_ip=fritzbox_tools.host
)
entities_list = await hass.async_add_executor_job(
all_entities_list,
fritzbox_tools,
entry.title,
data_fritz,
local_ip,
)
async_add_entities(entities_list)
@callback
def update_router() -> None:
"""Update the values of the router."""
async_add_entities(profile_entities_list(fritzbox_tools, data_fritz))
entry.async_on_unload(
async_dispatcher_connect(hass, fritzbox_tools.signal_device_new, update_router)
)
class FritzBoxBaseSwitch(FritzBoxBaseEntity):
"""Fritz switch base class."""
def __init__(
self,
fritzbox_tools: FritzBoxTools,
device_friendly_name: str,
switch_info: SwitchInfo,
) -> None:
"""Init Fritzbox port switch."""
super().__init__(fritzbox_tools, device_friendly_name)
self._description = switch_info["description"]
self._friendly_name = switch_info["friendly_name"]
self._icon = switch_info["icon"]
self._type = switch_info["type"]
self._update = switch_info["callback_update"]
self._switch = switch_info["callback_switch"]
self._name = f"{self._friendly_name} {self._description}"
self._unique_id = (
f"{self._fritzbox_tools.unique_id}-{slugify(self._description)}"
)
self._attributes: dict[str, str] = {}
self._is_available = True
self._attr_is_on = False
@property
def name(self) -> str:
"""Return name."""
return self._name
@property
def icon(self) -> str:
"""Return name."""
return self._icon
@property
def unique_id(self) -> str:
"""Return unique id."""
return self._unique_id
@property
def available(self) -> bool:
"""Return availability."""
return self._is_available
@property
def extra_state_attributes(self) -> dict[str, str]:
"""Return device attributes."""
return self._attributes
async def async_update(self) -> None:
"""Update data."""
_LOGGER.debug("Updating '%s' (%s) switch state", self.name, self._type)
await self._update()
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on switch."""
await self._async_handle_turn_on_off(turn_on=True)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off switch."""
await self._async_handle_turn_on_off(turn_on=False)
async def _async_handle_turn_on_off(self, turn_on: bool) -> None:
"""Handle switch state change request."""
await self._switch(turn_on)
self._attr_is_on = turn_on
class FritzBoxPortSwitch(FritzBoxBaseSwitch, SwitchEntity):
"""Defines a FRITZ!Box Tools PortForward switch."""
def __init__(
self,
fritzbox_tools: FritzBoxTools,
device_friendly_name: str,
port_mapping: dict[str, Any] | None,
port_name: str,
idx: int,
connection_type: str,
) -> None:
"""Init Fritzbox port switch."""
self._fritzbox_tools = fritzbox_tools
self._attributes = {}
self.connection_type = connection_type
self.port_mapping = port_mapping # dict in the format as it comes from fritzconnection. eg: {'NewRemoteHost': '0.0.0.0', 'NewExternalPort': 22, 'NewProtocol': 'TCP', 'NewInternalPort': 22, 'NewInternalClient': '192.168.178.31', 'NewEnabled': True, 'NewPortMappingDescription': 'Beast SSH ', 'NewLeaseDuration': 0}
self._idx = idx # needed for update routine
self._attr_entity_category = EntityCategory.CONFIG
if port_mapping is None:
return
switch_info = SwitchInfo(
description=f"Port forward {port_name}",
friendly_name=device_friendly_name,
icon="mdi:check-network",
type=SWITCH_TYPE_PORTFORWARD,
callback_update=self._async_fetch_update,
callback_switch=self._async_handle_port_switch_on_off,
)
super().__init__(fritzbox_tools, device_friendly_name, switch_info)
async def _async_fetch_update(self) -> None:
"""Fetch updates."""
self.port_mapping = await async_service_call_action(
self._fritzbox_tools,
self.connection_type,
"1",
"GetGenericPortMappingEntry",
NewPortMappingIndex=self._idx,
)
_LOGGER.debug(
"Specific %s response: %s", SWITCH_TYPE_PORTFORWARD, self.port_mapping
)
if self.port_mapping is None:
self._is_available = False
return
self._attr_is_on = self.port_mapping["NewEnabled"] is True
self._is_available = True
attributes_dict = {
"NewInternalClient": "internal_ip",
"NewInternalPort": "internal_port",
"NewExternalPort": "external_port",
"NewProtocol": "protocol",
"NewPortMappingDescription": "description",
}
for key, attr in attributes_dict.items():
self._attributes[attr] = self.port_mapping[key]
async def _async_handle_port_switch_on_off(self, turn_on: bool) -> bool:
if self.port_mapping is None:
return False
self.port_mapping["NewEnabled"] = "1" if turn_on else "0"
resp = await async_service_call_action(
self._fritzbox_tools,
self.connection_type,
"1",
"AddPortMapping",
**self.port_mapping,
)
        return resp is not None
class FritzBoxDeflectionSwitch(FritzBoxBaseSwitch, SwitchEntity):
"""Defines a FRITZ!Box Tools PortForward switch."""
def __init__(
self,
fritzbox_tools: FritzBoxTools,
device_friendly_name: str,
dict_of_deflection: Any,
) -> None:
"""Init Fritxbox Deflection class."""
self._fritzbox_tools: FritzBoxTools = fritzbox_tools
self.dict_of_deflection = dict_of_deflection
self._attributes = {}
self.id = int(self.dict_of_deflection["DeflectionId"])
self._attr_entity_category = EntityCategory.CONFIG
switch_info = SwitchInfo(
description=f"Call deflection {self.id}",
friendly_name=device_friendly_name,
icon="mdi:phone-forward",
type=SWITCH_TYPE_DEFLECTION,
callback_update=self._async_fetch_update,
callback_switch=self._async_switch_on_off_executor,
)
super().__init__(self._fritzbox_tools, device_friendly_name, switch_info)
async def _async_fetch_update(self) -> None:
"""Fetch updates."""
resp = await async_service_call_action(
self._fritzbox_tools, "X_AVM-DE_OnTel", "1", "GetDeflections"
)
if not resp:
self._is_available = False
return
self.dict_of_deflection = xmltodict.parse(resp["NewDeflectionList"])["List"][
"Item"
]
if isinstance(self.dict_of_deflection, list):
self.dict_of_deflection = self.dict_of_deflection[self.id]
_LOGGER.debug(
"Specific %s response: NewDeflectionList=%s",
SWITCH_TYPE_DEFLECTION,
self.dict_of_deflection,
)
self._attr_is_on = self.dict_of_deflection["Enable"] == "1"
self._is_available = True
self._attributes["type"] = self.dict_of_deflection["Type"]
self._attributes["number"] = self.dict_of_deflection["Number"]
self._attributes["deflection_to_number"] = self.dict_of_deflection[
"DeflectionToNumber"
]
# Return mode sample: "eImmediately"
self._attributes["mode"] = self.dict_of_deflection["Mode"][1:]
self._attributes["outgoing"] = self.dict_of_deflection["Outgoing"]
self._attributes["phonebook_id"] = self.dict_of_deflection["PhonebookID"]
async def _async_switch_on_off_executor(self, turn_on: bool) -> None:
"""Handle deflection switch."""
await async_service_call_action(
self._fritzbox_tools,
"X_AVM-DE_OnTel",
"1",
"SetDeflectionEnable",
NewDeflectionId=self.id,
NewEnable="1" if turn_on else "0",
)
class FritzBoxProfileSwitch(FritzDeviceBase, SwitchEntity):
"""Defines a FRITZ!Box Tools DeviceProfile switch."""
_attr_icon = "mdi:router-wireless-settings"
def __init__(self, fritzbox_tools: FritzBoxTools, device: FritzDevice) -> None:
"""Init Fritz profile."""
super().__init__(fritzbox_tools, device)
self._attr_is_on: bool = False
self._name = f"{device.hostname} Internet Access"
self._attr_unique_id = f"{self._mac}_internet_access"
self._attr_entity_category = EntityCategory.CONFIG
@property
def is_on(self) -> bool:
"""Switch status."""
return self._router.devices[self._mac].wan_access
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on switch."""
await self._async_handle_turn_on_off(turn_on=True)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off switch."""
await self._async_handle_turn_on_off(turn_on=False)
async def _async_handle_turn_on_off(self, turn_on: bool) -> bool:
"""Handle switch state change request."""
await self._async_switch_on_off(turn_on)
self.async_write_ha_state()
return True
async def _async_switch_on_off(self, turn_on: bool) -> None:
"""Handle parental control switch."""
await async_service_call_action(
self._router,
"X_AVM-DE_HostFilter",
"1",
"DisallowWANAccessByIP",
NewIPv4Address=self.ip_address,
NewDisallow="0" if turn_on else "1",
)
class FritzBoxWifiSwitch(FritzBoxBaseSwitch, SwitchEntity):
"""Defines a FRITZ!Box Tools Wifi switch."""
def __init__(
self,
fritzbox_tools: FritzBoxTools,
device_friendly_name: str,
network_num: int,
network_name: str,
) -> None:
"""Init Fritz Wifi switch."""
self._fritzbox_tools = fritzbox_tools
self._attributes = {}
self._attr_entity_category = EntityCategory.CONFIG
self._network_num = network_num
switch_info = SwitchInfo(
description=f"Wi-Fi {network_name}",
friendly_name=device_friendly_name,
icon="mdi:wifi",
type=SWITCH_TYPE_WIFINETWORK,
callback_update=self._async_fetch_update,
callback_switch=self._async_switch_on_off_executor,
)
super().__init__(self._fritzbox_tools, device_friendly_name, switch_info)
async def _async_fetch_update(self) -> None:
"""Fetch updates."""
wifi_info = await async_service_call_action(
self._fritzbox_tools,
"WLANConfiguration",
str(self._network_num),
"GetInfo",
)
_LOGGER.debug(
"Specific %s response: GetInfo=%s", SWITCH_TYPE_WIFINETWORK, wifi_info
)
if wifi_info is None:
self._is_available = False
return
self._attr_is_on = wifi_info["NewEnable"] is True
self._is_available = True
std = wifi_info["NewStandard"]
self._attributes["standard"] = std if std else None
self._attributes["bssid"] = wifi_info["NewBSSID"]
self._attributes["mac_address_control"] = wifi_info[
"NewMACAddressControlEnabled"
]
async def _async_switch_on_off_executor(self, turn_on: bool) -> None:
"""Handle wifi switch."""
await async_service_call_action(
self._fritzbox_tools,
"WLANConfiguration",
str(self._network_num),
"SetEnable",
NewEnable="1" if turn_on else "0",
)
|
|
# Copyright 2017 F5 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controller Unit Tests.
Units tests for testing BIG-IP resource management.
"""
from __future__ import absolute_import
import unittest
import json
from mock import Mock, patch
from f5_cccl.utils.mgmt import ManagementRoot
from f5_cccl.utils.mgmt import mgmt_root
from f5_cccl.utils.network import apply_network_fdb_config
from f5_cccl.exceptions import F5CcclValidationError
from .. import bigipconfigdriver as ctlr
# Cloud app data
cloud_test_data = [
'tests/one_svc_two_nodes.json',
'tests/invalid_svcs.json',
'tests/one_svc_one_node.json',
'tests/one_svc_four_nodes.json',
'tests/one_iapp.json',
'tests/no_apps.json',
'tests/one_svc_two_nodes_pool_only.json'
]
class IPV4FormatError(Exception):
"""Exception type for improperly formatted IPv4 address."""
def __init__(self, msg):
"""Create ipv4 format exception object."""
Exception.__init__(self, msg)
def ipv4_to_mac(ip_str):
"""Convert an IPV4 string to a fake MAC address."""
ip = ip_str.split('.')
if len(ip) != 4:
raise IPV4FormatError('Bad IPv4 address format specified for '
'FDB record: {}'.format(ip_str))
return "0a:0a:%02x:%02x:%02x:%02x" % (
int(ip[0]), int(ip[1]), int(ip[2]), int(ip[3]))
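# Example (illustration only): ipv4_to_mac('10.0.0.1') -> '0a:0a:0a:00:00:01'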
class VxLANTunnel():
"""A mock BIG-IP VxLAN tunnel."""
def __init__(self, partition, name, initial_records):
"""Initialize the object."""
self.partition = partition
self.name = name
self.records = initial_records
def update(self, **kwargs):
"""Update list of vxlan records."""
self.records = []
if 'records' in kwargs:
self.records = kwargs['records']
class CloudTest(unittest.TestCase):
"""Cloud/Big-IP configuration tests.
Test BIG-IP configuration given various cloud states and existing
BIG-IP states
"""
def setUp(self):
"""Test suite set up."""
# Mock the call to _get_tmos_version(), which tries to make a
# connection
partition = 'test'
with patch.object(ManagementRoot, '_get_tmos_version'):
bigip = mgmt_root('1.2.3.4', 'admin', 'default', 443, 'tmos')
self.mgr = ctlr.CloudServiceManager(
bigip,
partition)
self.cccl = self.mgr._cccl
self.cccl._service_manager._service_deployer._bigip.refresh = Mock()
self.cccl._service_manager._service_deployer.deploy = \
Mock(return_value=0)
# mock out the bigip.tm.net.fdb.tunnels.tunnel resource
bigip.tm = type('', (), {})()
bigip.tm.net = type('', (), {})()
bigip.tm.net.fdb = type('', (), {})()
bigip.tm.net.fdb.tunnels = type('', (), {})()
bigip.tm.net.fdb.tunnels.tunnel = \
type('', (), {})()
bigip.tm.net.fdb.tunnels.tunnel.load = \
Mock(side_effect=self.mock_net_fdb_tunnels_tunnel_load)
def read_test_vectors(self, cloud_state, network_state=None):
"""Read test vectors for the various states."""
# Read the cloud state
if cloud_state:
with open(cloud_state) as json_data:
self.cloud_data = json.load(json_data)
if network_state:
with open(network_state) as json_data:
self.network_data = json.load(json_data)
def mock_net_fdb_tunnels_tunnel_load(self, partition, name):
"""Mock: Get a mocked vxla tunnel to store the vxlan record config."""
if not hasattr(self, 'vxlan_tunnel'):
# create a BigIP resource to store the 'current' tunnel
# FDB as well as updates.
self.vxlan_tunnel = VxLANTunnel(partition, name, self.network_data)
return self.vxlan_tunnel
def verify_cloud_config(self, cloud_state, expected_state):
"""Test: Verify expected config created from the cloud state."""
# Get the test data
self.read_test_vectors(cloud_state)
# Do the BIG-IP configuration
cfg = ctlr.create_ltm_config(self.mgr.get_partition(), self.cloud_data)
with open(expected_state) as json_data:
exp = json.load(json_data)
self.assertEqual(cfg, exp['ltm'])
self.mgr._apply_ltm_config(cfg)
def test_cccl_exceptions(
self,
cloud_state='tests/one_svc_two_nodes.json'):
"""Test: CCCL exceptions."""
cfg = {"not valid json"}
self.assertRaises(F5CcclValidationError, self.mgr._apply_ltm_config,
cfg)
# Get the test data
self.read_test_vectors(cloud_state)
# Do the BIG-IP configuration
cfg = ctlr.create_ltm_config(self.mgr.get_partition(), self.cloud_data)
# Corrupt the config
del cfg['virtualServers'][0]['name']
self.assertRaises(F5CcclValidationError, self.mgr._apply_ltm_config,
cfg)
def test_cloud_configs(self):
"""Test: Verify expected BIG-IP config created from cloud state."""
# Verify configuration
for data_file in cloud_test_data:
expected_file = data_file.replace('.json', '_expected.json')
self.verify_cloud_config(data_file, expected_file)
def test_pool_only_to_virtual_server(
self,
cloud_state='tests/one_svc_two_nodes_pool_only.json'):
"""Test: Cloud app without virtual server gets virtual server."""
# Get the test data
self.read_test_vectors(cloud_state)
# Do the BIG-IP configuration
cfg = ctlr.create_ltm_config(self.mgr.get_partition(), self.cloud_data)
self.mgr._apply_ltm_config(cfg)
# Reconfigure BIG-IP by adding virtual server to existing pool
vs = {
'destination': '/test/10.128.10.240:5051',
"enabled": True,
"name": "default_configmap",
"ipProtocol": "tcp",
"pool": "/test/default_configmap"
}
self.cloud_data['resources']['test']['virtualServers'].append(vs)
cfg = ctlr.create_ltm_config(self.mgr.get_partition(), self.cloud_data)
self.mgr._apply_ltm_config(cfg)
def test_virtual_server_to_pool_only(
self,
cloud_state='tests/one_svc_two_nodes.json'):
"""Test: Cloud app with virtual server removes virtual server."""
# Get the test data
self.read_test_vectors(cloud_state)
# Do the BIG-IP configuration
cfg = ctlr.create_ltm_config(self.mgr.get_partition(), self.cloud_data)
self.mgr._apply_ltm_config(cfg)
# Reconfigure BIG-IP by removing virtual server
self.cloud_data['resources']['test']['virtualServers'].pop()
cfg = ctlr.create_ltm_config(self.mgr.get_partition(), self.cloud_data)
self.mgr._apply_ltm_config(cfg)
def test_network_0_existing_vxlan_nodes_0_requested_vxlan_nodes(
self,
network_state='tests/bigip_test_vxlan_0_records.json',
cloud_state='tests/openshift_0_nodes.json'):
"""Test: openshift environment with 0 nodes."""
# Get the test data
self.read_test_vectors(cloud_state=cloud_state,
network_state=network_state)
# Do the BIG-IP configuration
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
# Verify we only query bigip once for the initial state and
# don't try to write an update if nothing has changed.
self.assertEqual(self.mgr.mgmt_root().
tm.net.fdb.tunnels.tunnel.load.call_count, 1)
# Compare final content with self.network_state - should be the same
self.assertEqual(self.compute_fdb_records(), self.vxlan_tunnel.records)
def test_network_1_existing_vxlan_nodes_1_requested_vxlan_nodes(
self,
network_state='tests/bigip_test_vxlan_1_record.json',
cloud_state='tests/openshift_1_node.json'):
"""Test: openshift environment with 1 nodes."""
# Get the test data
self.read_test_vectors(cloud_state=cloud_state,
network_state=network_state)
# Do the BIG-IP configuration
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
# Verify we only query bigip once for the initial state and
# don't try to write an update if nothing has changed.
self.assertEqual(self.mgr.mgmt_root().
tm.net.fdb.tunnels.tunnel.load.call_count, 1)
# Compare final content with self.network_state - should be the same
self.assertEqual(self.compute_fdb_records(), self.vxlan_tunnel.records)
def test_network_1_existing_vxlan_nodes_0_requested_vxlan_nodes(
self,
network_state='tests/bigip_test_vxlan_1_record.json',
cloud_state='tests/openshift_0_nodes.json'):
"""Test: openshift environment with 1 existing node, 0 requested."""
# Get the test data
self.read_test_vectors(cloud_state=cloud_state,
network_state=network_state)
# Do the BIG-IP configuration
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
# Verify we first query bigip once for the initial state and
# then perform an update due to differences
self.assertEqual(self.mgr.mgmt_root().
tm.net.fdb.tunnels.tunnel.load.call_count, 2)
# Compare final content with self.network_state - should be the same
self.assertEqual(self.compute_fdb_records(), self.vxlan_tunnel.records)
def test_network_0_existing_vxlan_nodes_1_requested_vxlan_nodes(
self,
network_state='tests/bigip_test_vxlan_0_records.json',
cloud_state='tests/openshift_1_node.json'):
"""Test: openshift environment with 0 existing nodes, 1 requested."""
# Get the test data
self.read_test_vectors(cloud_state=cloud_state,
network_state=network_state)
# Do the BIG-IP configuration
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
# Verify we first query bigip once for the initial state and
# then perform an update due to differences
self.assertEqual(self.mgr.mgmt_root().
tm.net.fdb.tunnels.tunnel.load.call_count, 2)
# Compare final content with self.network_state - should be the same
self.assertEqual(self.compute_fdb_records(), self.vxlan_tunnel.records)
def test_network_1_existing_vxlan_nodes_3_requested_vxlan_nodes(
self,
network_state='tests/bigip_test_vxlan_1_record.json',
cloud_state='tests/openshift_3_nodes.json'):
"""Test: Cloud openshift environment with 0 nodes."""
# Get the test data
self.read_test_vectors(cloud_state=cloud_state,
network_state=network_state)
# Do the BIG-IP configuration
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
# Verify we first query bigip once for the initial state and
# then perform an update due to differences
self.assertEqual(self.mgr.mgmt_root().
tm.net.fdb.tunnels.tunnel.load.call_count, 2)
# Compare final content with self.network_state - should be the same
self.assertEqual(self.compute_fdb_records(), self.vxlan_tunnel.records)
def test_network_3_existing_vxlan_nodes_1_requested_vxlan_nodes(
self,
network_state='tests/bigip_test_vxlan_3_records.json',
cloud_state='tests/openshift_1_node.json'):
"""Test: Cloud openshift environment with 0 nodes."""
# Get the test data
self.read_test_vectors(cloud_state=cloud_state,
network_state=network_state)
# Do the BIG-IP configuration
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
# Verify we first query bigip once for the initial state and
# then perform an update due to differences
self.assertEqual(self.mgr.mgmt_root().
tm.net.fdb.tunnels.tunnel.load.call_count, 2)
# Compare final content with self.network_state - should be the same
self.assertEqual(self.compute_fdb_records(), self.vxlan_tunnel.records)
def test_network_bad_vxlan_ip(
self,
network_state='tests/bigip_test_vxlan_3_records.json',
cloud_state='tests/openshift_1_node.json'):
"""Test: BigIP not updated if IP address in badly formatted."""
self.read_test_vectors(cloud_state=cloud_state,
network_state=network_state)
# Verify original configuration is untouched if we have errors
# in the cloud config file
self.cloud_data['openshift-sdn']['vxlan-node-ips'][0] = '55'
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
self.assertEqual(self.network_data, self.vxlan_tunnel.records)
self.cloud_data['openshift-sdn']['vxlan-node-ips'][0] = 55
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
self.assertEqual(self.network_data, self.vxlan_tunnel.records)
self.cloud_data['openshift-sdn']['vxlan-node-ips'][0] = 'myaddr'
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
self.assertEqual(self.network_data, self.vxlan_tunnel.records)
def test_network_bad_partition_name(
self,
network_state='tests/bigip_test_vxlan_3_records.json',
cloud_state='tests/openshift_1_node.json'):
"""Test: BigIP not updated if the partition name format is bad."""
self.read_test_vectors(cloud_state=cloud_state,
network_state=network_state)
# Verify original configuration is untouched if we have errors
# in the cloud config file
self.cloud_data['openshift-sdn']['vxlan-name'] = \
'/bad/partition/name/idf/'
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
self.assertFalse(hasattr(self, 'vxlan_tunnel'))
self.cloud_data['openshift-sdn']['vxlan-name'] = \
'bad/partition/name'
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
self.assertFalse(hasattr(self, 'vxlan_tunnel'))
self.cloud_data['openshift-sdn']['vxlan-name'] = ''
cfg = ctlr.create_network_config(self.cloud_data)
apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
self.assertFalse(hasattr(self, 'vxlan_tunnel'))
def compute_fdb_records(self):
"""Create a FDB record for each openshift node."""
records = []
if self.cloud_data and 'openshift-sdn' in self.cloud_data and \
'vxlan-node-ips' in self.cloud_data['openshift-sdn']:
for node_ip in self.cloud_data['openshift-sdn']['vxlan-node-ips']:
record = {'endpoint': node_ip, 'name': ipv4_to_mac(node_ip)}
records.append(record)
return records
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import print_function
from functools import partial
import random
import types
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, \
Text, Type, cast
from unittest import loader, runner # type: ignore # Mypy cannot pick these up.
from unittest.result import TestResult
import coverage
from django.conf import settings
from django.db import connections, ProgrammingError
from django.urls.resolvers import RegexURLPattern
from django.test import TestCase
from django.test import runner as django_runner
from django.test.runner import DiscoverRunner
from django.test.signals import template_rendered
import six
from zerver.lib import test_classes, test_helpers
from zerver.lib.cache import bounce_key_prefix_for_testing
from zerver.lib.rate_limiter import bounce_redis_key_prefix_for_testing
from zerver.lib.test_classes import flush_caches_for_testing
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.test_helpers import (
get_all_templates, write_instrumentation_reports,
append_instrumentation_data
)
import os
import subprocess
import sys
import time
import traceback
import unittest
if False:
# Only needed by mypy.
from multiprocessing.sharedctypes import Synchronized
_worker_id = 0 # Used to identify the worker process.
def slow(slowness_reason):
# type: (str) -> Callable[[Callable], Callable]
    '''
    This is a decorator that annotates a test as being "known
    to be slow."  The decorator sets slowness_reason as an attribute
    of the function.  Other code can use this annotation as needed,
    e.g. to exclude these tests in "fast" mode.
    '''
def decorator(f):
# type: (Any) -> Any
f.slowness_reason = slowness_reason
return f
return decorator
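# Illustrative usage of @slow (the test name below is hypothetical):
#
#     @slow("renders every page in the app")
#     def test_render_all_pages(self):
#         ...
#
# is_known_slow_test() below then returns True for the decorated method.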
def is_known_slow_test(test_method):
# type: (Any) -> bool
return hasattr(test_method, 'slowness_reason')
def full_test_name(test):
# type: (TestCase) -> str
test_module = test.__module__
test_class = test.__class__.__name__
test_method = test._testMethodName
return '%s.%s.%s' % (test_module, test_class, test_method)
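# Example (hypothetical test): for method test_upload on class UploadTest in module
# zerver.tests.test_upload, full_test_name returns
# 'zerver.tests.test_upload.UploadTest.test_upload'.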
def get_test_method(test):
# type: (TestCase) -> Callable[[], None]
return getattr(test, test._testMethodName)
# Each tuple is delay, test_name, slowness_reason
TEST_TIMINGS = [] # type: List[Tuple[float, str, str]]
def report_slow_tests():
# type: () -> None
timings = sorted(TEST_TIMINGS, reverse=True)
print('SLOWNESS REPORT')
print(' delay test')
print(' ---- ----')
for delay, test_name, slowness_reason in timings[:15]:
if not slowness_reason:
slowness_reason = 'UNKNOWN WHY SLOW, please investigate'
print(' %0.3f %s\n %s\n' % (delay, test_name, slowness_reason))
print('...')
for delay, test_name, slowness_reason in timings[100:]:
if slowness_reason:
print(' %.3f %s is not that slow' % (delay, test_name))
print(' consider removing @slow decorator')
print(' This may no longer be true: %s' % (slowness_reason,))
def enforce_timely_test_completion(test_method, test_name, delay, result):
# type: (Any, str, float, TestResult) -> None
if hasattr(test_method, 'slowness_reason'):
max_delay = 1.1 # seconds
else:
max_delay = 0.4 # seconds
if delay > max_delay:
msg = '** Test is TOO slow: %s (%.3f s)\n' % (test_name, delay)
result.addInfo(test_method, msg)
def fast_tests_only():
# type: () -> bool
return "FAST_TESTS_ONLY" in os.environ
def run_test(test, result):
# type: (TestCase, TestResult) -> bool
failed = False
test_method = get_test_method(test)
if fast_tests_only() and is_known_slow_test(test_method):
return failed
test_name = full_test_name(test)
bounce_key_prefix_for_testing(test_name)
bounce_redis_key_prefix_for_testing(test_name)
flush_caches_for_testing()
if not hasattr(test, "_pre_setup"):
# test_name is likely of the form unittest.loader.ModuleImportFailure.zerver.tests.test_upload
import_failure_prefix = 'unittest.loader.ModuleImportFailure.'
if test_name.startswith(import_failure_prefix):
actual_test_name = test_name[len(import_failure_prefix):]
error_msg = ("\nActual test to be run is %s, but import failed.\n"
"Importing test module directly to generate clearer "
"traceback:\n") % (actual_test_name,)
result.addInfo(test, error_msg)
try:
command = [sys.executable, "-c", "import %s" % (actual_test_name,)]
msg = "Import test command: `%s`" % (' '.join(command),)
result.addInfo(test, msg)
subprocess.check_call(command)
except subprocess.CalledProcessError:
msg = ("If that traceback is confusing, try doing the "
"import inside `./manage.py shell`")
result.addInfo(test, msg)
result.addError(test, sys.exc_info())
return True
msg = ("Import unexpectedly succeeded! Something is wrong. Try "
"running `import %s` inside `./manage.py shell`.\n"
"If that works, you may have introduced an import "
"cycle.") % (actual_test_name,)
import_error = (Exception, Exception(msg), None) # type: Tuple[Any, Any, Any]
result.addError(test, import_error)
return True
else:
msg = "Test doesn't have _pre_setup; something is wrong."
error_pre_setup = (Exception, Exception(msg), None) # type: Tuple[Any, Any, Any]
result.addError(test, error_pre_setup)
return True
test._pre_setup()
start_time = time.time()
test(result) # unittest will handle skipping, error, failure and success.
delay = time.time() - start_time
enforce_timely_test_completion(test_method, test_name, delay, result)
slowness_reason = getattr(test_method, 'slowness_reason', '')
TEST_TIMINGS.append((delay, test_name, slowness_reason))
test._post_teardown()
return failed
class TextTestResult(runner.TextTestResult):
"""
This class has unpythonic function names because base class follows
this style.
"""
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
super(TextTestResult, self).__init__(*args, **kwargs)
self.failed_tests = [] # type: List[str]
def addInfo(self, test, msg):
# type: (TestCase, Text) -> None
self.stream.write(msg)
self.stream.flush()
def addInstrumentation(self, test, data):
# type: (TestCase, Dict[str, Any]) -> None
append_instrumentation_data(data)
def startTest(self, test):
# type: (TestCase) -> None
TestResult.startTest(self, test)
self.stream.writeln("Running {}".format(full_test_name(test)))
self.stream.flush()
def addSuccess(self, *args, **kwargs):
# type: (*Any, **Any) -> None
TestResult.addSuccess(self, *args, **kwargs)
def addError(self, *args, **kwargs):
# type: (*Any, **Any) -> None
TestResult.addError(self, *args, **kwargs)
test_name = full_test_name(args[0])
self.failed_tests.append(test_name)
def addFailure(self, *args, **kwargs):
# type: (*Any, **Any) -> None
TestResult.addFailure(self, *args, **kwargs)
test_name = full_test_name(args[0])
self.failed_tests.append(test_name)
def addSkip(self, test, reason):
# type: (TestCase, Text) -> None
TestResult.addSkip(self, test, reason)
self.stream.writeln("** Skipping {}: {}".format(full_test_name(test),
reason))
self.stream.flush()
class RemoteTestResult(django_runner.RemoteTestResult):
"""
The class follows the unpythonic style of function names of the
base class.
"""
def addInfo(self, test, msg):
# type: (TestCase, Text) -> None
self.events.append(('addInfo', self.test_index, msg))
def addInstrumentation(self, test, data):
# type: (TestCase, Dict[str, Any]) -> None
# Some elements of data['info'] cannot be serialized.
if 'info' in data:
del data['info']
self.events.append(('addInstrumentation', self.test_index, data))
def process_instrumented_calls(func):
# type: (Callable) -> None
for call in test_helpers.INSTRUMENTED_CALLS:
func(call)
def run_subsuite(collect_coverage, args):
# type: (bool, Tuple[int, Tuple[Type[Iterable[TestCase]], List[str]], bool]) -> Tuple[int, Any]
if collect_coverage:
cov = coverage.Coverage(config_file="tools/coveragerc", data_suffix=True)
cov.start()
# Reset the accumulated INSTRUMENTED_CALLS before running this subsuite.
test_helpers.INSTRUMENTED_CALLS = []
subsuite_index, subsuite, failfast = args
runner = RemoteTestRunner(failfast=failfast)
result = runner.run(deserialize_suite(subsuite))
# Now we send instrumentation related events. This data will be
# appended to the data structure in the main thread. For Mypy,
# type of Partial is different from Callable. All the methods of
# TestResult are passed TestCase as the first argument but
# addInstrumentation does not need it.
process_instrumented_calls(partial(result.addInstrumentation, None)) # type: ignore
if collect_coverage:
cov.stop()
cov.save()
return subsuite_index, result.events
# Monkey-patch database creation to fix unnecessary sleep(1)
from django.db.backends.postgresql.creation import DatabaseCreation
def _replacement_destroy_test_db(self, test_database_name, verbosity):
# type: (Any, str, Any) -> None
"""Replacement for Django's _destroy_test_db that removes the
unnecessary sleep(1)."""
with self.connection._nodb_connection.cursor() as cursor:
cursor.execute("DROP DATABASE %s"
% self.connection.ops.quote_name(test_database_name))
DatabaseCreation._destroy_test_db = _replacement_destroy_test_db
def destroy_test_databases(database_id=None):
# type: (Optional[int]) -> None
"""
When database_id is None, the name of the databases is picked up
by the database settings.
"""
for alias in connections:
connection = connections[alias]
try:
connection.creation.destroy_test_db(number=database_id)
except ProgrammingError:
# DB doesn't exist. No need to do anything.
pass
def create_test_databases(database_id):
# type: (int) -> None
for alias in connections:
connection = connections[alias]
connection.creation.clone_test_db(
number=database_id,
keepdb=True,
)
settings_dict = connection.creation.get_test_db_clone_settings(database_id)
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
connection.settings_dict.update(settings_dict)
connection.close()
def init_worker(counter):
# type: (Synchronized) -> None
"""
This function runs only under parallel mode. It initializes the
individual processes which are also called workers.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
"""
You can now use _worker_id.
"""
test_classes.API_KEYS = {}
# Clear the cache
from zerver.lib.cache import get_cache_backend
cache = get_cache_backend(None)
cache.clear()
# Close all connections
connections.close_all()
destroy_test_databases(_worker_id)
create_test_databases(_worker_id)
# Every process should upload to a separate directory so that
# race conditions can be avoided.
settings.LOCAL_UPLOADS_DIR = '{}_{}'.format(settings.LOCAL_UPLOADS_DIR,
_worker_id)
def is_upload_avatar_url(url):
# type: (RegexURLPattern) -> bool
if url.regex.pattern == r'^user_avatars/(?P<path>.*)$':
return True
return False
# We manually update the upload directory path in the url regex.
from zproject import dev_urls
found = False
for url in dev_urls.urls:
if is_upload_avatar_url(url):
found = True
new_root = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")
url.default_args['document_root'] = new_root
if not found:
print("*** Upload directory not found.")
class TestSuite(unittest.TestSuite):
collect_coverage = False
def run(self, result, debug=False):
# type: (TestResult, Optional[bool]) -> TestResult
"""
This function mostly contains the code from
unittest.TestSuite.run. The need to override this function
occurred because we use run_test to run the testcase.
"""
topLevel = False
if getattr(result, '_testRunEntered', False) is False:
result._testRunEntered = topLevel = True
for test in self: # type: ignore # Mypy cannot recognize this
# but this is correct. Taken from unittest.
if result.shouldStop:
break
if isinstance(test, TestSuite):
test.run(result, debug=debug)
else:
self._tearDownPreviousClass(test, result) # type: ignore
self._handleModuleFixture(test, result) # type: ignore
self._handleClassSetUp(test, result) # type: ignore
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
continue
failed = run_test(test, result)
if failed or result.shouldStop:
result.shouldStop = True
break
if topLevel:
self._tearDownPreviousClass(None, result) # type: ignore
self._handleModuleTearDown(result) # type: ignore
result._testRunEntered = False
return result
class CoverageTestSuite(TestSuite):
collect_coverage = True
class TestLoader(loader.TestLoader):
suiteClass = TestSuite
class CoverageTestLoader(TestLoader):
suiteClass = CoverageTestSuite
class ParallelTestSuite(django_runner.ParallelTestSuite):
init_worker = init_worker
def __init__(self, suite, processes, failfast):
# type: (TestSuite, int, bool) -> None
super(ParallelTestSuite, self).__init__(suite, processes, failfast)
self.subsuites = SubSuiteList(self.subsuites) # type: SubSuiteList
# ParallelTestSuite expects this to be a bound method so it can access
# __func__ when passing it to multiprocessing.
method = partial(run_subsuite, suite.collect_coverage)
self.run_subsuite = six.create_bound_method(cast(types.FunctionType, method), self)
class Runner(DiscoverRunner):
test_suite = TestSuite
test_loader = TestLoader()
parallel_test_suite = ParallelTestSuite
def __init__(self, coverage=False, *args, **kwargs):
# type: (bool, *Any, **Any) -> None
DiscoverRunner.__init__(self, *args, **kwargs)
# `templates_rendered` holds templates which were rendered
# in proper logical tests.
self.templates_rendered = set() # type: Set[str]
# `shallow_tested_templates` holds templates which were rendered
# in `zerver.tests.test_templates`.
self.shallow_tested_templates = set() # type: Set[str]
template_rendered.connect(self.on_template_rendered)
self.database_id = random.randint(1, 10000)
def get_resultclass(self):
# type: () -> Type[TestResult]
return TextTestResult
def on_template_rendered(self, sender, context, **kwargs):
# type: (Any, Dict[str, Any], **Any) -> None
if hasattr(sender, 'template'):
template_name = sender.template.name
if template_name not in self.templates_rendered:
if context.get('shallow_tested') and template_name not in self.templates_rendered:
self.shallow_tested_templates.add(template_name)
else:
self.templates_rendered.add(template_name)
self.shallow_tested_templates.discard(template_name)
def get_shallow_tested_templates(self):
# type: () -> Set[str]
return self.shallow_tested_templates
def setup_test_environment(self, *args, **kwargs):
# type: (*Any, **Any) -> Any
settings.DATABASES['default']['NAME'] = settings.BACKEND_DATABASE_TEMPLATE
# We create/destroy the test databases in run_tests to avoid
# duplicate work when running in parallel mode.
return super(Runner, self).setup_test_environment(*args, **kwargs)
def teardown_test_environment(self, *args, **kwargs):
# type: (*Any, **Any) -> Any
# No need to pass the database id now. It will be picked up
# automatically through settings.
if self.parallel == 1:
# In parallel mode (parallel > 1), destroy_test_databases will
# destroy settings.BACKEND_DATABASE_TEMPLATE; we don't want that.
# So run this only in serial mode.
destroy_test_databases()
return super(Runner, self).teardown_test_environment(*args, **kwargs)
def run_tests(self, test_labels, extra_tests=None,
full_suite=False, **kwargs):
# type: (List[str], Optional[List[TestCase]], bool, **Any) -> Tuple[bool, List[str]]
self.setup_test_environment()
try:
suite = self.build_suite(test_labels, extra_tests)
except AttributeError:
traceback.print_exc()
print()
print(" This is often caused by a test module/class/function that doesn't exist or ")
print(" import properly. You can usually debug in a `manage.py shell` via e.g. ")
print(" import zerver.tests.test_messages")
print(" from zerver.tests.test_messages import StreamMessagesTest")
print(" StreamMessagesTest.test_message_to_stream")
print()
sys.exit(1)
if self.parallel == 1:
# We are running in serial mode so create the databases here.
# For parallel mode, the databases are created in init_worker.
# We don't want to create and destroy DB in setup_test_environment
# because it will be called for both serial and parallel modes.
# However, at this point we know in which mode we would be running
# since that decision has already been made in build_suite().
destroy_test_databases(self.database_id)
create_test_databases(self.database_id)
            # We have to do the next line to avoid flaky scenarios in which,
            # when running a single test, getting an SQLAlchemy connection
            # causes data from a Django connection to be rolled back mid-test.
get_sqlalchemy_connection()
result = self.run_suite(suite)
self.teardown_test_environment()
failed = self.suite_result(suite, result)
if not failed:
write_instrumentation_reports(full_suite=full_suite)
return failed, result.failed_tests
class CoverageRunner(Runner):
test_suite = CoverageTestSuite
test_loader = CoverageTestLoader()
def get_test_names(suite):
# type: (TestSuite) -> List[str]
return [full_test_name(t) for t in get_tests_from_suite(suite)]
def get_tests_from_suite(suite):
# type: (TestSuite) -> TestCase
for test in suite: # type: ignore
if isinstance(test, TestSuite):
for child in get_tests_from_suite(test):
yield child
else:
yield test
def serialize_suite(suite):
# type: (TestSuite) -> Tuple[Type[TestSuite], List[str]]
return type(suite), get_test_names(suite)
def deserialize_suite(args):
# type: (Tuple[Type[Iterable[TestCase]], List[str]]) -> Iterable[TestCase]
suite_class, test_names = args
suite = suite_class() # type: ignore # Gives abstract type error.
tests = TestLoader().loadTestsFromNames(test_names)
for test in get_tests_from_suite(tests):
suite.addTest(test)
return suite
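# Sketch of the round trip used for parallel runs (the test name below is
# hypothetical): a suite is reduced to a picklable (class, test-name) pair so
# it can cross process boundaries, then rebuilt in the worker:
#
#   serialized = serialize_suite(suite)
#   # e.g. (TestSuite, ['zerver.tests.test_example.ExampleTest.test_foo'])
#   rebuilt = deserialize_suite(serialized)
#
# SubSuiteList below applies serialize_suite eagerly and deserialize_suite
# lazily in __getitem__, which is what keeps ParallelTestSuite unchanged.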
class RemoteTestRunner(django_runner.RemoteTestRunner):
resultclass = RemoteTestResult
class SubSuiteList(list):
"""
This class allows us to avoid changing the main logic of
ParallelTestSuite and still make it serializable.
"""
def __init__(self, suites):
# type: (List[TestSuite]) -> None
serialized_suites = [serialize_suite(s) for s in suites]
super(SubSuiteList, self).__init__(serialized_suites)
def __getitem__(self, index):
# type: (Any) -> Any
suite = super(SubSuiteList, self).__getitem__(index)
return deserialize_suite(suite)
|
|
import logging
from urllib import basejoin
from cStringIO import StringIO
from tg import expose, redirect, flash
from tg.decorators import without_trailing_slash
from pylons import request, app_globals as g, tmpl_context as c
from bson import ObjectId
from ming.orm import session, state
from ming.utils import LazyProperty
from allura.lib.helpers import push_config, vardec
from allura.lib.security import require, has_access, require_access
from allura import model
from allura.controllers import BaseController
from allura.lib.decorators import require_post, event_handler
from allura.lib.utils import permanent_redirect
log = logging.getLogger(__name__)
class ConfigOption(object):
def __init__(self, name, ming_type, default):
self.name, self.ming_type, self._default = (
name, ming_type, default)
@property
def default(self):
if callable(self._default):
return self._default()
return self._default
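# Illustrative only (not used by Allura itself): a ConfigOption default may be
# either a plain value or a callable, which is evaluated lazily on each access
# of .default.
_example_config_option = ConfigOption('greeting', str, lambda: 'hello')
# _example_config_option.default calls the lambda and returns 'hello'.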
class SitemapEntry(object):
def __init__(self, label, url=None, children=None, className=None, ui_icon=None, small=None):
self.label = label
self.className = className
if url is not None:
url = url.encode('utf-8')
self.url = url
self.small = small
self.ui_icon = ui_icon
if children is None:
children = []
self.children = children
def __getitem__(self, x):
"""
Automatically expand the list of sitemap child entries with the given items. Example:
SitemapEntry('HelloForge')[
SitemapEntry('foo')[
SitemapEntry('Pages')[pages]
]
]
TODO: deprecate this; use a more clear method of building a tree
"""
if isinstance(x, (list, tuple)):
self.children.extend(list(x))
else:
self.children.append(x)
return self
def __repr__(self):
l = ['<SitemapEntry ']
l.append(' label=%r' % self.label)
l.append(' url=%r' % self.url)
l.append(' children=%s' % repr(self.children).replace('\n', '\n '))
l.append('>')
return '\n'.join(l)
def bind_app(self, app):
lbl = self.label
url = self.url
if callable(lbl):
lbl = lbl(app)
if url is not None:
url = basejoin(app.url, url)
return SitemapEntry(lbl, url, [
ch.bind_app(app) for ch in self.children], className=self.className)
def extend(self, sitemap):
child_index = dict(
(ch.label, ch) for ch in self.children)
for e in sitemap:
lbl = e.label
match = child_index.get(e.label)
if match and match.url == e.url:
match.extend(e.children)
else:
self.children.append(e)
child_index[lbl] = e
class Application(object):
"""
The base Allura pluggable application
After extending this, expose the app by adding an entry point in your setup.py:
[allura]
myapp = foo.bar.baz:MyAppClass
:var status: the status level of this app. 'production' apps are available to all projects
:var bool searchable: toggle if the search box appears in the left menu
:var permissions: a list of named permissions used by the app
:var sitemap: a list of :class:`SitemapEntries <allura.app.SitemapEntry>` to create an app navigation.
:var bool installable: toggle if the app can be installed in a project
:var bool hidden: toggle if the app should be hidden from the list of tools
:var Controller self.root: the root Controller used for the app
:var Controller self.api_root: a Controller used for API access at /rest/<neighborhood>/<project>/<app>/
:var Controller self.admin: a Controller used in the admin interface
"""
__version__ = None
config_options = [
ConfigOption('mount_point', str, 'app'),
ConfigOption('mount_label', str, 'app'),
ConfigOption('ordinal', int, '0') ]
status_map = [ 'production', 'beta', 'alpha', 'user' ]
status='production'
script_name=None
root=None # root controller
api_root=None
permissions=[]
sitemap = [ ]
installable=True
searchable = False
DiscussionClass = model.Discussion
PostClass = model.Post
AttachmentClass = model.DiscussionAttachment
tool_label='Tool'
default_mount_label='Tool Name'
default_mount_point='tool'
ordinal=0
hidden = False
icons={
24:'images/admin_24.png',
32:'images/admin_32.png',
48:'images/admin_48.png'
}
def __init__(self, project, app_config_object):
self.project = project
self.config = app_config_object
self.admin = DefaultAdminController(self)
@LazyProperty
def url(self):
return self.config.url(project=self.project)
@property
def acl(self):
return self.config.acl
def parent_security_context(self):
return self.config.parent_security_context()
@classmethod
def status_int(self):
return self.status_map.index(self.status)
@classmethod
def icon_url(self, size):
'''Subclasses (tools) provide their own icons (preferred) or in
extraordinary circumstances override this routine to provide
the URL to an icon of the requested size specific to that tool.
Application.icons is simply a default if no more specific icon
is available.
'''
resource = self.icons.get(size)
if resource:
return g.theme_href(resource)
return ''
def has_access(self, user, topic):
'''Whether the user has access to send email to the given topic'''
return False
def is_visible_to(self, user):
'''Whether the user can view the app.'''
return has_access(self, 'read')(user=user)
def subscribe_admins(self):
for uid in g.credentials.userids_with_named_role(self.project._id, 'Admin'):
model.Mailbox.subscribe(
type='direct',
user_id=uid,
project_id=self.project._id,
app_config_id=self.config._id)
@classmethod
def default_options(cls):
":return: the default config options"
return dict(
(co.name, co.default)
for co in cls.config_options)
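    # For the base Application.config_options declared above, default_options()
    # returns {'mount_point': 'app', 'mount_label': 'app', 'ordinal': '0'}
    # (note that the declared default for 'ordinal' is the string '0').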
def install(self, project):
'Whatever logic is required to initially set up a tool'
# Create the discussion object
discussion = self.DiscussionClass(
shortname=self.config.options.mount_point,
name='%s Discussion' % self.config.options.mount_point,
description='Forum for %s comments' % self.config.options.mount_point)
session(discussion).flush()
self.config.discussion_id = discussion._id
self.subscribe_admins()
def uninstall(self, project=None, project_id=None):
'Whatever logic is required to tear down a tool'
if project_id is None: project_id = project._id
# De-index all the artifacts belonging to this tool in one fell swoop
g.solr.delete(q='project_id_s:"%s" AND mount_point_s:"%s"' % (
project_id, self.config.options['mount_point']))
for d in model.Discussion.query.find({
'project_id':project_id,
'app_config_id':self.config._id}):
d.delete()
self.config.delete()
session(self.config).flush()
@property
def uninstallable(self):
"""Return True if this app can be uninstalled. Controls whether the
'Delete' option appears on the admin menu for this app.
By default, an app can be uninstalled iff it can be installed, although
some apps may want/need to override this (e.g. an app which can
not be installed directly by a user, but may be uninstalled).
"""
return self.installable
def main_menu(self):
'''Apps should provide their entries to be added to the main nav
:return: a list of :class:`SitemapEntries <allura.app.SitemapEntry>`
'''
return self.sitemap
def sidebar_menu(self):
"""
Apps should override this to provide their menu
:return: a list of :class:`SitemapEntries <allura.app.SitemapEntry>`
"""
return []
def sidebar_menu_js(self):
"""
Apps can override this to provide Javascript needed by the sidebar_menu.
:return: a string of Javascript code
"""
return ""
def admin_menu(self, force_options=False):
"""
Apps may override this to provide additional admin menu items
:return: a list of :class:`SitemapEntries <allura.app.SitemapEntry>`
"""
admin_url = c.project.url()+'admin/'+self.config.options.mount_point+'/'
links = []
if self.permissions and has_access(c.project, 'admin')():
links.append(SitemapEntry('Permissions', admin_url + 'permissions'))
if force_options or len(self.config_options) > 3:
links.append(SitemapEntry('Options', admin_url + 'options', className='admin_modal'))
links.append(SitemapEntry('Label', admin_url + 'edit_label', className='admin_modal'))
return links
def handle_message(self, topic, message):
'''Handle incoming email msgs addressed to this tool'''
pass
def handle_artifact_message(self, artifact, message):
# Find ancestor comment and thread
thd, parent_id = artifact.get_discussion_thread(message)
# Handle attachments
message_id = message['message_id']
if message.get('filename'):
# Special case - the actual post may not have been created yet
log.info('Saving attachment %s', message['filename'])
fp = StringIO(message['payload'])
self.AttachmentClass.save_attachment(
message['filename'], fp,
content_type=message.get('content_type', 'application/octet-stream'),
discussion_id=thd.discussion_id,
thread_id=thd._id,
post_id=message_id,
artifact_id=message_id)
return
# Handle duplicates
post = self.PostClass.query.get(_id=message_id)
if post:
log.info('Existing message_id %s found - saving this as text attachment' % message_id)
fp = StringIO(message['payload'])
post.attach(
'alternate', fp,
content_type=message.get('content_type', 'application/octet-stream'),
discussion_id=thd.discussion_id,
thread_id=thd._id,
post_id=message_id)
else:
text=message['payload'] or '--no text body--'
post = thd.post(
message_id=message_id,
parent_id=parent_id,
text=text,
subject=message['headers'].get('Subject', 'no subject'))
class DefaultAdminController(BaseController):
def __init__(self, app):
self.app = app
@expose()
def index(self, **kw):
permanent_redirect('permissions')
@expose('jinja:allura:templates/app_admin_permissions.html')
@without_trailing_slash
def permissions(self):
from ext.admin.widgets import PermissionCard
c.card = PermissionCard()
permissions = dict((p, []) for p in self.app.permissions)
for ace in self.app.config.acl:
if ace.access == model.ACE.ALLOW:
try:
permissions[ace.permission].append(ace.role_id)
except KeyError:
# old, unknown permission
pass
return dict(
app=self.app,
allow_config=has_access(c.project, 'admin')(),
permissions=permissions)
@expose('jinja:allura:templates/app_admin_edit_label.html')
def edit_label(self):
return dict(
app=self.app,
allow_config=has_access(self.app, 'configure')())
@expose()
@require_post()
def update_label(self, mount_label):
require_access(self.app, 'configure')
self.app.config.options['mount_label'] = mount_label
redirect(request.referer)
@expose('jinja:allura:templates/app_admin_options.html')
def options(self):
return dict(
app=self.app,
allow_config=has_access(self.app, 'configure')())
@expose()
@require_post()
def configure(self, **kw):
with push_config(c, app=self.app):
require_access(self.app, 'configure')
is_admin = self.app.config.tool_name == 'admin'
if kw.pop('delete', False):
if is_admin:
flash('Cannot delete the admin tool, sorry....')
redirect('.')
c.project.uninstall_app(self.app.config.options.mount_point)
redirect('..')
for k,v in kw.iteritems():
self.app.config.options[k] = v
if is_admin:
# possibly moving admin mount point
redirect('/'
+ c.project._id
+ self.app.config.options.mount_point
+ '/'
+ self.app.config.options.mount_point
+ '/')
else:
redirect(request.referer)
@without_trailing_slash
@expose()
@vardec
@require_post()
def update(self, card=None, **kw):
self.app.config.acl = []
for args in card:
perm = args['id']
new_group_ids = args.get('new', [])
group_ids = args.get('value', [])
if isinstance(new_group_ids, basestring):
new_group_ids = [ new_group_ids ]
if isinstance(group_ids, basestring):
group_ids = [ group_ids ]
role_ids = map(ObjectId, group_ids + new_group_ids)
self.app.config.acl += [
model.ACE.allow(r, perm) for r in role_ids]
redirect(request.referer)
@event_handler('project_updated')
def subscribe_admins(topic):
for ac in c.project.app_configs:
c.project.app_instance(ac).subscribe_admins()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data structures and helpers for TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import tensor_util
def load_tensor_from_event_file(event_file_path):
"""Load a tensor from an event file.
  Assumes that the event file contains an Event protobuf and the Event protobuf
contains a tensor.
Args:
event_file_path: Path to the event file.
Returns:
The tensor value loaded from the event file. For uninitialized tensors,
return None.
"""
event = event_pb2.Event()
with open(event_file_path, "rb") as f:
event.ParseFromString(f.read())
if (event.summary.value[0].tensor.tensor_content or
event.summary.value[0].tensor.string_val):
# Initialized tensor.
tensor_value = tensor_util.MakeNdarray(event.summary.value[0].tensor)
else:
# Uninitialized tensor.
tensor_value = None
return tensor_value
def parse_node_or_tensor_name(name):
"""Get the node name from a string that can be node or tensor name.
Args:
name: An input node name (e.g., "node_a") or tensor name (e.g.,
"node_a:0"), as a str.
Returns:
1) The node name, as a str. If the input name is a tensor name, i.e.,
      contains a colon, the final colon and the following output slot
will be stripped.
2) If the input name is a tensor name, the output slot, as an int. If
the input name is not a tensor name, None.
"""
if ":" in name and not name.endswith(":"):
node_name = name[:name.rfind(":")]
output_slot = int(name[name.rfind(":") + 1:])
return node_name, output_slot
else:
return name, None
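# Small self-check sketch (not part of the original module) illustrating the
# parsing rules above; the tensor names are made up and the function is never
# called here.
def _example_parse_node_or_tensor_name():
  assert parse_node_or_tensor_name("hidden/weights:1") == ("hidden/weights", 1)
  assert parse_node_or_tensor_name("hidden/weights") == ("hidden/weights", None)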
def _get_tensor_name(node_name, output_slot):
"""Get tensor name given node name and output slot index.
Args:
node_name: Name of the node that outputs the tensor, as a string.
output_slot: Output slot index of the tensor, as an integer.
Returns:
Name of the tensor, as a string.
"""
return "%s:%d" % (node_name, output_slot)
def _get_tensor_watch_key(node_name, output_slot, debug_op):
"""Get the string representation of a debug watch on a tensor.
Args:
node_name: Name of the node by which the watched tensor is produced, as a
string.
output_slot: Output slot index of the tensor, as an integer.
debug_op: Name of the debug op that is used to watch the tensor, as a
string.
Returns:
A string representing the debug watch on the tensor (i.e., the "watch
key").
"""
return "%s:%s" % (_get_tensor_name(node_name, output_slot), debug_op)
def _is_copy_node(node_name):
"""Determine whether a node name is that of a debug Copy node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug Copy
node.
"""
return node_name.startswith("__copy_")
def _is_debug_node(node_name):
"""Determine whether a node name is that of a debug node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug node.
"""
return node_name.startswith("__dbg_")
def _parse_debug_node_name(node_name):
"""Parse the name of a debug node.
Args:
node_name: Name of the debug node.
Returns:
1. Name of the watched node, as a str.
2. Output slot index of the watched tensor, as an int.
3. Index of the debug node, as an int.
    4. Name of the debug op, as a str, e.g., "DebugIdentity".
Raises:
ValueError: If the input node name is not a valid debug node name.
"""
prefix = "__dbg_"
name = node_name
if not name.startswith(prefix):
raise ValueError("Invalid prefix in debug node name: '%s'" % node_name)
name = name[len(prefix):]
if name.count("_") < 2:
raise ValueError("Invalid debug node name: '%s'" % node_name)
debug_op = name[name.rindex("_") + 1:]
name = name[:name.rindex("_")]
debug_op_index = int(name[name.rindex("_") + 1:])
name = name[:name.rindex("_")]
if name.count(":") != 1:
raise ValueError("Invalid tensor name in debug node name: '%s'" % node_name)
watched_node_name = name[:name.index(":")]
watched_output_slot = int(name[name.index(":") + 1:])
return watched_node_name, watched_output_slot, debug_op_index, debug_op
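# Example of the naming scheme parsed above (node and op names are
# hypothetical): the debug node name "__dbg_hidden/weights:0_0_DebugIdentity"
# parses to watched node "hidden/weights", watched output slot 0, debug op
# index 0, and debug op "DebugIdentity".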
def has_inf_or_nan(datum, tensor):
"""A predicate for whether a tensor consists of any bad numerical values.
This predicate is common enough to merit definition in this module.
Bad numerical values include nans and infs.
  The signature of this function follows the requirement of DebugDumpDir's
find() method.
Args:
datum: (DebugTensorDatum) Datum metadata.
tensor: (numpy.ndarray or None) Value of the tensor. None represents
an uninitialized tensor.
Returns:
(bool) True if and only if tensor consists of any nan or inf values.
"""
  _ = datum  # Datum metadata is unused in this predicate.
if tensor is None:
# Uninitialized tensor doesn't have bad numerical values.
return False
else:
return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
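# Typical usage sketch (the dump directory path is hypothetical), relying on
# DebugDumpDir.find() defined later in this module:
#
#   dump = DebugDumpDir("/tmp/tfdbg_1")
#   culprit_data = dump.find(has_inf_or_nan)
#
# culprit_data is then a timestamp-ordered list of DebugTensorDatum whose
# dumped values contain nan or inf.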
class DebugTensorDatum(object):
"""A single tensor dumped by tfdbg.
Contains "metadata" for the dumped tensor, including node name, output slot,
debug op and timestamp.
This type does not contain the space-expensive tensor (numpy array) itself.
It just points to the file path from which the tensor can be loaded if
needed.
"""
def __init__(self, dump_root, debug_dump_rel_path):
"""DebugTensorDatum constructor.
Args:
dump_root: Debug dump root directory.
debug_dump_rel_path: Path to a debug dump file, relative to the debug
dump root directory. For example, suppose the debug dump root
directory is "/tmp/tfdbg_1" and the dump file is at
"/tmp/tfdbg_1/ns_1/node_a_0_DebugIdentity_123456789", then
the value of the debug_dump_rel_path should be
"ns_1/node_a_0_DebugIdenity_1234456789".
"""
base = os.path.basename(debug_dump_rel_path)
# TODO(cais): Add hostname and pid to support dumps from distributed
# sessions.
self._timestamp = int(base.split("_")[-1])
self._debug_op = base.split("_")[-2]
self._output_slot = int(base.split("_")[-3])
namespace = os.path.dirname(debug_dump_rel_path)
node_base_name = "_".join(base.split("_")[:-3])
if not namespace or namespace == ".":
self._node_name = node_base_name
else:
self._node_name = namespace + "/" + node_base_name
self._file_path = os.path.join(dump_root, debug_dump_rel_path)
def __str__(self):
return "{DebugTensorDatum: %s:%d @ %s @ %d}" % (self.node_name,
self.output_slot,
self.debug_op,
self.timestamp)
def __repr__(self):
return self.__str__()
def get_tensor(self):
"""Get tensor from the dump (Event) file.
Returns:
The tensor loaded from the dump (Event) file.
"""
return load_tensor_from_event_file(self.file_path)
@property
def timestamp(self):
return self._timestamp
@property
def debug_op(self):
return self._debug_op
@property
def node_name(self):
return self._node_name
@property
def output_slot(self):
return self._output_slot
@property
def tensor_name(self):
return _get_tensor_name(self.node_name, self.output_slot)
@property
def watch_key(self):
"""Watch key identities a debug watch on a tensor.
Returns:
A watch key, in the form of <tensor_name>:<debug_op>.
"""
return _get_tensor_watch_key(self.node_name, self.output_slot,
self.debug_op)
@property
def file_path(self):
return self._file_path
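# Illustrative breakdown (hypothetical dump path) of how the constructor above
# parses a dump file path:
#
#   datum = DebugTensorDatum("/tmp/tfdbg_1",
#                            "ns_1/node_a_0_DebugIdentity_1234567890")
#   datum.node_name    -> "ns_1/node_a"
#   datum.output_slot  -> 0
#   datum.debug_op     -> "DebugIdentity"
#   datum.timestamp    -> 1234567890
#   datum.watch_key    -> "ns_1/node_a:0:DebugIdentity"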
class DebugDumpDir(object):
"""Data set from a debug dump directory on filesystem.
An instance of DebugDumpDir contains all DebugTensorDatum in a tfdbg dump
  root directory. This is an immutable object, all of whose constituent tensor
  dump files and partition_graphs are loaded during the __init__ call.
"""
def __init__(self, dump_root, partition_graphs=None, validate=True):
"""DebugDumpDir constructor.
Args:
dump_root: Path to the dump root directory.
partition_graphs: A repeated field of GraphDefs representing the
partition graphs executed by the TensorFlow runtime.
validate: Whether the dump files are to be validated against the
partition graphs.
Raises:
IOError: If dump_root does not exist as a directory.
ValueError: If the dump_root directory contains file path patterns
that do not conform to the canonical dump file naming pattern.
"""
if not os.path.isdir(dump_root):
raise IOError("Dump root directory %s does not exist" % dump_root)
self._dump_root = dump_root
self._dump_tensor_data = []
for root, _, files in os.walk(self._dump_root):
for f in files:
if f.count("_") < 3:
raise ValueError(
"Dump file path does not conform to the naming pattern: %s" % f)
debug_dump_rel_path = os.path.join(
os.path.relpath(root, self._dump_root), f)
self._dump_tensor_data.append(
DebugTensorDatum(self._dump_root, debug_dump_rel_path))
# Sort the data by ascending timestamp.
# This sorting order reflects the order in which the TensorFlow
    # executor processed the nodes of the graph. It is one of many
    # possible topological sorts of the nodes. This is useful for
    # displaying tensors in the debugger frontend as well as for the use
    # case in which the user wants to find a "culprit tensor", i.e., the
    # first tensor in the graph that exhibits certain problematic
    # properties, e.g., all zero values, or bad numerical values such as
    # nan and inf.
self._dump_tensor_data = sorted(
self._dump_tensor_data, key=lambda x: x.timestamp)
# Time stamp of the first tensor dump.
if self._dump_tensor_data:
self._t0 = self._dump_tensor_data[0].timestamp
else:
self._t0 = None
# Create a map from watch key (tensor name + debug op) to
# DebugTensorDatum item.
# Also make a map from watch key to relative timestamp.
# "relative" means (absolute timestamp - t0).
self._watch_key_to_datum = {}
self._watch_key_to_rel_time = {}
for datum in self._dump_tensor_data:
if datum.watch_key not in self._watch_key_to_datum:
self._watch_key_to_datum[datum.watch_key] = [datum]
self._watch_key_to_rel_time[datum.watch_key] = [
datum.timestamp - self._t0
]
else:
self._watch_key_to_datum[datum.watch_key].append(datum)
self._watch_key_to_rel_time[datum.watch_key].append(datum.timestamp -
self._t0)
# Initialize partition graph-related information.
self._partition_graphs = None
self._node_inputs = None
self._node_ctrl_inputs = None
self._node_recipients = None
self._node_ctrl_recipients = None
self._devices = None
self._node_devices = None
self._node_op_types = None
self._debug_watches = None
# Check the dump data against partition executor graphs.
if partition_graphs:
self._load_partition_graphs(partition_graphs)
if (partition_graphs is not None) and validate:
self._validate_dump_with_graphs()
@property
def dumped_tensor_data(self):
return self._dump_tensor_data
@property
def t0(self):
"""Absolute timestamp of the first dumped tensor.
Returns:
Absolute timestamp of the first dumped tensor.
"""
return self._t0
@property
def size(self):
"""Total number of dumped tensors in the dump root directory.
Returns:
Total number of dumped tensors in the dump root directory.
"""
return len(self._dump_tensor_data)
def _load_partition_graphs(self, partition_graphs):
"""Load and process partition graphs.
Load the graphs; parse the input and control input structure; obtain the
device and op type of each node; remove the Copy and debug ops inserted
by the debugger. The gathered information can be used to validate the
tensor dumps.
Args:
partition_graphs: Partition graphs executed by the TensorFlow runtime,
represented as repeated fields of GraphDef.
Raises:
ValueError: If duplicate node names are encountered.
"""
self._partition_graphs = partition_graphs
# A map from node name to node attributes.
self._node_attributes = {}
# A map from node name to the node's non-control inputs, for non-debug &
# non-copy nodes only.
self._node_inputs = {}
# A map from node name to the node's control inputs.
self._node_ctrl_inputs = {}
# A map from node name to non-control recipients of the node's output(s).
self._node_recipients = {}
# A map from node name to control recipients of the node.
self._node_ctrl_recipients = {}
# A map from node name to debug watches.
# The key is the watched node name.
# The value is a dictionary.
# Of this dictionary, the key is the watched_output_slot.
# The value is a list of debug ops watching this output slot.
self._debug_watches = {}
# A map from node name to devices (as indices to self._devices)
self._devices = []
self._node_devices = {}
# A map from node name to node type.
self._node_op_types = {}
# A list of _Send that send Copy node outputs across devices.
copy_send_nodes = []
for pg in self._partition_graphs:
for node in pg.node:
if _is_debug_node(node.name):
# This is a debug node. Parse the node name and retrieve the
# information about debug watches on tensors. But do not include
# the node in the graph.
(watched_node_name, watched_output_slot, _,
debug_op) = _parse_debug_node_name(node.name)
if watched_node_name not in self._debug_watches:
self._debug_watches[
watched_node_name] = {watched_output_slot: [debug_op]}
else:
if watched_output_slot not in self._debug_watches[
watched_node_name]:
self._debug_watches[watched_node_name][
watched_output_slot] = [debug_op]
else:
              self._debug_watches[watched_node_name][
                  watched_output_slot].append(debug_op)
continue
if node.name in self._node_inputs:
raise ValueError("Duplicate node name: '%s'" % node.name)
# Collect node attributes.
self._node_attributes[node.name] = node.attr
# Keep track of devices.
if node.device not in self._devices and node.device:
self._devices.append(node.device)
self._node_inputs[node.name] = []
self._node_ctrl_inputs[node.name] = []
self._node_recipients[node.name] = []
self._node_ctrl_recipients[node.name] = []
self._node_devices[node.name] = node.device
self._node_op_types[node.name] = node.op
for inp in node.input:
if _is_copy_node(inp) and node.op == "_Send":
copy_send_nodes.append(node.name)
if inp.startswith("^"):
cinp = inp[1:]
self._node_ctrl_inputs[node.name].append(cinp)
else:
self._node_inputs[node.name].append(inp)
# Prune the Copy ops and associated _Send ops inserted by the debugger out
# from the non-control inputs and output recipients map. Replace the inputs
# and recipients with original ones.
copy_nodes = []
for node in self._node_inputs:
if node in copy_send_nodes:
continue
if _is_copy_node(node):
copy_nodes.append(node)
inputs = self._node_inputs[node]
for i in xrange(len(inputs)):
inp = inputs[i]
if _is_copy_node(inp):
# Find the input to the Copy node, which should be the original
# input to the node.
orig_inp = self._node_inputs[inp][0]
inputs[i] = orig_inp
# Remove the Copy ops inserted by the debugger from the maps.
for copy_node in copy_nodes:
del self._node_inputs[copy_node]
del self._node_ctrl_inputs[copy_node]
del self._node_recipients[copy_node]
del self._node_ctrl_recipients[copy_node]
# Remove the _Send ops associated with the Copy ops.
for copy_send_node in copy_send_nodes:
del self._node_inputs[copy_send_node]
del self._node_ctrl_inputs[copy_send_node]
del self._node_recipients[copy_send_node]
del self._node_ctrl_recipients[copy_send_node]
# Prune the edges from debug ops from the control edge map.
for node in self._node_ctrl_inputs:
ctrl_inputs = self._node_ctrl_inputs[node]
debug_op_inputs = []
for ctrl_inp in ctrl_inputs:
if _is_debug_node(ctrl_inp):
debug_op_inputs.append(ctrl_inp)
for debug_op_inp in debug_op_inputs:
ctrl_inputs.remove(debug_op_inp)
# Create the recipients maps.
for node in self._node_inputs:
inputs = self._node_inputs[node]
for inp in inputs:
# A tensor name: replace it with the node name.
if inp.count(":") == 1:
inp = inp.split(":")[0]
self._node_recipients[inp].append(node)
for node in self._node_ctrl_inputs:
ctrl_inputs = self._node_ctrl_inputs[node]
for ctrl_inp in ctrl_inputs:
if ctrl_inp in copy_send_nodes:
# Skip _Send ops associated with Copy nodes.
continue
self._node_ctrl_recipients[ctrl_inp].append(node)
def _validate_dump_with_graphs(self):
"""Validate the dumped tensor data against the partition graphs.
Raises:
RuntimeError: If the partition graphs have not been loaded yet.
      ValueError: If dumps contain node names not found in the partition
        graphs, or if the temporal order of the dump timestamps violates
        the input relations on the partition graphs.
"""
if not self._partition_graphs:
raise RuntimeError("No partition graphs loaded.")
# Verify that the node names in the dump data are all present in the
    # partition graphs.
for datum in self._dump_tensor_data:
if datum.node_name not in self._node_inputs:
raise ValueError("Node name '%s' is not found in partition graphs." %
datum.node_name)
pending_inputs = {}
for node in self._node_inputs:
pending_inputs[node] = []
# TODO(cais): tfdbg currently does not watch control edges. Add control
# edges to pending_inputs when it does.
inputs = self._node_inputs[node]
for inp in inputs:
if inp.count(":") == 1:
inp = inp.split(":")[0]
# Keep track of only the watched nodes, as the debugger allows clients
# to watch a subset of the nodes.
if inp in self._debug_watches:
pending_inputs[node].append(inp)
for datum in self._dump_tensor_data:
node = datum.node_name
if pending_inputs[node]:
raise ValueError("Causality violated in timing relations of debug "
"dumps: %s (%d): "
"these input(s) are not satisfied: %s" %
(node, datum.timestamp, repr(pending_inputs[node])))
# Get the recipients of the node's output
recipients = self._node_recipients[node]
for recipient in recipients:
recipient_pending_inputs = pending_inputs[recipient]
if node in recipient_pending_inputs:
if self.node_op_type(recipient) == "Merge":
# If this is a Merge op, we automatically clear the list because
# a Merge node only requires one of its two inputs.
del recipient_pending_inputs[:]
else:
del recipient_pending_inputs[recipient_pending_inputs.index(node)]
def partition_graphs(self):
"""Get the partition graphs.
Returns:
Partition graphs as repeated fields of GraphDef.
Raises:
RuntimeError: If no partition graphs have been loaded.
"""
if self._partition_graphs is None:
raise RuntimeError("No partition graphs have been loaded.")
return self._partition_graphs
def nodes(self):
"""Get a list of all nodes from the partition graphs.
Returns:
All nodes' names, as a list of str.
Raises:
RuntimeError: If no partition graphs have been loaded.
"""
if self._partition_graphs is None:
raise RuntimeError("No partition graphs have been loaded.")
return [node_name for node_name in self._node_inputs]
def node_attributes(self, node_name):
"""Get attributes of a node.
Args:
node_name: Name of the node in question.
Returns:
Attributes of the node.
Raises:
RuntimeError: If no partition graphs have been loaded.
ValueError: If no node named node_name exists.
"""
if self._partition_graphs is None:
raise RuntimeError("No partition graphs have been loaded.")
if node_name in self._node_attributes:
return self._node_attributes[node_name]
else:
raise ValueError("No node named \"%s\" exists." % node_name)
def node_inputs(self, node_name, is_control=False):
"""Get the inputs of given node according to partition graphs.
Args:
node_name: Name of the node.
is_control: Whether control inputs, rather than non-control inputs, are
to be returned.
Returns:
All non-control inputs to the node, as a list of node names.
Raises:
RuntimeError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._node_inputs is None or self._node_ctrl_inputs is None:
raise RuntimeError(
"Node inputs are not loaded from partition graphs yet.")
if node_name not in self._node_inputs:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
if is_control:
return self._node_ctrl_inputs[node_name]
else:
return self._node_inputs[node_name]
def transitive_inputs(self, node_name, include_control=True):
"""Get the transitive inputs of given node according to partition graphs.
Args:
node_name: Name of the node
include_control: Include control inputs (True by default).
Returns:
All transitive inputs to the node, as a list of node names.
Raises:
RuntimeError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if not self._node_inputs or not self._node_ctrl_inputs:
raise RuntimeError(
"Node inputs are not loaded from partition graphs yet.")
if node_name not in self._node_inputs:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
inputs = []
# Keep track of visited nodes to avoid infinite loops during input
# tracing.
visited_nodes = []
def trace_inputs(node):
"""Inner function for recursive tracing of node inputs.
      The transitive input names are appended to the captured list
      "inputs".
Args:
node: Name of the node, as a str.
"""
if node.count(":") == 1:
# This check is necessary for cases in which an input is not from the
# 0-th output slot, e.g., from a Switch op.
node = node[:node.rindex(":")]
# Stop the tracing at a Merge op, as it is generally impossible to infer
# outside the runtime which input to the Merge op is alive.
if self._node_op_types[node] == "Merge":
return
if node in visited_nodes:
# Avoid infinite loops.
return
visited_nodes.append(node)
for inp in self._node_inputs[node]:
if inp == node_name:
continue
inputs.append(inp)
trace_inputs(inp) # Recursive call.
if include_control:
for ctrl_inp in self._node_ctrl_inputs[node]:
if ctrl_inp == node_name:
continue
inputs.append(ctrl_inp)
trace_inputs(ctrl_inp) # Recursive call.
trace_inputs(node_name)
return inputs
def node_recipients(self, node_name, is_control=False):
"""Get recipient of the given node's output according to partition graphs.
Args:
node_name: Name of the node.
is_control: Whether control outputs, rather than non-control outputs,
are to be returned.
Returns:
      All non-control recipients of the node's output, as a list of node
      names.
Raises:
RuntimeError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._node_recipients is None or self._node_ctrl_recipients is None:
raise RuntimeError(
"Node recipients are not loaded from partition graphs yet.")
if node_name not in self._node_recipients:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
if is_control:
return self._node_ctrl_recipients[node_name]
else:
return self._node_recipients[node_name]
def devices(self):
"""Get the list of devices.
Returns:
      Devices, as a list of device name strings.
Raises:
RuntimeError: If node inputs and control inputs have not been loaded
from partition graphs yet.
"""
if self._devices is None:
raise RuntimeError("Devices are not loaded from partition graphs yet.")
return self._devices
def node_exists(self, node_name):
"""Test if a node exists in the partition graphs.
Args:
node_name: Name of the node to be checked, as a str.
Returns:
A boolean indicating whether the node exists.
Raises:
RuntimeError: If no partition graphs have been loaded yet.
"""
if self._node_inputs is None:
raise RuntimeError(
"Nodes have not been loaded from partition graphs yet.")
return node_name in self._node_inputs
def node_device(self, node_name):
"""Get the device of a node.
Args:
node_name: Name of the node.
Returns:
Name of the device on which the node is placed, as a str.
Raises:
RuntimeError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._node_devices is None:
raise RuntimeError(
"Node devices are not loaded from partition graphs yet.")
if node_name not in self._node_devices:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
return self._node_devices[node_name]
def node_op_type(self, node_name):
"""Get the op type of given node.
Args:
node_name: Name of the node.
Returns:
Type of the node's op, as a str.
Raises:
RuntimeError: If node op types have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._node_op_types is None:
raise RuntimeError(
"Node op types are not loaded from partition graphs yet.")
if node_name not in self._node_op_types:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
return self._node_op_types[node_name]
def debug_watch_keys(self, node_name):
"""Get all tensor watch keys of given node according to partition graphs.
Args:
node_name: Name of the node.
Returns:
All debug tensor watch keys, as a list of strings. Returns an empty list
if the node name does not correspond to any debug watch keys.
Raises:
RuntimeError: If debug watch information has not been loaded from
partition graphs yet.
"""
if node_name not in self._debug_watches:
return []
watch_keys = []
for watched_slot in self._debug_watches[node_name]:
debug_ops = self._debug_watches[node_name][watched_slot]
for debug_op in debug_ops:
watch_keys.append(
_get_tensor_watch_key(node_name, watched_slot, debug_op))
return watch_keys
def watch_key_to_data(self, debug_watch_key):
"""Get all DebugTensorDatum instances corresponding to a debug watch key.
Args:
debug_watch_key: A debug watch key, as a str.
Returns:
      A list of DebugTensorDatum instances that correspond to the debug watch
      key. If the watch key does not exist, an empty list is returned.
    """
return self._watch_key_to_datum.get(debug_watch_key, [])
def find(self, predicate, first_n=0):
"""Find dumped tensor data by a certain predicate.
Args:
predicate: A callable that takes two input arguments:
predicate(debug_tensor_datum, tensor),
where "debug_tensor_datum" is an instance of DebugTensorDatum, which
carries "metadata", such as the name of the node, the tensor's slot
index on the node, timestamp, debug op name, etc; and "tensor" is
the dumped tensor value as a numpy array.
first_n: Return only the first n dumped tensor data (in time order) for
which the predicate is True. To return all such data, let first_n be
<= 0.
Returns:
A list of all DebugTensorDatum objects in this DebugDumpDir object for
which predicate returns True, sorted in ascending order of the timestamp.
"""
matched_data = []
for datum in self._dump_tensor_data:
if predicate(datum, datum.get_tensor()):
matched_data.append(datum)
if first_n > 0 and len(matched_data) >= first_n:
break
return matched_data
def get_tensor_file_paths(self, node_name, output_slot, debug_op):
"""Get the file paths from a debug-dumped tensor.
Args:
node_name: Name of the node that the tensor is produced by.
output_slot: Output slot index of tensor.
debug_op: Name of the debug op.
Returns:
List of file path(s) loaded. This is a list because each debugged tensor
may be dumped multiple times.
Raises:
      ValueError: If the tensor does not exist in the debug dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum:
raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
watch_key)
return [datum.file_path for datum in self._watch_key_to_datum[watch_key]]
def get_tensors(self, node_name, output_slot, debug_op):
"""Get the tensor value from for a debug-dumped tensor.
The tensor may be dumped multiple times in the dump root directory, so a
list of tensors (numpy arrays) is returned.
Args:
node_name: Name of the node that the tensor is produced by.
output_slot: Output slot index of tensor.
debug_op: Name of the debug op.
Returns:
List of tensor(s) loaded from the tensor dump file(s).
Raises:
      ValueError: If the tensor does not exist in the debug dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum:
raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
watch_key)
return [datum.get_tensor() for datum in self._watch_key_to_datum[watch_key]]
def get_rel_timestamps(self, node_name, output_slot, debug_op):
"""Get the relative timestamp from for a debug-dumped tensor.
Relative timestamp means (absolute timestamp - t0), t0 being the absolute
timestamp of the first dumped tensor in the dump root. The tensor may be
dumped multiple times in the dump root directory, so a list of relative
timestamp (numpy arrays) is returned.
Args:
node_name: Name of the node that the tensor is produced by.
output_slot: Output slot index of tensor.
debug_op: Name of the debug op.
Returns:
List of relative timestamps.
Raises:
      ValueError: If the tensor does not exist in the debug dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum:
raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
watch_key)
return self._watch_key_to_rel_time[watch_key]
|
|
import os
import subprocess
import time
__author__ = "Daniel Winklehner"
__doc__ = "Script to automatically compile LaTeX documents with very simple layout. " \
"Requires a working installation of LaTeX like TeXLive or MikTeX" \
"Make sure 'header.tex' is present in this file's directory."
# The following code to find an executable in PATH is from Jay's answer to
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
import os
def is_exe(_fpath):
return os.path.isfile(_fpath) and os.access(_fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
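# Example (executable names are platform dependent and purely illustrative):
#   which("pdflatex")      -> full path such as "/usr/bin/pdflatex" on Linux
#   which("pdflatex.exe")  -> full path on Windows, or None if not on PATH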
class Object:
"""
The lowest level object.
Figure, Text, Table, etc. inherit from here.
"""
def __init__(self):
self.allowed_parents = ['document', 'section', 'subsection', 'subsubsection']
self.type = None
self.parent = None
self.next_object = None
self.prev_object = None
def append_to_section(self, section):
if self.parent is None:
if section.type in self.allowed_parents:
self.parent = section
section.append_child(self)
return 0
else:
print("Already part of a section, doing nothing")
return 1
class TextObject(Object):
"""
A snippet of text to go in the LaTeX document
"""
def __init__(self, text=None):
Object.__init__(self)
self.type = 'text'
self.text = text
def set_text(self, text):
self.text = text
def get_text(self):
return self.text
def get_latex_snippet(self):
return self.text
class FigureObject(Object):
"""
    A figure (image file with optional caption and width) to go in the LaTeX document
"""
def __init__(self, filename=None, caption=None):
Object.__init__(self)
self.type = 'figure'
self.caption = caption
self.filename = filename.replace('\\', '/').replace('_', '\string_')
self.width = 1.0
self.landscape = False
def get_landscape(self):
return self.landscape
def get_caption(self):
return self.caption
def get_filename(self):
return self.filename
def get_latex_snippet(self):
if self.filename is None:
return ""
else:
if self.landscape:
snippet = """\\begin{sidewaysfigure}
\centering
\includegraphics[width=%.2f\\textwidth]{%s}""" % (self.width, self.filename)
else:
snippet = """\\begin{figure}
\centering
\includegraphics[width=%.2f\\textwidth]{%s}""" % (self.width, self.filename)
if self.caption is not None:
snippet += """
\caption{%s}""" % self.caption
if self.landscape:
snippet += """
\label{fig:%s}
\end{sidewaysfigure}\n""" % os.path.split(self.filename)[1]
else:
snippet += """
\label{fig:%s}
\end{figure}\n""" % os.path.split(self.filename)[1]
return snippet
def set_landscape(self, landscape=False):
self.landscape = landscape
def set_caption(self, caption):
self.caption = caption
def set_filename(self, filename):
self.filename = filename.replace('\\', '/').replace('_', r'\string_')
def set_width(self, width):
"""
Set the width of the image relative to the text width.
LaTeX multiplies this value by \textwidth, as in: 0.5*\textwidth
"""
self.width = width
class SectionObject(Object):
"""
The Section class contains everything necessary to make a section in the LaTeX document.
SubSection and SubSubSection objects follow the same structure but restrict their allowed parents.
"""
def __init__(self, heading='Heading'):
Object.__init__(self)
self.allowed_parents = ['document']
self.type = 'section'
self.heading = heading
self.children = []
def append_child(self, child):
if len(self.children) > 0:
child.prev_object = self.children[-1]
self.children[-1].next_object = child
self.children.append(child)
class SubSectionObject(Object):
"""
The SubSection class contains everything necessary to make a subsection in the LaTeX document.
"""
def __init__(self, heading='Heading'):
Object.__init__(self)
self.allowed_parents = ['document', 'section']
self.type = 'subsection'
self.heading = heading
self.children = []
def append_child(self, child):
if len(self.children) > 0:
child.prev_object = self.children[-1]
self.children[-1].next_object = child
self.children.append(child)
class SubSubSectionObject(Object):
"""
The SubSubSection class contains everything necessary to make a subsubsection in the LaTeX document.
"""
def __init__(self, heading='Heading'):
Object.__init__(self)
self.allowed_parents = ['document', 'section', 'subsection']
self.type = 'subsubsection'
self.heading = heading
self.children = []
def append_child(self, child):
if len(self.children) > 0:
child.prev_object = self.children[-1]
self.children[-1].next_object = child
self.children.append(child)
class PyLatexDocument(SectionObject):
"""
Class to handle generation of simple LaTeX documents for documentation of
WARP postprocessing output
"""
def __init__(self):
"""
Create a new PyLatexDocument
:return:
"""
SectionObject.__init__(self)
# --- Define variables
self.type = 'document'
self.author = "John Doe"
self.title = "Generic Title"
self.pdflatex_exe = which("pdflatex.exe")
if self.pdflatex_exe is None:
print("Could not find pdflatex.exe in PATH, please specify executable by using set_pdflatex_exe(<path>)")
self.pdfreader_exe = which("")
self.output_path = None
self.header = self.read_header()
self.output_stream = None
def compile_latex(self, texfilename, mode='pdflatex'):
"""
Compiles the latex file
:param texfilename:
:param mode:
:return pdffilename:
"""
if self.output_path is None:
output_path = os.path.split(texfilename)[0]
else:
output_path = self.output_path
if mode == 'pdflatex':
args = [self.pdflatex_exe, "-output-directory=%s" % output_path, texfilename]
pdffilename = os.path.join(output_path, os.path.split(texfilename)[1]).replace('\\', '/')
pdffilename = os.path.splitext(pdffilename)[0]+'.pdf'
subprocess.call(args)
return pdffilename
else:
return None
def get_author(self):
return self.author
def get_header(self):
return self.header
def get_output_path(self):
return self.output_path
def get_pdflatex_exe(self):
return self.pdflatex_exe
def get_title(self):
return self.title
@staticmethod
def read_header():
"""
Read the default latex file header from file
:return text:
"""
# Read as text so the header can later be concatenated with the str body
with open(os.path.join(os.path.dirname(__file__), 'header.tex'), 'r') as infile:
text = infile.read()
return text
def set_author(self, author):
self.author = author
def set_header(self, header):
"""
Set the latex file header containing document type, margins, newcommands, etc.
Be very careful when doing this manually, other parts of the class might depend
on certain user defined commands and loaded libraries!
:param header:
:return:
"""
self.header = header
def set_output_path(self, output_path):
"""
Set the path for the PDFLaTeX additional output files (.aux, etc.)
:param output_path:
:return:
"""
self.output_path = output_path
def set_pdflatex_exe(self, pdflatex_exe):
"""
Set the path to the pdflatex executable
:param pdflatex_exe:
:return:
"""
self.pdflatex_exe = pdflatex_exe
def set_pdfreader_exe(self, pdfreader_exe):
"""
Set the path to the executable of a pdf reader (like Adobe Reader)
:param pdfreader_exe:
:return:
"""
self.pdfreader_exe = pdfreader_exe
def set_title(self, title):
"""
Set the document title
:param title:
:return:
"""
self.title = title
def show_pdf(self, pdffilename):
pdffn_adj = '"{}"'.format(pdffilename)
print(pdffn_adj)
if self.pdfreader_exe is None:
os.system(pdffn_adj)
else:
args = [self.pdfreader_exe, pdffilename]
subprocess.call(args)
return 0
def test_me(self):
"""
Function that tests the pdflatex and pdf viewer functionalities
:return:
"""
# self.set_pdflatex_exe('C:/texlive/2014/bin/win32/pdflatex.exe')
self.set_pdfreader_exe(r'C:\Program Files (x86)\Adobe\Acrobat DC\Acrobat\Acrobat.exe')
figpath = os.path.join(os.path.dirname(__file__), 'vitruvian.jpg')
texfilename = os.path.join(os.path.dirname(__file__), 'Test.tex').replace('\\', '/')
section1 = SectionObject(heading="Section 1")
section1.append_to_section(self)
section2 = SectionObject(heading="Section 2")
section2.append_to_section(self)
section3 = SectionObject(heading="Section 3")
section3.append_to_section(self)
subsection1 = SubSectionObject(heading="Subsection 1")
subsection1.append_to_section(section2)
subsection2 = SubSectionObject(heading="Subsection 2")
subsection2.append_to_section(section2)
text1 = TextObject(text="First text in my LaTeX document\n")
text1.append_to_section(section1)
text2 = TextObject(text="Second text in my LaTeX document\n")
text2.append_to_section(subsection1)
text3 = TextObject(text="Third text in my LaTeX document\n")
text3.append_to_section(subsection1)
text3.append_to_section(section1)
figure1 = FigureObject(filename=figpath, caption="Current Variation w/o offset")
figure1.set_width(0.5)
figure1.append_to_section(subsection2)
self.write_tex_file(texfilename)
pdffilename = self.compile_latex(texfilename, mode='pdflatex')
if pdffilename is not None:
time.sleep(2)
self.show_pdf(pdffilename)
return 0
def update_output_stream(self):
"""
"""
self.output_stream = self.header
self.output_stream += """
\\title{%s}
\\author{%s}
\\begin{document}
\\maketitle
""" % (self.title, self.author)
# --- Add the main body from the different sources here ------------------------------------------------------ #
for level0 in self.children:
if level0.type in ['text', 'figure']:
self.output_stream += level0.get_latex_snippet()
self.output_stream += "\n"
elif level0.type in ['section', 'subsection', 'subsubsection']:
self.output_stream += '\\%s{%s}\n\n' % (level0.type, level0.heading)
for level1 in level0.children:
if level1.type in ['text', 'figure']:
self.output_stream += level1.get_latex_snippet()
self.output_stream += "\n"
elif level1.type in ['subsection', 'subsubsection']:
self.output_stream += '\\%s{%s}\n\n' % (level1.type, level1.heading)
for level2 in level1.children:
if level2.type in ['text', 'figure']:
self.output_stream += level2.get_latex_snippet()
self.output_stream += "\n"
elif level2.type in ['subsubsection']:
self.output_stream += '\\%s{%s}\n\n' % (level2.type, level2.heading)
for level3 in level2.children:
if level3.type in ['text', 'figure']:
self.output_stream += level3.get_latex_snippet()
self.output_stream += "\n"
self.output_stream += """
\\end{document}
"""
return 0
def write_tex_file(self, texfilename):
"""
Write the output stream to a tex file
:param texfilename:
:return:
"""
self.update_output_stream()
with open(texfilename, 'w') as outfile:
outfile.write(self.output_stream)
return 0
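# Minimal end-to-end sketch (illustrative only; the file name is an
# assumption): build a one-section document with a single text snippet,
# write the .tex file and compile it with pdflatex. This mirrors test_me()
# above at the smallest possible scale and requires 'header.tex' next to
# this module.
def _example_minimal_document(texfilename="minimal_example.tex"):
    doc = PyLatexDocument()
    doc.set_title("Minimal Example")
    doc.set_author("Jane Doe")
    section = SectionObject(heading="Introduction")
    section.append_to_section(doc)
    text = TextObject(text="Hello, LaTeX world.\n")
    text.append_to_section(section)
    doc.write_tex_file(texfilename)
    return doc.compile_latex(texfilename, mode='pdflatex')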
if __name__ == '__main__':
# Tests
pld = PyLatexDocument()
pld.test_me()
|
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import sys
import mock
import netaddr
from oslo.config import cfg
import testtools
from neutron.agent.linux import async_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.agent import ovs_neutron_agent
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
NOTIFIER = ('neutron.plugins.openvswitch.'
'ovs_neutron_plugin.AgentNotifierApi')
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
FAKE_MAC = '00:11:22:33:44:55'
FAKE_IP1 = '10.0.0.1'
FAKE_IP2 = '10.0.0.2'
class CreateAgentConfigMap(base.BaseTestCase):
def test_create_agent_config_map_succeeds(self):
self.assertTrue(ovs_neutron_agent.create_agent_config_map(cfg.CONF))
def test_create_agent_config_map_fails_for_invalid_tunnel_config(self):
# An ip address is required for tunneling but there is no default,
# verify this for both gre and vxlan tunnels.
cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE],
group='AGENT')
with testtools.ExpectedException(ValueError):
ovs_neutron_agent.create_agent_config_map(cfg.CONF)
cfg.CONF.set_override('tunnel_types', [p_const.TYPE_VXLAN],
group='AGENT')
with testtools.ExpectedException(ValueError):
ovs_neutron_agent.create_agent_config_map(cfg.CONF)
def test_create_agent_config_map_enable_tunneling(self):
# Verify setting only enable_tunneling will default tunnel_type to GRE
cfg.CONF.set_override('tunnel_types', None, group='AGENT')
cfg.CONF.set_override('enable_tunneling', True, group='OVS')
cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
cfgmap = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
self.assertEqual(cfgmap['tunnel_types'], [p_const.TYPE_GRE])
def test_create_agent_config_map_fails_no_local_ip(self):
# An ip address is required for tunneling but there is no default
cfg.CONF.set_override('enable_tunneling', True, group='OVS')
with testtools.ExpectedException(ValueError):
ovs_neutron_agent.create_agent_config_map(cfg.CONF)
def test_create_agent_config_map_fails_for_invalid_tunnel_type(self):
cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT')
with testtools.ExpectedException(ValueError):
ovs_neutron_agent.create_agent_config_map(cfg.CONF)
def test_create_agent_config_map_multiple_tunnel_types(self):
cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE,
p_const.TYPE_VXLAN], group='AGENT')
cfgmap = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
self.assertEqual(cfgmap['tunnel_types'],
[p_const.TYPE_GRE, p_const.TYPE_VXLAN])
def test_create_agent_config_map_enable_distributed_routing(self):
self.addCleanup(cfg.CONF.reset)
# Verify that enable_distributed_routing is passed through to the config map
cfg.CONF.set_override('enable_distributed_routing', True,
group='AGENT')
cfgmap = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
self.assertEqual(cfgmap['enable_distributed_routing'], True)
class TestOvsNeutronAgent(base.BaseTestCase):
def setUp(self):
super(TestOvsNeutronAgent, self).setUp()
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
# Avoid rpc initialization for unit tests
cfg.CONF.set_override('rpc_backend',
'neutron.openstack.common.rpc.impl_fake')
kwargs = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
class MockFixedIntervalLoopingCall(object):
def __init__(self, f):
self.f = f
def start(self, interval=0):
self.f()
with contextlib.nested(
mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.'
'OVSNeutronAgent.setup_integration_br',
return_value=mock.Mock()),
mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.'
'OVSNeutronAgent.setup_ancillary_bridges',
return_value=[]),
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'create'),
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_secure_mode'),
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'get_local_port_mac',
return_value='00:00:00:00:00:01'),
mock.patch('neutron.agent.linux.utils.get_interface_mac',
return_value='00:00:00:00:00:01'),
mock.patch('neutron.agent.linux.ovs_lib.'
'get_bridges'),
mock.patch('neutron.openstack.common.loopingcall.'
'FixedIntervalLoopingCall',
new=MockFixedIntervalLoopingCall),
mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.'
'OVSNeutronAgent._check_arp_responder_support',
return_value=True)):
self.agent = ovs_neutron_agent.OVSNeutronAgent(**kwargs)
self.agent.tun_br = mock.Mock()
self.agent.sg_agent = mock.Mock()
def _mock_port_bound(self, ofport=None, new_local_vlan=None,
old_local_vlan=None):
port = mock.Mock()
port.ofport = ofport
net_uuid = 'my-net-uuid'
fixed_ips = [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.1'}]
if old_local_vlan is not None:
self.agent.local_vlan_map[net_uuid] = (
ovs_neutron_agent.LocalVLANMapping(
old_local_vlan, None, None, None))
with contextlib.nested(
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_db_attribute', return_value=True),
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'db_get_val', return_value=str(old_local_vlan)),
mock.patch.object(self.agent.int_br, 'delete_flows')
) as (set_ovs_db_func, get_ovs_db_func, delete_flows_func):
self.agent.port_bound(port, net_uuid, 'local', None, None,
fixed_ips, "compute:None", False)
get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag")
if new_local_vlan != old_local_vlan:
set_ovs_db_func.assert_called_once_with(
"Port", mock.ANY, "tag", str(new_local_vlan))
if ofport != -1:
delete_flows_func.assert_called_once_with(in_port=port.ofport)
else:
self.assertFalse(delete_flows_func.called)
else:
self.assertFalse(set_ovs_db_func.called)
self.assertFalse(delete_flows_func.called)
def _setup_for_dvr_test(self, ofport=10):
self._port = mock.Mock()
self._port.ofport = ofport
self._port.vif_id = "1234-5678-90"
self.agent.enable_distributed_routing = True
self.agent.enable_tunneling = True
self.agent.patch_tun_ofport = 1
self.agent.patch_int_ofport = 2
self.agent.dvr_agent.local_ports = {}
self.agent.local_vlan_map = {}
self.agent.dvr_agent.enable_distributed_routing = True
self.agent.dvr_agent.enable_tunneling = True
self.agent.dvr_agent.patch_tun_ofport = 1
self.agent.dvr_agent.patch_int_ofport = 2
self.agent.dvr_agent.tun_br = mock.Mock()
self.agent.dvr_agent.local_dvr_map = {}
self.agent.dvr_agent.registered_dvr_macs = set()
self.agent.dvr_agent.dvr_mac_address = 'aa:22:33:44:55:66'
self._net_uuid = 'my-net-uuid'
self._fixed_ips = [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.1'}]
self._compute_port = mock.Mock()
self._compute_port.ofport = 20
self._compute_port.vif_id = "1234-5678-91"
self._old_local_vlan = None
self._compute_fixed_ips = [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.3'}]
def test_port_bound_deletes_flows_for_valid_ofport(self):
self._mock_port_bound(ofport=1, new_local_vlan=1)
def test_port_bound_ignores_flows_for_invalid_ofport(self):
self._mock_port_bound(ofport=-1, new_local_vlan=1)
def test_port_bound_does_not_rewire_if_already_bound(self):
self._mock_port_bound(ofport=-1, new_local_vlan=1, old_local_vlan=1)
def test_port_bound_for_dvr_interface(self, ofport=10):
self._setup_for_dvr_test()
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_db_attribute',
return_value=True):
with contextlib.nested(
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'db_get_val',
return_value=str(self._old_local_vlan)),
mock.patch.object(
self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr',
return_value={'gateway_ip': '1.1.1.1',
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_compute_ports_on_host_by_subnet',
return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
add_flow_tun_fn, delete_flows_tun_fn):
self.agent.port_bound(
self._port, self._net_uuid, 'vxlan',
None, None, self._fixed_ips,
n_const.DEVICE_OWNER_DVR_INTERFACE,
False)
self.assertTrue(add_flow_tun_fn.called)
self.assertTrue(delete_flows_int_fn.called)
def test_port_bound_for_dvr_with_compute_ports(self, ofport=10):
self._setup_for_dvr_test()
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_db_attribute',
return_value=True):
with contextlib.nested(
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'db_get_val',
return_value=str(self._old_local_vlan)),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_subnet_for_dvr',
return_value={
'gateway_ip': '1.1.1.1',
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_compute_ports_on_host_by_subnet',
return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
add_flow_tun_fn, delete_flows_tun_fn):
self.agent.port_bound(
self._port, self._net_uuid, 'vxlan',
None, None, self._fixed_ips,
n_const.DEVICE_OWNER_DVR_INTERFACE,
False)
self.agent.port_bound(self._compute_port, self._net_uuid,
'vxlan', None, None,
self._compute_fixed_ips,
"compute:None", False)
self.assertTrue(add_flow_tun_fn.called)
self.assertTrue(add_flow_int_fn.called)
self.assertTrue(delete_flows_int_fn.called)
def test_port_bound_for_dvr_with_csnat_ports(self, ofport=10):
self._setup_for_dvr_test()
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_db_attribute',
return_value=True):
with contextlib.nested(
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'db_get_val',
return_value=str(self._old_local_vlan)),
mock.patch.object(
self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr',
return_value={'gateway_ip': '1.1.1.1',
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_compute_ports_on_host_by_subnet',
return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
add_flow_tun_fn, delete_flows_tun_fn):
self.agent.port_bound(
self._port, self._net_uuid, 'vxlan',
None, None, self._fixed_ips,
n_const.DEVICE_OWNER_ROUTER_SNAT,
False)
self.assertTrue(add_flow_int_fn.called)
self.assertTrue(delete_flows_int_fn.called)
def test_treat_devices_removed_for_dvr_interface(self, ofport=10):
self._setup_for_dvr_test()
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_db_attribute',
return_value=True):
with contextlib.nested(
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'db_get_val',
return_value=str(self._old_local_vlan)),
mock.patch.object(
self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr',
return_value={'gateway_ip': '1.1.1.1',
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_compute_ports_on_host_by_subnet',
return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
add_flow_tun_fn, delete_flows_tun_fn):
self.agent.port_bound(
self._port, self._net_uuid, 'vxlan',
None, None, self._fixed_ips,
n_const.DEVICE_OWNER_DVR_INTERFACE,
False)
self.assertTrue(add_flow_tun_fn.called)
self.assertTrue(delete_flows_int_fn.called)
with contextlib.nested(
mock.patch.object(self.agent, 'reclaim_local_vlan'),
mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
return_value=None),
mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
mock.patch.object(self.agent.dvr_agent.tun_br,
'delete_flows')) as (reclaim_vlan_fn,
update_dev_down_fn,
delete_flows_int_fn,
delete_flows_tun_fn):
self.agent.treat_devices_removed([self._port.vif_id])
self.assertTrue(delete_flows_int_fn.called)
self.assertTrue(delete_flows_tun_fn.called)
def test_treat_devices_removed_for_dvr_with_compute_ports(self, ofport=10):
self._setup_for_dvr_test()
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_db_attribute',
return_value=True):
with contextlib.nested(
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'db_get_val',
return_value=str(self._old_local_vlan)),
mock.patch.object(
self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr',
return_value={'gateway_ip': '1.1.1.1',
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_compute_ports_on_host_by_subnet',
return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
add_flow_tun_fn, delete_flows_tun_fn):
self.agent.port_bound(
self._port, self._net_uuid, 'vxlan',
None, None, self._fixed_ips,
n_const.DEVICE_OWNER_DVR_INTERFACE,
False)
self.agent.port_bound(self._compute_port,
self._net_uuid, 'vxlan',
None, None,
self._compute_fixed_ips,
"compute:None", False)
self.assertTrue(add_flow_tun_fn.called)
self.assertTrue(add_flow_int_fn.called)
self.assertTrue(delete_flows_int_fn.called)
with contextlib.nested(
mock.patch.object(self.agent, 'reclaim_local_vlan'),
mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
return_value=None),
mock.patch.object(self.agent.dvr_agent.int_br,
'delete_flows')) as (reclaim_vlan_fn,
update_dev_down_fn,
delete_flows_int_fn):
self.agent.treat_devices_removed([self._compute_port.vif_id])
self.assertTrue(delete_flows_int_fn.called)
def test_treat_devices_removed_for_dvr_csnat_port(self, ofport=10):
self._setup_for_dvr_test()
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_db_attribute',
return_value=True):
with contextlib.nested(
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'db_get_val',
return_value=str(self._old_local_vlan)),
mock.patch.object(
self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr',
return_value={'gateway_ip': '1.1.1.1',
'cidr': '1.1.1.0/24',
'gateway_mac': 'aa:bb:cc:11:22:33'}),
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_compute_ports_on_host_by_subnet',
return_value=[]),
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),
mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
) as (get_ovs_db_func, get_subnet_fn, get_cphost_fn,
get_vif_fn, add_flow_int_fn, delete_flows_int_fn,
add_flow_tun_fn, delete_flows_tun_fn):
self.agent.port_bound(
self._port, self._net_uuid, 'vxlan',
None, None, self._fixed_ips,
n_const.DEVICE_OWNER_ROUTER_SNAT,
False)
self.assertTrue(add_flow_int_fn.called)
self.assertTrue(delete_flows_int_fn.called)
with contextlib.nested(
mock.patch.object(self.agent, 'reclaim_local_vlan'),
mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
return_value=None),
mock.patch.object(self.agent.dvr_agent.int_br,
'delete_flows')) as (reclaim_vlan_fn,
update_dev_down_fn,
delete_flows_int_fn):
self.agent.treat_devices_removed([self._port.vif_id])
self.assertTrue(delete_flows_int_fn.called)
def test_setup_dvr_flows_on_int_br(self):
self._setup_for_dvr_test()
with contextlib.nested(
mock.patch.object(
self.agent.dvr_agent.plugin_rpc,
'get_dvr_mac_address_by_host',
return_value={'host': 'cn1',
'mac_address': 'aa:bb:cc:dd:ee:ff'}),
mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.int_br,
'remove_all_flows'),
mock.patch.object(
self.agent.dvr_agent.plugin_rpc,
'get_dvr_mac_address_list',
return_value=[{'host': 'cn1',
'mac_address': 'aa:bb:cc:dd:ee:ff'},
{'host': 'cn2',
'mac_address': '11:22:33:44:55:66'}])) as \
(get_subnet_fn, get_cphost_fn, get_vif_fn,
add_flow_fn, delete_flows_fn):
self.agent.dvr_agent.setup_dvr_flows_on_integ_tun_br()
def _test_port_dead(self, cur_tag=None):
port = mock.Mock()
port.ofport = 1
with contextlib.nested(
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_db_attribute', return_value=True),
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'db_get_val', return_value=cur_tag),
mock.patch.object(self.agent.int_br, 'add_flow')
) as (set_ovs_db_func, get_ovs_db_func, add_flow_func):
self.agent.port_dead(port)
get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag")
if cur_tag == ovs_neutron_agent.DEAD_VLAN_TAG:
self.assertFalse(set_ovs_db_func.called)
self.assertFalse(add_flow_func.called)
else:
set_ovs_db_func.assert_called_once_with(
"Port", mock.ANY, "tag", str(ovs_neutron_agent.DEAD_VLAN_TAG))
add_flow_func.assert_called_once_with(
priority=2, in_port=port.ofport, actions="drop")
def test_port_dead(self):
self._test_port_dead()
def test_port_dead_with_port_already_dead(self):
self._test_port_dead(ovs_neutron_agent.DEAD_VLAN_TAG)
def mock_scan_ports(self, vif_port_set=None, registered_ports=None,
updated_ports=None, port_tags_dict=None):
if port_tags_dict is None: # Because empty dicts evaluate as False.
port_tags_dict = {}
with contextlib.nested(
mock.patch.object(self.agent.int_br, 'get_vif_port_set',
return_value=vif_port_set),
mock.patch.object(self.agent.int_br, 'get_port_tag_dict',
return_value=port_tags_dict)
):
return self.agent.scan_ports(registered_ports, updated_ports)
def test_scan_ports_returns_current_only_for_unchanged_ports(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 3])
expected = {'current': vif_port_set}
actual = self.mock_scan_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
def test_scan_ports_returns_port_changes(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
expected = dict(current=vif_port_set, added=set([3]), removed=set([2]))
actual = self.mock_scan_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
def _test_scan_ports_with_updated_ports(self, updated_ports):
vif_port_set = set([1, 3, 4])
registered_ports = set([1, 2, 4])
expected = dict(current=vif_port_set, added=set([3]),
removed=set([2]), updated=set([4]))
actual = self.mock_scan_ports(vif_port_set, registered_ports,
updated_ports)
self.assertEqual(expected, actual)
def test_scan_ports_finds_known_updated_ports(self):
self._test_scan_ports_with_updated_ports(set([4]))
def test_scan_ports_ignores_unknown_updated_ports(self):
# Port '5' is not present in the current ports, so it has either never
# been wired or has already been removed; hence it should be ignored
self._test_scan_ports_with_updated_ports(set([4, 5]))
def test_scan_ports_ignores_updated_port_if_removed(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
updated_ports = set([1, 2])
expected = dict(current=vif_port_set, added=set([3]),
removed=set([2]), updated=set([1]))
actual = self.mock_scan_ports(vif_port_set, registered_ports,
updated_ports)
self.assertEqual(expected, actual)
def test_scan_ports_no_vif_changes_returns_updated_port_only(self):
vif_port_set = set([1, 2, 3])
registered_ports = set([1, 2, 3])
updated_ports = set([2])
expected = dict(current=vif_port_set, updated=set([2]))
actual = self.mock_scan_ports(vif_port_set, registered_ports,
updated_ports)
self.assertEqual(expected, actual)
def test_update_ports_returns_changed_vlan(self):
br = ovs_lib.OVSBridge('br-int', 'sudo')
mac = "ca:fe:de:ad:be:ef"
port = ovs_lib.VifPort(1, 1, 1, mac, br)
lvm = ovs_neutron_agent.LocalVLANMapping(
1, '1', None, 1, {port.vif_id: port})
local_vlan_map = {'1': lvm}
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
port_tags_dict = {1: []}
expected = dict(
added=set([3]), current=vif_port_set,
removed=set([2]), updated=set([1])
)
with mock.patch.dict(self.agent.local_vlan_map, local_vlan_map):
actual = self.mock_scan_ports(
vif_port_set, registered_ports, port_tags_dict=port_tags_dict)
self.assertEqual(expected, actual)
def test_treat_devices_added_returns_raises_for_missing_device(self):
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc,
'get_devices_details_list',
side_effect=Exception()),
mock.patch.object(self.agent.int_br, 'get_vif_port_by_id',
return_value=mock.Mock())):
self.assertRaises(
ovs_neutron_agent.DeviceListRetrievalError,
self.agent.treat_devices_added_or_updated, [{}], False)
def _mock_treat_devices_added_updated(self, details, port, func_name):
"""Mock treat devices added or updated.
:param details: the details to return for the device
:param port: the port that get_vif_port_by_id should return
:param func_name: the function that should be called
:returns: whether the named function was called
"""
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc,
'get_devices_details_list',
return_value=[details]),
mock.patch.object(self.agent.int_br, 'get_vif_port_by_id',
return_value=port),
mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),
mock.patch.object(self.agent.plugin_rpc, 'update_device_down'),
mock.patch.object(self.agent, func_name)
) as (get_dev_fn, get_vif_func, upd_dev_up, upd_dev_down, func):
skip_devs = self.agent.treat_devices_added_or_updated([{}], False)
# The function should not raise
self.assertFalse(skip_devs)
return func.called
def test_treat_devices_added_updated_ignores_invalid_ofport(self):
port = mock.Mock()
port.ofport = -1
self.assertFalse(self._mock_treat_devices_added_updated(
mock.MagicMock(), port, 'port_dead'))
def test_treat_devices_added_updated_marks_unknown_port_as_dead(self):
port = mock.Mock()
port.ofport = 1
self.assertTrue(self._mock_treat_devices_added_updated(
mock.MagicMock(), port, 'port_dead'))
def test_treat_devices_added_does_not_process_missing_port(self):
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc, 'get_device_details'),
mock.patch.object(self.agent.int_br, 'get_vif_port_by_id',
return_value=None)
) as (get_dev_fn, get_vif_func):
self.assertFalse(get_dev_fn.called)
def test_treat_devices_added_updated_updates_known_port(self):
details = mock.MagicMock()
details.__contains__.side_effect = lambda x: True
self.assertTrue(self._mock_treat_devices_added_updated(
details, mock.Mock(), 'treat_vif_port'))
def test_treat_devices_added_updated_skips_if_port_not_found(self):
dev_mock = mock.MagicMock()
dev_mock.__getitem__.return_value = 'the_skipped_one'
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc,
'get_devices_details_list',
return_value=[dev_mock]),
mock.patch.object(self.agent.int_br, 'get_vif_port_by_id',
return_value=None),
mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),
mock.patch.object(self.agent.plugin_rpc, 'update_device_down'),
mock.patch.object(self.agent, 'treat_vif_port')
) as (get_dev_fn, get_vif_func, upd_dev_up,
upd_dev_down, treat_vif_port):
skip_devs = self.agent.treat_devices_added_or_updated([{}], False)
# The missing device should be reported as skipped and must not be
# processed
self.assertEqual(['the_skipped_one'], skip_devs)
self.assertFalse(treat_vif_port.called)
self.assertFalse(upd_dev_down.called)
self.assertFalse(upd_dev_up.called)
def test_treat_devices_added_updated_put_port_down(self):
fake_details_dict = {'admin_state_up': False,
'port_id': 'xxx',
'device': 'xxx',
'network_id': 'yyy',
'physical_network': 'foo',
'segmentation_id': 'bar',
'network_type': 'baz',
'fixed_ips': [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.1'}],
'device_owner': 'compute:None'
}
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc,
'get_devices_details_list',
return_value=[fake_details_dict]),
mock.patch.object(self.agent.int_br, 'get_vif_port_by_id',
return_value=mock.MagicMock()),
mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),
mock.patch.object(self.agent.plugin_rpc, 'update_device_down'),
mock.patch.object(self.agent, 'treat_vif_port')
) as (get_dev_fn, get_vif_func, upd_dev_up,
upd_dev_down, treat_vif_port):
skip_devs = self.agent.treat_devices_added_or_updated([{}], False)
# The function should return False for resync
self.assertFalse(skip_devs)
self.assertTrue(treat_vif_port.called)
self.assertTrue(upd_dev_down.called)
def test_treat_devices_removed_returns_true_for_missing_device(self):
with mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
side_effect=Exception()):
self.assertTrue(self.agent.treat_devices_removed([{}]))
def _mock_treat_devices_removed(self, port_exists):
details = dict(exists=port_exists)
with mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
return_value=details):
with mock.patch.object(self.agent, 'port_unbound') as port_unbound:
self.assertFalse(self.agent.treat_devices_removed([{}]))
self.assertTrue(port_unbound.called)
def test_treat_devices_removed_unbinds_port(self):
self._mock_treat_devices_removed(True)
def test_treat_devices_removed_ignores_missing_port(self):
self._mock_treat_devices_removed(False)
def _test_process_network_ports(self, port_info):
with contextlib.nested(
mock.patch.object(self.agent.sg_agent, "setup_port_filters"),
mock.patch.object(self.agent, "treat_devices_added_or_updated",
return_value=[]),
mock.patch.object(self.agent, "treat_devices_removed",
return_value=False)
) as (setup_port_filters, device_added_updated, device_removed):
self.assertFalse(self.agent.process_network_ports(port_info,
False))
setup_port_filters.assert_called_once_with(
port_info['added'], port_info.get('updated', set()))
device_added_updated.assert_called_once_with(
port_info['added'] | port_info.get('updated', set()), False)
device_removed.assert_called_once_with(port_info['removed'])
def test_process_network_ports(self):
self._test_process_network_ports(
{'current': set(['tap0']),
'removed': set(['eth0']),
'added': set(['eth1'])})
def test_process_network_port_with_updated_ports(self):
self._test_process_network_ports(
{'current': set(['tap0', 'tap1']),
'updated': set(['tap1', 'eth1']),
'removed': set(['eth0']),
'added': set(['eth1'])})
def test_report_state(self):
with mock.patch.object(self.agent.state_rpc,
"report_state") as report_st:
self.agent.int_br_device_count = 5
self.agent._report_state()
report_st.assert_called_with(self.agent.context,
self.agent.agent_state)
self.assertNotIn("start_flag", self.agent.agent_state)
self.assertEqual(
self.agent.agent_state["configurations"]["devices"],
self.agent.int_br_device_count
)
def test_network_delete(self):
with contextlib.nested(
mock.patch.object(self.agent, "reclaim_local_vlan"),
mock.patch.object(self.agent.tun_br, "cleanup_tunnel_port")
) as (recl_fn, clean_tun_fn):
self.agent.network_delete("unused_context",
network_id="123")
self.assertFalse(recl_fn.called)
self.agent.local_vlan_map["123"] = "LVM object"
self.agent.network_delete("unused_context",
network_id="123")
self.assertFalse(clean_tun_fn.called)
recl_fn.assert_called_with("123")
def test_port_update(self):
port = {"id": "123",
"network_id": "124",
"admin_state_up": False}
self.agent.port_update("unused_context",
port=port,
network_type="vlan",
segmentation_id="1",
physical_network="physnet")
self.assertEqual(set(['123']), self.agent.updated_ports)
def test_setup_physical_bridges(self):
with contextlib.nested(
mock.patch.object(ip_lib, "device_exists"),
mock.patch.object(sys, "exit"),
mock.patch.object(utils, "execute"),
mock.patch.object(ovs_lib.OVSBridge, "remove_all_flows"),
mock.patch.object(ovs_lib.OVSBridge, "add_flow"),
mock.patch.object(ovs_lib.OVSBridge, "add_patch_port"),
mock.patch.object(ovs_lib.OVSBridge, "delete_port"),
mock.patch.object(ovs_lib.OVSBridge, "set_db_attribute"),
mock.patch.object(self.agent.int_br, "add_flow"),
mock.patch.object(self.agent.int_br, "add_patch_port"),
mock.patch.object(self.agent.int_br, "delete_port"),
mock.patch.object(self.agent.int_br, "set_db_attribute"),
) as (devex_fn, sysexit_fn, utilsexec_fn, remflows_fn, ovs_add_flow_fn,
ovs_addpatch_port_fn, ovs_delport_fn, ovs_set_attr_fn,
br_add_flow_fn, br_addpatch_port_fn, br_delport_fn,
br_set_attr_fn):
devex_fn.return_value = True
parent = mock.MagicMock()
parent.attach_mock(ovs_addpatch_port_fn, 'phy_add_patch_port')
parent.attach_mock(ovs_add_flow_fn, 'phy_add_flow')
parent.attach_mock(ovs_set_attr_fn, 'phy_set_attr')
parent.attach_mock(br_addpatch_port_fn, 'int_add_patch_port')
parent.attach_mock(br_add_flow_fn, 'int_add_flow')
parent.attach_mock(br_set_attr_fn, 'int_set_attr')
ovs_addpatch_port_fn.return_value = "phy_ofport"
br_addpatch_port_fn.return_value = "int_ofport"
self.agent.setup_physical_bridges({"physnet1": "br-eth"})
expected_calls = [
mock.call.phy_add_flow(priority=1, actions='normal'),
mock.call.int_add_patch_port('int-br-eth',
constants.NONEXISTENT_PEER),
mock.call.phy_add_patch_port('phy-br-eth',
constants.NONEXISTENT_PEER),
mock.call.int_add_flow(priority=2, in_port='int_ofport',
actions='drop'),
mock.call.phy_add_flow(priority=2, in_port='phy_ofport',
actions='drop'),
mock.call.int_set_attr('Interface', 'int-br-eth',
'options:peer', 'phy-br-eth'),
mock.call.phy_set_attr('Interface', 'phy-br-eth',
'options:peer', 'int-br-eth'),
]
parent.assert_has_calls(expected_calls)
self.assertEqual(self.agent.int_ofports["physnet1"],
"int_ofport")
self.assertEqual(self.agent.phys_ofports["physnet1"],
"phy_ofport")
def test_setup_physical_bridges_using_veth_interconnection(self):
self.agent.use_veth_interconnection = True
with contextlib.nested(
mock.patch.object(ip_lib, "device_exists"),
mock.patch.object(sys, "exit"),
mock.patch.object(utils, "execute"),
mock.patch.object(ovs_lib.OVSBridge, "remove_all_flows"),
mock.patch.object(ovs_lib.OVSBridge, "add_flow"),
mock.patch.object(ovs_lib.OVSBridge, "add_port"),
mock.patch.object(ovs_lib.OVSBridge, "delete_port"),
mock.patch.object(self.agent.int_br, "add_port"),
mock.patch.object(self.agent.int_br, "delete_port"),
mock.patch.object(ip_lib.IPWrapper, "add_veth"),
mock.patch.object(ip_lib.IpLinkCommand, "delete"),
mock.patch.object(ip_lib.IpLinkCommand, "set_up"),
mock.patch.object(ip_lib.IpLinkCommand, "set_mtu"),
mock.patch.object(ovs_lib, "get_bridges")
) as (devex_fn, sysexit_fn, utilsexec_fn, remflows_fn, ovs_addfl_fn,
ovs_addport_fn, ovs_delport_fn, br_addport_fn, br_delport_fn,
addveth_fn, linkdel_fn, linkset_fn, linkmtu_fn, get_br_fn):
devex_fn.return_value = True
parent = mock.MagicMock()
parent.attach_mock(utilsexec_fn, 'utils_execute')
parent.attach_mock(linkdel_fn, 'link_delete')
parent.attach_mock(addveth_fn, 'add_veth')
addveth_fn.return_value = (ip_lib.IPDevice("int-br-eth1"),
ip_lib.IPDevice("phy-br-eth1"))
ovs_addport_fn.return_value = "int_ofport"
br_addport_fn.return_value = "phys_veth"
get_br_fn.return_value = ["br-eth"]
self.agent.setup_physical_bridges({"physnet1": "br-eth"})
expected_calls = [mock.call.link_delete(),
mock.call.utils_execute(['/sbin/udevadm',
'settle',
'--timeout=10']),
mock.call.add_veth('int-br-eth',
'phy-br-eth')]
parent.assert_has_calls(expected_calls, any_order=False)
self.assertEqual(self.agent.int_ofports["physnet1"],
"phys_veth")
self.assertEqual(self.agent.phys_ofports["physnet1"],
"int_ofport")
def test_get_peer_name(self):
bridge1 = "A_REALLY_LONG_BRIDGE_NAME1"
bridge2 = "A_REALLY_LONG_BRIDGE_NAME2"
self.agent.use_veth_interconnection = True
self.assertEqual(len(self.agent.get_peer_name('int-', bridge1)),
n_const.DEVICE_NAME_MAX_LEN)
self.assertEqual(len(self.agent.get_peer_name('int-', bridge2)),
n_const.DEVICE_NAME_MAX_LEN)
self.assertNotEqual(self.agent.get_peer_name('int-', bridge1),
self.agent.get_peer_name('int-', bridge2))
def test_setup_tunnel_br(self):
self.tun_br = mock.Mock()
with contextlib.nested(
mock.patch.object(self.agent.int_br, "add_patch_port",
return_value=1),
mock.patch.object(self.agent.tun_br, "add_patch_port",
return_value=2),
mock.patch.object(self.agent.tun_br, "remove_all_flows"),
mock.patch.object(self.agent.tun_br, "add_flow"),
mock.patch.object(ovs_lib, "OVSBridge"),
mock.patch.object(self.agent.tun_br, "reset_bridge"),
mock.patch.object(sys, "exit")
) as (intbr_patch_fn, tunbr_patch_fn, remove_all_fn,
add_flow_fn, ovs_br_fn, reset_br_fn, exit_fn):
self.agent.setup_tunnel_br(None)
self.assertTrue(intbr_patch_fn.called)
def test_setup_tunnel_port(self):
self.agent.tun_br = mock.Mock()
self.agent.l2_pop = False
self.agent.udp_vxlan_port = 8472
self.agent.tun_br_ofports['vxlan'] = {}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, "add_tunnel_port",
return_value='6'),
mock.patch.object(self.agent.tun_br, "add_flow")
) as (add_tun_port_fn, add_flow_fn):
self.agent._setup_tunnel_port('portname', '1.2.3.4', 'vxlan')
self.assertTrue(add_tun_port_fn.called)
def test_port_unbound(self):
with mock.patch.object(self.agent, "reclaim_local_vlan") as reclvl_fn:
self.agent.enable_tunneling = True
lvm = mock.Mock()
lvm.network_type = "gre"
lvm.vif_ports = {"vif1": mock.Mock()}
self.agent.local_vlan_map["netuid12345"] = lvm
self.agent.port_unbound("vif1", "netuid12345")
self.assertTrue(reclvl_fn.called)
reclvl_fn.called = False
lvm.vif_ports = {}
self.agent.port_unbound("vif1", "netuid12345")
self.assertEqual(reclvl_fn.call_count, 2)
lvm.vif_ports = {"vif1": mock.Mock()}
self.agent.port_unbound("vif3", "netuid12345")
self.assertEqual(reclvl_fn.call_count, 2)
def _prepare_l2_pop_ofports(self):
lvm1 = mock.Mock()
lvm1.network_type = 'gre'
lvm1.vlan = 'vlan1'
lvm1.segmentation_id = 'seg1'
lvm1.tun_ofports = set(['1'])
lvm2 = mock.Mock()
lvm2.network_type = 'gre'
lvm2.vlan = 'vlan2'
lvm2.segmentation_id = 'seg2'
lvm2.tun_ofports = set(['1', '2'])
self.agent.local_vlan_map = {'net1': lvm1, 'net2': lvm2}
self.agent.tun_br_ofports = {'gre':
{'1.1.1.1': '1', '2.2.2.2': '2'}}
self.agent.arp_responder_enabled = True
def test_fdb_ignore_network(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net3': {}}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.tun_br, 'delete_flows'),
mock.patch.object(self.agent, '_setup_tunnel_port'),
mock.patch.object(self.agent, 'cleanup_tunnel_port')
) as (add_flow_fn, del_flow_fn, add_tun_fn, clean_tun_fn):
self.agent.fdb_add(None, fdb_entry)
self.assertFalse(add_flow_fn.called)
self.assertFalse(add_tun_fn.called)
self.agent.fdb_remove(None, fdb_entry)
self.assertFalse(del_flow_fn.called)
self.assertFalse(clean_tun_fn.called)
def test_fdb_ignore_self(self):
self._prepare_l2_pop_ofports()
self.agent.local_ip = 'agent_ip'
fdb_entry = {'net2':
{'network_type': 'gre',
'segment_id': 'tun2',
'ports':
{'agent_ip':
[[FAKE_MAC, FAKE_IP1],
n_const.FLOODING_ENTRY]}}}
with mock.patch.object(self.agent.tun_br,
"defer_apply_on") as defer_fn:
self.agent.fdb_add(None, fdb_entry)
self.assertFalse(defer_fn.called)
self.agent.fdb_remove(None, fdb_entry)
self.assertFalse(defer_fn.called)
def test_fdb_add_flows(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net1':
{'network_type': 'gre',
'segment_id': 'tun1',
'ports':
{'2.2.2.2':
[[FAKE_MAC, FAKE_IP1],
n_const.FLOODING_ENTRY]}}}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.tun_br, 'mod_flow'),
mock.patch.object(self.agent, '_setup_tunnel_port'),
) as (add_flow_fn, mod_flow_fn, add_tun_fn):
self.agent.fdb_add(None, fdb_entry)
self.assertFalse(add_tun_fn.called)
actions = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],'
'mod_dl_src:%(mac)s,'
'load:0x2->NXM_OF_ARP_OP[],'
'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],'
'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],'
'load:%(mac)#x->NXM_NX_ARP_SHA[],'
'load:%(ip)#x->NXM_OF_ARP_SPA[],'
'in_port' %
{'mac': netaddr.EUI(FAKE_MAC, dialect=netaddr.mac_unix),
'ip': netaddr.IPAddress(FAKE_IP1)})
add_flow_fn.assert_has_calls([
mock.call(table=constants.ARP_RESPONDER,
priority=1,
proto='arp',
dl_vlan='vlan1',
nw_dst=FAKE_IP1,
actions=actions),
mock.call(table=constants.UCAST_TO_TUN,
priority=2,
dl_vlan='vlan1',
dl_dst=FAKE_MAC,
actions='strip_vlan,'
'set_tunnel:seg1,output:2')
])
mod_flow_fn.assert_called_with(table=constants.FLOOD_TO_TUN,
dl_vlan='vlan1',
actions='strip_vlan,'
'set_tunnel:seg1,output:1,2')
def test_fdb_del_flows(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net2':
{'network_type': 'gre',
'segment_id': 'tun2',
'ports':
{'2.2.2.2':
[[FAKE_MAC, FAKE_IP1],
n_const.FLOODING_ENTRY]}}}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'mod_flow'),
mock.patch.object(self.agent.tun_br, 'delete_flows'),
) as (mod_flow_fn, del_flow_fn):
self.agent.fdb_remove(None, fdb_entry)
mod_flow_fn.assert_called_with(table=constants.FLOOD_TO_TUN,
dl_vlan='vlan2',
actions='strip_vlan,'
'set_tunnel:seg2,output:1')
expected = [mock.call(table=constants.ARP_RESPONDER,
proto='arp',
dl_vlan='vlan2',
nw_dst=FAKE_IP1),
mock.call(table=constants.UCAST_TO_TUN,
dl_vlan='vlan2',
dl_dst=FAKE_MAC),
mock.call(in_port='2')]
del_flow_fn.assert_has_calls(expected)
def test_fdb_add_port(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net1':
{'network_type': 'gre',
'segment_id': 'tun1',
'ports': {'1.1.1.1': [[FAKE_MAC, FAKE_IP1]]}}}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.tun_br, 'mod_flow'),
mock.patch.object(self.agent, '_setup_tunnel_port')
) as (add_flow_fn, mod_flow_fn, add_tun_fn):
self.agent.fdb_add(None, fdb_entry)
self.assertFalse(add_tun_fn.called)
fdb_entry['net1']['ports']['10.10.10.10'] = [[FAKE_MAC, FAKE_IP1]]
self.agent.fdb_add(None, fdb_entry)
add_tun_fn.assert_called_with('gre-0a0a0a0a', '10.10.10.10', 'gre')
def test_fdb_del_port(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net2':
{'network_type': 'gre',
'segment_id': 'tun2',
'ports': {'2.2.2.2': [n_const.FLOODING_ENTRY]}}}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'delete_flows'),
mock.patch.object(self.agent.tun_br, 'delete_port')
) as (del_flow_fn, del_port_fn):
self.agent.fdb_remove(None, fdb_entry)
del_port_fn.assert_called_once_with('gre-02020202')
def test_fdb_update_chg_ip(self):
self._prepare_l2_pop_ofports()
fdb_entries = {'chg_ip':
{'net1':
{'agent_ip':
{'before': [[FAKE_MAC, FAKE_IP1]],
'after': [[FAKE_MAC, FAKE_IP2]]}}}}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.tun_br, 'delete_flows')
) as (add_flow_fn, del_flow_fn):
self.agent.fdb_update(None, fdb_entries)
actions = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],'
'mod_dl_src:%(mac)s,'
'load:0x2->NXM_OF_ARP_OP[],'
'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],'
'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],'
'load:%(mac)#x->NXM_NX_ARP_SHA[],'
'load:%(ip)#x->NXM_OF_ARP_SPA[],'
'in_port' %
{'mac': netaddr.EUI(FAKE_MAC, dialect=netaddr.mac_unix),
'ip': netaddr.IPAddress(FAKE_IP2)})
add_flow_fn.assert_called_once_with(table=constants.ARP_RESPONDER,
priority=1,
proto='arp',
dl_vlan='vlan1',
nw_dst=FAKE_IP2,
actions=actions)
del_flow_fn.assert_called_once_with(table=constants.ARP_RESPONDER,
proto='arp',
dl_vlan='vlan1',
nw_dst=FAKE_IP1)
def test_recl_lv_port_to_preserve(self):
self._prepare_l2_pop_ofports()
self.agent.l2_pop = True
self.agent.enable_tunneling = True
with mock.patch.object(
self.agent.tun_br, 'cleanup_tunnel_port'
) as clean_tun_fn:
self.agent.reclaim_local_vlan('net1')
self.assertFalse(clean_tun_fn.called)
def test_recl_lv_port_to_remove(self):
self._prepare_l2_pop_ofports()
self.agent.l2_pop = True
self.agent.enable_tunneling = True
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'delete_port'),
mock.patch.object(self.agent.tun_br, 'delete_flows')
) as (del_port_fn, del_flow_fn):
self.agent.reclaim_local_vlan('net2')
del_port_fn.assert_called_once_with('gre-02020202')
def test_dvr_mac_address_update(self):
self._setup_for_dvr_test()
with contextlib.nested(
mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'),
#mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows')
) as (add_flow_fn, add_flow_tn_fn, del_flows_fn):
self.agent.dvr_agent.\
dvr_mac_address_update(
dvr_macs=[{'host': 'cn2',
'mac_address': 'aa:bb:cc:dd:ee:ff'}])
add_flow_tn_fn.assert_called_with(table=constants.DVR_NOT_LEARN,
priority=1,
dl_src='aa:bb:cc:dd:ee:ff',
actions="output:%s"
% self.agent.patch_int_ofport
)
self.assertFalse(del_flows_fn.called)
with contextlib.nested(
mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'),
mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows'),
mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows')
) as (add_flow_fn, del_flows_tn_fn, del_flows_fn):
self.agent.dvr_agent.dvr_mac_address_update(dvr_macs=[])
del_flows_tn_fn.assert_called_with(table=constants.DVR_NOT_LEARN,
dl_src='aa:bb:cc:dd:ee:ff')
self.assertFalse(add_flow_fn.called)
def test_daemon_loop_uses_polling_manager(self):
with mock.patch(
'neutron.agent.linux.polling.get_polling_manager') as mock_get_pm:
with mock.patch.object(self.agent, 'rpc_loop') as mock_loop:
self.agent.daemon_loop()
mock_get_pm.assert_called_with(True, 'sudo',
constants.DEFAULT_OVSDBMON_RESPAWN)
mock_loop.assert_called_once_with(polling_manager=mock.ANY)
def test__setup_tunnel_port_error_negative(self):
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_tunnel_port',
return_value='-1'),
mock.patch.object(ovs_neutron_agent.LOG, 'error')
) as (add_tunnel_port_fn, log_error_fn):
ofport = self.agent._setup_tunnel_port(
'gre-1', 'remote_ip', p_const.TYPE_GRE)
add_tunnel_port_fn.assert_called_once_with(
'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
self.agent.vxlan_udp_port, self.agent.dont_fragment)
log_error_fn.assert_called_once_with(
_("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
self.assertEqual(ofport, 0)
def test__setup_tunnel_port_error_not_int(self):
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_tunnel_port',
return_value=None),
mock.patch.object(ovs_neutron_agent.LOG, 'exception'),
mock.patch.object(ovs_neutron_agent.LOG, 'error')
) as (add_tunnel_port_fn, log_exc_fn, log_error_fn):
ofport = self.agent._setup_tunnel_port(
'gre-1', 'remote_ip', p_const.TYPE_GRE)
add_tunnel_port_fn.assert_called_once_with(
'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
self.agent.vxlan_udp_port, self.agent.dont_fragment)
log_exc_fn.assert_called_once_with(
_("ofport should have a value that can be "
"interpreted as an integer"))
log_error_fn.assert_called_once_with(
_("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
self.assertEqual(ofport, 0)
def test__setup_tunnel_port_error_negative_df_disabled(self):
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_tunnel_port',
return_value='-1'),
mock.patch.object(ovs_neutron_agent.LOG, 'error')
) as (add_tunnel_port_fn, log_error_fn):
self.agent.dont_fragment = False
ofport = self.agent._setup_tunnel_port(
'gre-1', 'remote_ip', p_const.TYPE_GRE)
add_tunnel_port_fn.assert_called_once_with(
'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
self.agent.vxlan_udp_port, self.agent.dont_fragment)
log_error_fn.assert_called_once_with(
_("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
self.assertEqual(ofport, 0)
def test_tunnel_sync_with_ovs_plugin(self):
fake_tunnel_details = {'tunnels': [{'id': '42',
'ip_address': '100.101.102.103'}]}
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync',
return_value=fake_tunnel_details),
mock.patch.object(self.agent, '_setup_tunnel_port')
) as (tunnel_sync_rpc_fn, _setup_tunnel_port_fn):
self.agent.tunnel_types = ['gre']
self.agent.tunnel_sync()
expected_calls = [mock.call('gre-42', '100.101.102.103', 'gre')]
_setup_tunnel_port_fn.assert_has_calls(expected_calls)
def test_tunnel_sync_with_ml2_plugin(self):
fake_tunnel_details = {'tunnels': [{'ip_address': '100.101.31.15'}]}
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync',
return_value=fake_tunnel_details),
mock.patch.object(self.agent, '_setup_tunnel_port')
) as (tunnel_sync_rpc_fn, _setup_tunnel_port_fn):
self.agent.tunnel_types = ['vxlan']
self.agent.tunnel_sync()
expected_calls = [mock.call('vxlan-64651f0f',
'100.101.31.15', 'vxlan')]
_setup_tunnel_port_fn.assert_has_calls(expected_calls)
def test_tunnel_sync_invalid_ip_address(self):
fake_tunnel_details = {'tunnels': [{'ip_address': '300.300.300.300'},
{'ip_address': '100.100.100.100'}]}
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync',
return_value=fake_tunnel_details),
mock.patch.object(self.agent, '_setup_tunnel_port')
) as (tunnel_sync_rpc_fn, _setup_tunnel_port_fn):
self.agent.tunnel_types = ['vxlan']
self.agent.tunnel_sync()
_setup_tunnel_port_fn.assert_called_once_with('vxlan-64646464',
'100.100.100.100',
'vxlan')
def test_tunnel_update(self):
kwargs = {'tunnel_ip': '10.10.10.10',
'tunnel_type': 'gre'}
self.agent._setup_tunnel_port = mock.Mock()
self.agent.enable_tunneling = True
self.agent.tunnel_types = ['gre']
self.agent.l2_pop = False
self.agent.tunnel_update(context=None, **kwargs)
expected_calls = [mock.call('gre-0a0a0a0a', '10.10.10.10', 'gre')]
self.agent._setup_tunnel_port.assert_has_calls(expected_calls)
def test_ovs_restart(self):
reply2 = {'current': set(['tap0']),
'added': set(['tap2']),
'removed': set([])}
reply3 = {'current': set(['tap2']),
'added': set([]),
'removed': set(['tap0'])}
with contextlib.nested(
mock.patch.object(async_process.AsyncProcess, "_spawn"),
mock.patch.object(log.ContextAdapter, 'exception'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'scan_ports'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'process_network_ports'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'check_ovs_restart'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'setup_integration_br'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'setup_physical_bridges')
) as (spawn_fn, log_exception, scan_ports, process_network_ports,
check_ovs_restart, setup_int_br, setup_phys_br):
log_exception.side_effect = Exception(
'Fake exception to get out of the loop')
scan_ports.side_effect = [reply2, reply3]
process_network_ports.side_effect = [
False, Exception('Fake exception to get out of the loop')]
check_ovs_restart.side_effect = [False, True]
# This will exit after the second loop
try:
self.agent.daemon_loop()
except Exception:
pass
scan_ports.assert_has_calls([
mock.call(set(), set()),
mock.call(set(), set())
])
process_network_ports.assert_has_calls([
mock.call({'current': set(['tap0']),
'removed': set([]),
'added': set(['tap2'])}, False),
mock.call({'current': set(['tap2']),
'removed': set(['tap0']),
'added': set([])}, True)
])
# Verify the second time through the loop we triggered an
# OVS restart and re-setup the bridges
setup_int_br.assert_has_calls([mock.call()])
setup_phys_br.assert_has_calls([mock.call({})])
class AncillaryBridgesTest(base.BaseTestCase):
def setUp(self):
super(AncillaryBridgesTest, self).setUp()
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
# Avoid rpc initialization for unit tests
cfg.CONF.set_override('rpc_backend',
'neutron.openstack.common.rpc.impl_fake')
cfg.CONF.set_override('report_interval', 0, 'AGENT')
self.kwargs = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
def _test_ancillary_bridges(self, bridges, ancillary):
device_ids = ancillary[:]
def pullup_side_effect(self, *args):
result = device_ids.pop(0)
return result
with contextlib.nested(
mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.'
'OVSNeutronAgent.setup_integration_br',
return_value=mock.Mock()),
mock.patch('neutron.agent.linux.utils.get_interface_mac',
return_value='00:00:00:00:00:01'),
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'get_local_port_mac',
return_value='00:00:00:00:00:01'),
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_secure_mode'),
mock.patch('neutron.agent.linux.ovs_lib.get_bridges',
return_value=bridges),
mock.patch(
'neutron.agent.linux.ovs_lib.get_bridge_external_bridge_id',
side_effect=pullup_side_effect),
mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.'
'OVSNeutronAgent._check_arp_responder_support',
return_value=True)):
self.agent = ovs_neutron_agent.OVSNeutronAgent(**self.kwargs)
self.assertEqual(len(ancillary), len(self.agent.ancillary_brs))
if ancillary:
bridges = [br.br_name for br in self.agent.ancillary_brs]
for br in ancillary:
self.assertIn(br, bridges)
def test_ancillary_bridges_single(self):
bridges = ['br-int', 'br-ex']
self._test_ancillary_bridges(bridges, ['br-ex'])
def test_ancillary_bridges_none(self):
bridges = ['br-int']
self._test_ancillary_bridges(bridges, [])
def test_ancillary_bridges_multiple(self):
bridges = ['br-int', 'br-ex1', 'br-ex2']
self._test_ancillary_bridges(bridges, ['br-ex1', 'br-ex2'])
|
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contains special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one it is is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores maintained and both
# model probers' scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
class HebrewProber(CharSetProber):
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
def __init__(self):
super(HebrewProber, self).__init__()
self._final_char_logical_score = None
self._final_char_visual_score = None
self._prev = None
self._before_prev = None
self._logical_prober = None
self._visual_prober = None
self.reset()
def reset(self):
self._final_char_logical_score = 0
self._final_char_visual_score = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._prev = ' '
self._before_prev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._logical_prober = logicalProber
self._visual_prober = visualProber
def is_final(self, c):
return wrap_ord(c) in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN,
self.FINAL_PE, self.FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
        # The letters Pe and Kaf rarely exhibit a similar problem: words like
        # 'Pop', 'Winamp' and 'Mubarak', for example, legitimately end with a
        # Non-Final Pe or Kaf. However, the benefit of treating these letters
        # as Non-Final outweighs the damage, since such words are quite rare.
return wrap_ord(c) in [self.NORMAL_KAF, self.NORMAL_MEM,
self.NORMAL_NUN, self.NORMAL_PE]
def feed(self, byte_str):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.state == ProbingState.not_me:
# Both model probers say it's not them. No reason to continue.
return ProbingState.not_me
byte_str = self.filter_high_byte_only(byte_str)
for cur in byte_str:
if cur == ' ':
# We stand on a space - a word just ended
if self._before_prev != ' ':
# next-to-last char was not a space so self._prev is not a
# 1 letter word
if self.is_final(self._prev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._final_char_logical_score += 1
elif self.is_non_final(self._prev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._final_char_visual_score += 1
else:
# Not standing on a space
if ((self._before_prev == ' ') and
(self.is_final(self._prev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._final_char_visual_score += 1
self._before_prev = self._prev
self._prev = cur
# Forever detecting, till the end or until both model probers return
# ProbingState.not_me (handled above)
return ProbingState.detecting
@property
def charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._final_char_logical_score - self._final_char_visual_score
if finalsub >= self.MIN_FINAL_CHAR_DISTANCE:
return self.LOGICAL_HEBREW_NAME
if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE:
return self.VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._logical_prober.get_confidence()
- self._visual_prober.get_confidence())
if modelsub > self.MIN_MODEL_DISTANCE:
return self.LOGICAL_HEBREW_NAME
if modelsub < -self.MIN_MODEL_DISTANCE:
return self.VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return self.VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return self.LOGICAL_HEBREW_NAME
@property
def state(self):
# Remain active as long as any of the model probers are active.
if (self._logical_prober.state == ProbingState.not_me) and \
(self._visual_prober.state == ProbingState.not_me):
return ProbingState.not_me
return ProbingState.detecting
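# Usage sketch (editor's addition, not part of the original module): the wiring
# below mirrors the SBCSGroupProber description above. Two
# SingleByteCharSetProbers share the windows-1255 model, one of them reversed,
# and the HebrewProber arbitrates between them. Import and model names are
# assumptions based on the package layout referenced in this file.
#
#   from .sbcharsetprober import SingleByteCharSetProber
#   from .langhebrewmodel import Win1255HebrewModel
#
#   hebrew_prober = HebrewProber()
#   logical = SingleByteCharSetProber(Win1255HebrewModel, False, hebrew_prober)
#   visual = SingleByteCharSetProber(Win1255HebrewModel, True, hebrew_prober)
#   hebrew_prober.set_model_probers(logical, visual)
#   # The group prober then feeds all three and asks hebrew_prober.charset_name
#   # for the final "windows-1255" vs "ISO-8859-8" decision.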
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import shutil
import unittest
import tempfile
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
try:
from libcloud.storage.drivers.local import LocalStorageDriver
except ImportError:
print('lockfile library is not available, skipping local_storage tests...')
LocalStorageDriver = None
from libcloud.storage.drivers.dummy import DummyIterator
class LocalTests(unittest.TestCase):
driver_type = LocalStorageDriver
@classmethod
def create_driver(self):
self.key = tempfile.mkdtemp()
return self.driver_type(self.key, None)
def setUp(self):
self.driver = self.create_driver()
def tearDown(self):
shutil.rmtree(self.key)
self.key = None
def make_tmp_file(self):
_, tmppath = tempfile.mkstemp()
with open(tmppath, 'w') as fp:
fp.write('blah' * 1024)
return tmppath
def remove_tmp_file(self, tmppath):
os.unlink(tmppath)
def test_list_containers_empty(self):
containers = self.driver.list_containers()
self.assertEqual(len(containers), 0)
def test_containers_success(self):
self.driver.create_container('test1')
self.driver.create_container('test2')
containers = self.driver.list_containers()
self.assertEqual(len(containers), 2)
container = containers[1]
self.assertTrue('creation_time' in container.extra)
self.assertTrue('modify_time' in container.extra)
self.assertTrue('access_time' in container.extra)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 0)
objects = container.list_objects()
self.assertEqual(len(objects), 0)
for container in containers:
self.driver.delete_container(container)
def test_objects_success(self):
tmppath = self.make_tmp_file()
tmpfile = open(tmppath)
container = self.driver.create_container('test3')
obj1 = container.upload_object(tmppath, 'object1')
obj2 = container.upload_object(tmppath, 'path/object2')
obj3 = container.upload_object(tmppath, 'path/to/object3')
obj4 = container.upload_object(tmppath, 'path/to/object4.ext')
obj5 = container.upload_object_via_stream(tmpfile, 'object5')
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 5)
for obj in objects:
self.assertNotEqual(obj.hash, None)
self.assertEqual(obj.size, 4096)
self.assertEqual(obj.container.name, 'test3')
self.assertTrue('creation_time' in obj.extra)
self.assertTrue('modify_time' in obj.extra)
self.assertTrue('access_time' in obj.extra)
obj1.delete()
obj2.delete()
objects = container.list_objects()
self.assertEqual(len(objects), 3)
container.delete_object(obj3)
container.delete_object(obj4)
container.delete_object(obj5)
objects = container.list_objects()
self.assertEqual(len(objects), 0)
container.delete()
tmpfile.close()
self.remove_tmp_file(tmppath)
def test_get_container_doesnt_exist(self):
try:
self.driver.get_container(container_name='container1')
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_container_success(self):
self.driver.create_container('test4')
container = self.driver.get_container(container_name='test4')
        self.assertEqual(container.name, 'test4')
container.delete()
def test_get_object_container_doesnt_exist(self):
try:
self.driver.get_object(container_name='test-inexistent',
object_name='test')
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_object_success(self):
tmppath = self.make_tmp_file()
container = self.driver.create_container('test5')
container.upload_object(tmppath, 'test')
obj = self.driver.get_object(container_name='test5',
object_name='test')
self.assertEqual(obj.name, 'test')
self.assertEqual(obj.container.name, 'test5')
self.assertEqual(obj.size, 4096)
self.assertNotEqual(obj.hash, None)
self.assertTrue('creation_time' in obj.extra)
self.assertTrue('modify_time' in obj.extra)
self.assertTrue('access_time' in obj.extra)
obj.delete()
container.delete()
self.remove_tmp_file(tmppath)
def test_create_container_invalid_name(self):
try:
self.driver.create_container(container_name='new/container')
except InvalidContainerNameError:
pass
else:
self.fail('Exception was not thrown')
def test_create_container_already_exists(self):
container = self.driver.create_container(
container_name='new-container')
try:
self.driver.create_container(container_name='new-container')
except ContainerAlreadyExistsError:
pass
else:
self.fail('Exception was not thrown')
# success
self.driver.delete_container(container)
def test_create_container_success(self):
name = 'new_container'
container = self.driver.create_container(container_name=name)
self.assertEqual(container.name, name)
self.driver.delete_container(container)
def test_delete_container_doesnt_exist(self):
container = Container(name='new_container', extra=None,
driver=self.driver)
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_container_not_empty(self):
tmppath = self.make_tmp_file()
container = self.driver.create_container('test6')
obj = container.upload_object(tmppath, 'test')
try:
self.driver.delete_container(container=container)
except ContainerIsNotEmptyError:
pass
else:
self.fail('Exception was not thrown')
# success
obj.delete()
self.remove_tmp_file(tmppath)
self.assertTrue(self.driver.delete_container(container=container))
def test_delete_container_not_found(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
            self.fail('Container does not exist but an exception was not '
                      'thrown')
def test_delete_container_success(self):
container = self.driver.create_container('test7')
self.assertTrue(self.driver.delete_container(container=container))
def test_download_object_success(self):
tmppath = self.make_tmp_file()
container = self.driver.create_container('test6')
obj = container.upload_object(tmppath, 'test')
destination_path = tmppath + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
obj.delete()
container.delete()
self.remove_tmp_file(tmppath)
os.unlink(destination_path)
def test_download_object_and_overwrite(self):
tmppath = self.make_tmp_file()
container = self.driver.create_container('test6')
obj = container.upload_object(tmppath, 'test')
destination_path = tmppath + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
try:
self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
except LibcloudError:
pass
else:
self.fail('Exception was not thrown')
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=True,
delete_on_failure=True)
self.assertTrue(result)
# success
obj.delete()
container.delete()
self.remove_tmp_file(tmppath)
os.unlink(destination_path)
def test_download_object_as_stream_success(self):
tmppath = self.make_tmp_file()
container = self.driver.create_container('test6')
obj = container.upload_object(tmppath, 'test')
stream = self.driver.download_object_as_stream(obj=obj,
chunk_size=1024)
self.assertTrue(hasattr(stream, '__iter__'))
data = ''
for buff in stream:
data += buff.decode('utf-8')
        self.assertEqual(len(data), 4096)
obj.delete()
container.delete()
self.remove_tmp_file(tmppath)
if not LocalStorageDriver:
class LocalTests(unittest.TestCase):
pass
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import jsonschema
from future.utils import PY2
from future.backports.http import client as backport_client
import re
import os
import sys
import yaml
import logging
import shutil
import requests
import itertools
from contextlib import contextmanager
import mock
import pytest
from path import Path
from vcr import VCR
from vcr.stubs import VCRHTTPSConnection, VCRHTTPConnection
import flexget.logger
from flexget.manager import Manager
from flexget.plugin import load_plugins
from flexget.task import Task, TaskAbort
from flexget.webserver import User
from flexget.manager import Session
from flexget.api import api_app
log = logging.getLogger('tests')
VCR_CASSETTE_DIR = os.path.join(os.path.dirname(__file__), 'cassettes')
VCR_RECORD_MODE = os.environ.get('VCR_RECORD_MODE', 'once')
vcr = VCR(
cassette_library_dir=VCR_CASSETTE_DIR,
record_mode=VCR_RECORD_MODE,
custom_patches=(
(backport_client, 'HTTPSConnection', VCRHTTPSConnection),
(backport_client, 'HTTPConnection', VCRHTTPConnection),
),
)
# --- These are the public fixtures tests can ask for ---
@pytest.fixture(scope='class')
def config(request):
"""
If used inside a test class, uses the `config` class attribute of the class.
This is used by `manager` fixture, and can be parametrized.
"""
return request.cls.config
@pytest.yield_fixture()
def manager(
request, config, caplog, monkeypatch, filecopy
): # enforce filecopy is run before manager
"""
Create a :class:`MockManager` for this test based on `config` argument.
"""
if 'tmpdir' in request.fixturenames:
config = config.replace('__tmp__', request.getfixturevalue('tmpdir').strpath)
try:
mockmanager = MockManager(config, request.cls.__name__)
except Exception:
# Since we haven't entered the test function yet, pytest won't print the logs on failure. Print them manually.
print(caplog.text)
raise
yield mockmanager
mockmanager.shutdown()
@pytest.fixture()
def execute_task(manager):
"""
A function that can be used to execute and return a named task in `config` argument.
"""
def execute(task_name, abort=False, options=None):
"""
Use to execute one test task from config.
:param abort: If `True` expect (and require) this task to abort.
"""
log.info('********** Running task: %s ********** ' % task_name)
config = manager.config['tasks'][task_name]
task = Task(manager, task_name, config=config, options=options)
try:
if abort:
with pytest.raises(TaskAbort):
task.execute()
else:
task.execute()
finally:
try:
task.session.close()
except Exception:
pass
return task
return execute
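# Example usage of `execute_task` (editor's illustration; the task name and
# config below are hypothetical, not taken from this repository):
#
#   class TestMock(object):
#       config = """
#           tasks:
#             my_task:
#               mock:
#                 - {title: 'entry 1'}
#               accept_all: yes
#       """
#
#       def test_accepts_entry(self, execute_task):
#           task = execute_task('my_task')
#           assert len(task.accepted) == 1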
@pytest.yield_fixture()
def use_vcr(request, monkeypatch):
"""
This fixture is applied automatically to any test using the `online` mark. It will record and playback network
sessions using VCR.
The record mode of VCR can be set using the VCR_RECORD_MODE environment variable when running tests.
"""
if VCR_RECORD_MODE == 'off':
yield None
else:
module = request.module.__name__.split('tests.')[-1]
class_name = request.cls.__name__
cassette_name = '.'.join([module, class_name, request.function.__name__])
cassette_path = os.path.join(VCR_CASSETTE_DIR, cassette_name)
online = True
if vcr.record_mode == 'none':
online = False
elif vcr.record_mode == 'once':
online = not os.path.exists(cassette_path)
# If we are not going online, disable domain limiting during test
if not online:
log.debug('Disabling domain limiters during VCR playback.')
monkeypatch.setattr('flexget.utils.requests.limit_domains', mock.Mock())
with vcr.use_cassette(path=cassette_path) as cassette:
yield cassette
@pytest.fixture()
def api_client(manager):
with Session() as session:
user = session.query(User).first()
if not user:
user = User(name='flexget', password='flexget')
session.add(user)
session.commit()
return APIClient(user.token)
@pytest.fixture()
def schema_match(manager):
"""
    This fixture enables verifying a response against a JSON schema. The
    returned matcher gives a list of validation error dicts; the list is empty
    if no errors occurred.
"""
def match(schema, response):
validator = jsonschema.Draft4Validator(schema)
errors = list(validator.iter_errors(response))
return [dict(value=list(e.path), message=e.message) for e in errors]
return match
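# e.g. (editor's illustration): errors = schema_match(some_schema, rsp_json)
# followed by `assert not errors` in the test body.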
@pytest.fixture()
def link_headers(manager):
"""
    Parses link headers and returns them in dict form
"""
def headers(response):
links = {}
for link in requests.utils.parse_header_links(response.headers.get('link')):
url = link['url']
            page = int(re.search(r'(?<!per_)page=(\d+)', url).group(1))
links[link['rel']] = dict(url=url, page=page)
return links
return headers
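# e.g. (editor's illustration): a response carrying the header
#   Link: <http://localhost/api/tasks/?page=2>; rel="next"
# yields {'next': {'url': 'http://localhost/api/tasks/?page=2', 'page': 2}}.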
# --- End Public Fixtures ---
def pytest_configure(config):
    # register the filecopy and online markers
    config.addinivalue_line(
        'markers',
        'filecopy(src, dst): mark test to copy a file from `src` to `dst` before running.',
    )
    config.addinivalue_line(
        'markers',
        'online: mark a test that goes online. VCR will automatically be used.',
    )
def pytest_runtest_setup(item):
    # Add the filecopy fixture to any test marked with filecopy
if item.get_closest_marker('filecopy'):
item.fixturenames.append('filecopy')
# Add the online marker to tests that will go online
if item.get_closest_marker('online'):
item.fixturenames.append('use_vcr')
else:
item.fixturenames.append('no_requests')
@pytest.yield_fixture()
def filecopy(request):
out_files = []
marker = request.node.get_closest_marker('filecopy')
if marker is not None:
copy_list = marker.args[0] if len(marker.args) == 1 else [marker.args]
for sources, dst in copy_list:
if isinstance(sources, str):
sources = [sources]
if 'tmpdir' in request.fixturenames:
dst = dst.replace('__tmp__', request.getfixturevalue('tmpdir').strpath)
dst = Path(dst)
for f in itertools.chain(*(Path().glob(src) for src in sources)):
dest_path = dst
if dest_path.isdir():
dest_path = dest_path / f.basename()
log.debug('copying %s to %s', f, dest_path)
if not os.path.isdir(os.path.dirname(dest_path)):
os.makedirs(os.path.dirname(dest_path))
if os.path.isdir(f):
shutil.copytree(f, dest_path)
else:
shutil.copy(f, dest_path)
out_files.append(dest_path)
yield
if out_files:
for f in out_files:
try:
if os.path.isdir(f):
shutil.rmtree(f)
else:
f.remove()
except OSError as e:
print("couldn't remove %s: %s" % (f, e))
@pytest.fixture()
def no_requests(monkeypatch):
online_funcs = [
'requests.sessions.Session.request',
'future.backports.http.client.HTTPConnection.request',
]
# Don't monkey patch HTTPSConnection if ssl not installed as it won't exist in backports
try:
import ssl # noqa
from ssl import SSLContext # noqa
online_funcs.append('future.backports.http.client.HTTPSConnection.request')
except ImportError:
pass
if PY2:
online_funcs.extend(['httplib.HTTPConnection.request', 'httplib.HTTPSConnection.request'])
else:
online_funcs.extend(
['http.client.HTTPConnection.request', 'http.client.HTTPSConnection.request']
)
for func in online_funcs:
monkeypatch.setattr(
func, mock.Mock(side_effect=Exception('Online tests should use @pytest.mark.online'))
)
@pytest.fixture(scope='session', autouse=True)
def setup_once(pytestconfig, request):
# os.chdir(os.path.join(pytestconfig.rootdir.strpath, 'flexget', 'tests'))
flexget.logger.initialize(True)
m = MockManager(
'tasks: {}', 'init'
) # This makes sure our template environment is set up before any tests are run
m.shutdown()
logging.getLogger().setLevel(logging.DEBUG)
load_plugins()
@pytest.fixture(autouse=True)
def chdir(pytestconfig, request):
"""
    By marking a test with the chdir flag we change the current working
    directory to that module's location. Task configuration can then assume
    this is the location for relative paths.
"""
    if 'chdir' in request.fixturenames:
os.chdir(os.path.dirname(request.module.__file__))
@pytest.fixture(autouse=True)
def setup_loglevel(pytestconfig, caplog):
# set logging level according to pytest verbosity
level = logging.DEBUG
if pytestconfig.getoption('verbose') == 1:
level = flexget.logger.TRACE
elif pytestconfig.getoption('quiet', None) == 1:
level = logging.INFO
logging.getLogger().setLevel(level)
caplog.set_level(level)
class CrashReport(Exception):
pass
class MockManager(Manager):
unit_test = True
def __init__(self, config_text, config_name, db_uri=None):
self.config_text = config_text
self._db_uri = db_uri or 'sqlite:///:memory:'
super(MockManager, self).__init__(['execute'])
self.config_name = config_name
self.database_uri = self._db_uri
log.debug('database_uri: %s' % self.database_uri)
self.initialize()
def _init_config(self, *args, **kwargs):
"""
Override configuration loading
"""
self.config_base = os.path.dirname(os.path.abspath(sys.path[0]))
def load_config(self, *args, **kwargs):
"""
Just load our config from the text passed in on init
"""
config = yaml.safe_load(self.config_text) or {}
self.update_config(config)
@property
def conn(self):
return self.engine.connect()
# no lock files with unit testing
@contextmanager
def acquire_lock(self, **kwargs):
self._has_lock = True
yield
def release_lock(self):
pass
def crash_report(self):
# We don't want to silently swallow crash reports during unit tests
log.error('Crash Report Traceback:', exc_info=True)
raise CrashReport('Crash report created during unit test, check log for traceback.')
class APIClient(object):
def __init__(self, api_key):
self.api_key = api_key
self.client = api_app.test_client()
def _append_header(self, key, value, kwargs):
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers'][key] = value
def json_post(self, *args, **kwargs):
self._append_header('Content-Type', 'application/json', kwargs)
if kwargs.get('auth', True):
self._append_header('Authorization', 'Token %s' % self.api_key, kwargs)
return self.client.post(*args, **kwargs)
def json_put(self, *args, **kwargs):
self._append_header('Content-Type', 'application/json', kwargs)
if kwargs.get('auth', True):
self._append_header('Authorization', 'Token %s' % self.api_key, kwargs)
return self.client.put(*args, **kwargs)
def get(self, *args, **kwargs):
if kwargs.get('auth', True):
self._append_header('Authorization', 'Token %s' % self.api_key, kwargs)
return self.client.get(*args, **kwargs)
def delete(self, *args, **kwargs):
if kwargs.get('auth', True):
self._append_header('Authorization', 'Token %s' % self.api_key, kwargs)
return self.client.delete(*args, **kwargs)
def head(self, *args, **kwargs):
if kwargs.get('auth', True):
self._append_header('Authorization', 'Token %s' % self.api_key, kwargs)
return self.client.head(*args, **kwargs)
|
|
# Copyright 2018 The Exoplanet ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataset_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from absl import flags
import numpy as np
import tensorflow as tf
from astronet.ops import dataset_ops
from tf_util import configdict
FLAGS = flags.FLAGS
_TEST_TFRECORD_FILE = "astronet/ops/test_data/test_dataset.tfrecord"
# TODO(shallue): add tests with subcomponents, including with reversing.
class DatasetOpsTest(tf.test.TestCase):
def testPadTensorToBatchSize(self):
with self.session():
# Cannot pad a 0-dimensional Tensor.
tensor_0d = tf.constant(1)
with self.assertRaises(ValueError):
dataset_ops.pad_tensor_to_batch_size(tensor_0d, 10)
# 1-dimensional Tensor. Un-padded batch size is 5.
tensor_1d = tf.range(5, dtype=tf.int32)
self.assertEqual([5], tensor_1d.shape)
self.assertAllEqual([0, 1, 2, 3, 4], tensor_1d.eval())
tensor_1d_pad5 = dataset_ops.pad_tensor_to_batch_size(tensor_1d, 5)
self.assertEqual([5], tensor_1d_pad5.shape)
self.assertAllEqual([0, 1, 2, 3, 4], tensor_1d_pad5.eval())
tensor_1d_pad8 = dataset_ops.pad_tensor_to_batch_size(tensor_1d, 8)
self.assertEqual([8], tensor_1d_pad8.shape)
self.assertAllEqual([0, 1, 2, 3, 4, 0, 0, 0], tensor_1d_pad8.eval())
# 2-dimensional Tensor. Un-padded batch size is 3.
tensor_2d = tf.reshape(tf.range(9, dtype=tf.int32), [3, 3])
self.assertEqual([3, 3], tensor_2d.shape)
self.assertAllEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8]], tensor_2d.eval())
tensor_2d_pad3 = dataset_ops.pad_tensor_to_batch_size(tensor_2d, 3)
self.assertEqual([3, 3], tensor_2d_pad3.shape)
self.assertAllEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8]],
tensor_2d_pad3.eval())
tensor_2d_pad4 = dataset_ops.pad_tensor_to_batch_size(tensor_2d, 4)
self.assertEqual([4, 3], tensor_2d_pad4.shape)
self.assertAllEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 0, 0]],
tensor_2d_pad4.eval())
def testPadDatasetToBatchSizeNoWeights(self):
values = {"labels": np.arange(10, dtype=np.int32)}
dataset = tf.data.Dataset.from_tensor_slices(values).batch(4)
self.assertItemsEqual(["labels"], dataset.output_shapes.keys())
self.assertFalse(dataset.output_shapes["labels"].is_fully_defined())
dataset_pad = dataset_ops.pad_dataset_to_batch_size(dataset, 4)
self.assertItemsEqual(["labels", "weights"],
dataset_pad.output_shapes.keys())
self.assertEqual([4], dataset_pad.output_shapes["labels"])
self.assertEqual([4], dataset_pad.output_shapes["weights"])
next_batch = dataset_pad.make_one_shot_iterator().get_next()
next_labels = next_batch["labels"]
next_weights = next_batch["weights"]
with self.session() as sess:
labels, weights = sess.run([next_labels, next_weights])
self.assertAllEqual([0, 1, 2, 3], labels)
self.assertAllClose([1, 1, 1, 1], weights)
labels, weights = sess.run([next_labels, next_weights])
self.assertAllEqual([4, 5, 6, 7], labels)
self.assertAllClose([1, 1, 1, 1], weights)
labels, weights = sess.run([next_labels, next_weights])
self.assertAllEqual([8, 9, 0, 0], labels)
self.assertAllClose([1, 1, 0, 0], weights)
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run([next_labels, next_weights])
def testPadDatasetToBatchSizeWithWeights(self):
values = {
"labels": np.arange(10, dtype=np.int32),
"weights": 100 + np.arange(10, dtype=np.int32)
}
dataset = tf.data.Dataset.from_tensor_slices(values).batch(4)
self.assertItemsEqual(["labels", "weights"], dataset.output_shapes.keys())
self.assertFalse(dataset.output_shapes["labels"].is_fully_defined())
self.assertFalse(dataset.output_shapes["weights"].is_fully_defined())
dataset_pad = dataset_ops.pad_dataset_to_batch_size(dataset, 4)
self.assertItemsEqual(["labels", "weights"],
dataset_pad.output_shapes.keys())
self.assertEqual([4], dataset_pad.output_shapes["labels"])
self.assertEqual([4], dataset_pad.output_shapes["weights"])
next_batch = dataset_pad.make_one_shot_iterator().get_next()
next_labels = next_batch["labels"]
next_weights = next_batch["weights"]
with self.session() as sess:
labels, weights = sess.run([next_labels, next_weights])
self.assertAllEqual([0, 1, 2, 3], labels)
self.assertAllEqual([100, 101, 102, 103], weights)
labels, weights = sess.run([next_labels, next_weights])
self.assertAllEqual([4, 5, 6, 7], labels)
self.assertAllEqual([104, 105, 106, 107], weights)
labels, weights = sess.run([next_labels, next_weights])
self.assertAllEqual([8, 9, 0, 0], labels)
self.assertAllEqual([108, 109, 0, 0], weights)
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run([next_labels, next_weights])
def testSetBatchSizeSingleTensor1d(self):
dataset = tf.data.Dataset.range(4).batch(2)
self.assertFalse(dataset.output_shapes.is_fully_defined())
dataset = dataset_ops.set_batch_size(dataset, 2)
self.assertEqual([2], dataset.output_shapes)
next_batch = dataset.make_one_shot_iterator().get_next()
with self.session() as sess:
batch_value = sess.run(next_batch)
self.assertAllEqual([0, 1], batch_value)
batch_value = sess.run(next_batch)
self.assertAllEqual([2, 3], batch_value)
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(next_batch)
def testSetBatchSizeSingleTensor2d(self):
values = np.arange(12, dtype=np.int32).reshape([4, 3])
dataset = tf.data.Dataset.from_tensor_slices(values).batch(2)
self.assertFalse(dataset.output_shapes.is_fully_defined())
dataset = dataset_ops.set_batch_size(dataset, 2)
self.assertEqual([2, 3], dataset.output_shapes)
next_batch = dataset.make_one_shot_iterator().get_next()
with self.session() as sess:
batch_value = sess.run(next_batch)
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], batch_value)
batch_value = sess.run(next_batch)
self.assertAllEqual([[6, 7, 8], [9, 10, 11]], batch_value)
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(next_batch)
def testSetBatchSizeNested(self):
values = {
"a": 100 + np.arange(4, dtype=np.int32),
"nest": {
"b": np.arange(12, dtype=np.int32).reshape([4, 3]),
"c": np.arange(4, dtype=np.int32)
}
}
dataset = tf.data.Dataset.from_tensor_slices(values).batch(2)
self.assertItemsEqual(["a", "nest"], dataset.output_shapes.keys())
self.assertItemsEqual(["b", "c"], dataset.output_shapes["nest"].keys())
self.assertFalse(dataset.output_shapes["a"].is_fully_defined())
self.assertFalse(dataset.output_shapes["nest"]["b"].is_fully_defined())
self.assertFalse(dataset.output_shapes["nest"]["c"].is_fully_defined())
dataset = dataset_ops.set_batch_size(dataset, 2)
self.assertItemsEqual(["a", "nest"], dataset.output_shapes.keys())
self.assertItemsEqual(["b", "c"], dataset.output_shapes["nest"].keys())
self.assertEqual([2], dataset.output_shapes["a"])
self.assertEqual([2, 3], dataset.output_shapes["nest"]["b"])
self.assertEqual([2], dataset.output_shapes["nest"]["c"])
next_batch = dataset.make_one_shot_iterator().get_next()
next_a = next_batch["a"]
next_b = next_batch["nest"]["b"]
next_c = next_batch["nest"]["c"]
with self.session() as sess:
a, b, c = sess.run([next_a, next_b, next_c])
self.assertAllEqual([100, 101], a)
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], b)
self.assertAllEqual([0, 1], c)
a, b, c = sess.run([next_a, next_b, next_c])
self.assertAllEqual([102, 103], a)
self.assertAllEqual([[6, 7, 8], [9, 10, 11]], b)
self.assertAllEqual([2, 3], c)
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(next_batch)
class BuildDatasetTest(tf.test.TestCase):
def setUp(self):
super(BuildDatasetTest, self).setUp()
# The test dataset contains 10 tensorflow.Example protocol buffers. The i-th
# Example contains the following features:
# global_view = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
# local_view = [0.0, 1.0, 2.0, 3.0]
# aux_feature = 100 + i
# label_str = "PC" if i % 3 == 0 else "AFP" if i % 3 == 1 else "NTP"
self._file_pattern = os.path.join(FLAGS.test_srcdir, _TEST_TFRECORD_FILE)
self._input_config = configdict.ConfigDict({
"features": {
"global_view": {
"is_time_series": True,
"length": 8
},
"local_view": {
"is_time_series": True,
"length": 4
},
"aux_feature": {
"is_time_series": False,
"length": 1
}
},
})
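  # Sketch (editor's addition) of how one such serialized Example could be
  # built with standard tf.train protos; the feature names follow the comment
  # in setUp above, and `writer` is assumed to be a tf.python_io.TFRecordWriter:
  #
  #   example = tf.train.Example(features=tf.train.Features(feature={
  #       "global_view": tf.train.Feature(
  #           float_list=tf.train.FloatList(value=list(range(8)))),
  #       "local_view": tf.train.Feature(
  #           float_list=tf.train.FloatList(value=list(range(4)))),
  #       "aux_feature": tf.train.Feature(
  #           float_list=tf.train.FloatList(value=[100.0 + i])),
  #       "label_str": tf.train.Feature(
  #           bytes_list=tf.train.BytesList(value=[b"PC"])),
  #   }))
  #   writer.write(example.SerializeToString())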
def testNonExistentFileRaisesValueError(self):
with self.assertRaises(ValueError):
dataset_ops.build_dataset(
file_pattern="nonexistent",
input_config=self._input_config,
batch_size=4)
def testBuildWithoutLabels(self):
dataset = dataset_ops.build_dataset(
file_pattern=self._file_pattern,
input_config=self._input_config,
batch_size=4,
include_labels=False)
# We can use a one-shot iterator without labels because we don't have the
# stateful hash map for label ids.
iterator = dataset.make_one_shot_iterator()
features = iterator.get_next()
# Expect features only.
self.assertItemsEqual(["time_series_features", "aux_features"],
features.keys())
with self.session() as sess:
# Batch 1.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[100], [101], [102], [103]],
f["aux_features"]["aux_feature"])
# Batch 2.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[104], [105], [106], [107]],
f["aux_features"]["aux_feature"])
# Batch 3.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[108], [109]],
f["aux_features"]["aux_feature"])
# No more batches.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(features)
def testLabels1(self):
self._input_config["label_feature"] = "label_str"
self._input_config["label_map"] = {"PC": 0, "AFP": 1, "NTP": 2}
dataset = dataset_ops.build_dataset(
file_pattern=self._file_pattern,
input_config=self._input_config,
batch_size=4)
# We need an initializable iterator when using labels because of the
# stateful label id hash table.
iterator = dataset.make_initializable_iterator()
inputs = iterator.get_next()
init_op = tf.tables_initializer()
# Expect features and labels.
self.assertItemsEqual(["time_series_features", "aux_features", "labels"],
inputs.keys())
labels = inputs["labels"]
with self.session() as sess:
sess.run([init_op, iterator.initializer])
# Fetch 3 batches.
np.testing.assert_array_equal([0, 1, 2, 0], sess.run(labels))
np.testing.assert_array_equal([1, 2, 0, 1], sess.run(labels))
np.testing.assert_array_equal([2, 0], sess.run(labels))
# No more batches.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(labels)
def testLabels2(self):
self._input_config["label_feature"] = "label_str"
self._input_config["label_map"] = {"PC": 1, "AFP": 0, "NTP": 0}
dataset = dataset_ops.build_dataset(
file_pattern=self._file_pattern,
input_config=self._input_config,
batch_size=4)
# We need an initializable iterator when using labels because of the
# stateful label id hash table.
iterator = dataset.make_initializable_iterator()
inputs = iterator.get_next()
init_op = tf.tables_initializer()
# Expect features and labels.
self.assertItemsEqual(["time_series_features", "aux_features", "labels"],
inputs.keys())
labels = inputs["labels"]
with self.session() as sess:
sess.run([init_op, iterator.initializer])
# Fetch 3 batches.
np.testing.assert_array_equal([1, 0, 0, 1], sess.run(labels))
np.testing.assert_array_equal([0, 0, 1, 0], sess.run(labels))
np.testing.assert_array_equal([0, 1], sess.run(labels))
# No more batches.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(labels)
def testBadLabelIdsRaisesValueError(self):
self._input_config["label_feature"] = "label_str"
# Label ids should be contiguous integers starting at 0.
self._input_config["label_map"] = {"PC": 1, "AFP": 2, "NTP": 3}
with self.assertRaises(ValueError):
dataset_ops.build_dataset(
file_pattern=self._file_pattern,
input_config=self._input_config,
batch_size=4)
def testUnknownLabelRaisesValueError(self):
self._input_config["label_feature"] = "label_str"
# label_map does not include "NTP".
self._input_config["label_map"] = {"PC": 1, "AFP": 0}
dataset = dataset_ops.build_dataset(
file_pattern=self._file_pattern,
input_config=self._input_config,
batch_size=4)
# We need an initializable iterator when using labels because of the
# stateful label id hash table.
iterator = dataset.make_initializable_iterator()
inputs = iterator.get_next()
init_op = tf.tables_initializer()
# Expect features and labels.
self.assertItemsEqual(["time_series_features", "aux_features", "labels"],
inputs.keys())
labels = inputs["labels"]
with self.session() as sess:
sess.run([init_op, iterator.initializer])
# Unknown label "NTP".
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(labels)
def testLabelFiltering(self):
self._input_config["label_feature"] = "label_str"
# "AFP" is -1, so these examples should be filtered.
self._input_config["label_map"] = {"PC": 1, "AFP": -1, "NTP": 0}
dataset = dataset_ops.build_dataset(
file_pattern=self._file_pattern,
input_config=self._input_config,
batch_size=4)
# We need an initializable iterator when using labels because of the
# stateful label id hash table.
iterator = dataset.make_initializable_iterator()
inputs = iterator.get_next()
init_op = tf.tables_initializer()
# Expect features and labels.
self.assertItemsEqual(["time_series_features", "aux_features", "labels"],
inputs.keys())
labels = inputs["labels"]
with self.session() as sess:
sess.run([init_op, iterator.initializer])
# Fetch 3 batches.
np.testing.assert_array_equal([1, 0, 1, 0], sess.run(labels))
np.testing.assert_array_equal([1, 0, 1], sess.run(labels))
# No more batches.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(labels)
def testReverseTimeSeries(self):
dataset = dataset_ops.build_dataset(
file_pattern=self._file_pattern,
input_config=self._input_config,
batch_size=4,
reverse_time_series_prob=1,
include_labels=False)
# We can use a one-shot iterator without labels because we don't have the
# stateful hash map for label ids.
iterator = dataset.make_one_shot_iterator()
features = iterator.get_next()
# Expect features only.
self.assertItemsEqual(["time_series_features", "aux_features"],
features.keys())
with self.session() as sess:
# Batch 1.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[7], [6], [5], [4], [3], [2], [1], [0]],
[[7], [6], [5], [4], [3], [2], [1], [0]],
[[7], [6], [5], [4], [3], [2], [1], [0]],
[[7], [6], [5], [4], [3], [2], [1], [0]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[3], [2], [1], [0]],
[[3], [2], [1], [0]],
[[3], [2], [1], [0]],
[[3], [2], [1], [0]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[100], [101], [102], [103]],
f["aux_features"]["aux_feature"])
# Batch 2.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[7], [6], [5], [4], [3], [2], [1], [0]],
[[7], [6], [5], [4], [3], [2], [1], [0]],
[[7], [6], [5], [4], [3], [2], [1], [0]],
[[7], [6], [5], [4], [3], [2], [1], [0]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[3], [2], [1], [0]],
[[3], [2], [1], [0]],
[[3], [2], [1], [0]],
[[3], [2], [1], [0]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[104], [105], [106], [107]],
f["aux_features"]["aux_feature"])
# Batch 3.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[7], [6], [5], [4], [3], [2], [1], [0]],
[[7], [6], [5], [4], [3], [2], [1], [0]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[3], [2], [1], [0]],
[[3], [2], [1], [0]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[108], [109]],
f["aux_features"]["aux_feature"])
# No more batches.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(features)
def testRepeat(self):
dataset = dataset_ops.build_dataset(
file_pattern=self._file_pattern,
input_config=self._input_config,
batch_size=4,
include_labels=False)
# We can use a one-shot iterator without labels because we don't have the
# stateful hash map for label ids.
iterator = dataset.make_one_shot_iterator()
features = iterator.get_next()
# Expect features only.
self.assertItemsEqual(["time_series_features", "aux_features"],
features.keys())
with self.session() as sess:
# Batch 1.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[100], [101], [102], [103]],
f["aux_features"]["aux_feature"])
# Batch 2.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[104], [105], [106], [107]],
f["aux_features"]["aux_feature"])
# Batch 3.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[108], [109]],
f["aux_features"]["aux_feature"])
# No more batches.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(features)
def testTPU(self):
dataset = dataset_ops.build_dataset(
file_pattern=self._file_pattern,
input_config=self._input_config,
batch_size=4,
include_labels=False)
# We can use a one-shot iterator without labels because we don't have the
# stateful hash map for label ids.
iterator = dataset.make_one_shot_iterator()
features = iterator.get_next()
# Expect features only.
self.assertItemsEqual(["time_series_features", "aux_features"],
features.keys())
with self.session() as sess:
# Batch 1.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[100], [101], [102], [103]],
f["aux_features"]["aux_feature"])
# Batch 2.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[104], [105], [106], [107]],
f["aux_features"]["aux_feature"])
# Batch 3.
f = sess.run(features)
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3], [4], [5], [6], [7]],
[[0], [1], [2], [3], [4], [5], [6], [7]],
], f["time_series_features"]["global_view"])
np.testing.assert_array_almost_equal([
[[0], [1], [2], [3]],
[[0], [1], [2], [3]],
], f["time_series_features"]["local_view"])
np.testing.assert_array_almost_equal([[108], [109]],
f["aux_features"]["aux_feature"])
# No more batches.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(features)
if __name__ == "__main__":
tf.test.main()
|
|
"""
File: PulseOutStart01.py
Library Call Demonstrated: mcculw.ul.pulse_out_start()
mcculw.ul.pulse_out_stop()
Purpose: Controls an Output Timer Channel.
Demonstration: Sends a frequency output to Timer 0.
Special Requirements: Device must have a Timer output.
"""
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
import tkinter as tk
from mcculw import ul
from mcculw.enums import CounterChannelType
from mcculw.ul import ULError
from mcculw.device_info import DaqDeviceInfo
try:
from ui_examples_util import UIExample, show_ul_error, validate_float_entry
except ImportError:
from .ui_examples_util import UIExample, show_ul_error, validate_float_entry
class PulseOutStart01(UIExample):
def __init__(self, master=None):
super(PulseOutStart01, self).__init__(master)
master.protocol("WM_DELETE_WINDOW", self.exit)
# By default, the example detects all available devices and selects the
# first device listed.
# If use_device_detection is set to False, the board_num property needs
# to match the desired board number configured with Instacal.
use_device_detection = True
self.board_num = 0
self.first_chan_num = -1
self.last_chan_num = -1
try:
if use_device_detection:
self.configure_first_detected_device()
self.device_info = DaqDeviceInfo(self.board_num)
ctr_info = self.device_info.get_ctr_info()
# Find the first pulse counter
first_chan = next(
(channel for channel in ctr_info.chan_info
if channel.type == CounterChannelType.CTRPULSE), None)
if first_chan is not None:
last_chan = next(
(channel for channel in reversed(ctr_info.chan_info)
if channel.type == CounterChannelType.CTRPULSE), None)
self.first_chan_num = first_chan.channel_num
self.last_chan_num = last_chan.channel_num
self.create_widgets()
else:
self.create_unsupported_widgets()
except ULError:
self.create_unsupported_widgets(True)
def update_output(self):
try:
timer_num = self.get_channel_num()
frequency = self.get_frequency()
duty_cycle = self.get_duty_cycle()
# Start the pulse output (optional parameters omitted)
actual_freq, actual_duty_cycle, _ = ul.pulse_out_start(
self.board_num, timer_num, frequency, duty_cycle)
self.update_actual_values(actual_freq, actual_duty_cycle)
except ULError as e:
show_ul_error(e)
def exit(self):
# Stop all the timers at exit
if self.first_chan_num != -1:
for chan_num in range(self.first_chan_num, self.last_chan_num + 1):
try:
ul.pulse_out_stop(self.board_num, chan_num)
except ULError as e:
show_ul_error(e)
self.master.destroy()
def update_actual_values(self, actual_freq, actual_duty_cycle):
self.actual_freq_label["text"] = str(actual_freq)
self.actual_duty_cycle_label["text"] = str(actual_duty_cycle)
def get_frequency(self):
try:
return float(self.freq_entry.get())
except ValueError:
return 100000
def get_duty_cycle(self):
try:
return float(self.duty_cycle_entry.get())
except ValueError:
return 0.5
def get_channel_num(self):
if self.last_chan_num == self.first_chan_num:
return self.last_chan_num
try:
return int(self.channel_entry.get())
except ValueError:
return 0
def validate_channel_entry(self, p):
if p == '':
return True
try:
value = int(p)
if value < self.first_chan_num or value > self.last_chan_num:
return False
except ValueError:
return False
return True
def create_widgets(self):
'''Create the tkinter UI'''
self.device_label = tk.Label(self)
self.device_label.pack(fill=tk.NONE, anchor=tk.NW)
self.device_label["text"] = ('Board Number ' + str(self.board_num)
+ ": " + self.device_info.product_name
+ " (" + self.device_info.unique_id + ")")
main_frame = tk.Frame(self)
main_frame.pack(fill=tk.X, anchor=tk.NW)
channel_vcmd = self.register(self.validate_channel_entry)
float_vcmd = self.register(validate_float_entry)
curr_row = 0
if self.last_chan_num != self.first_chan_num:
channel_entry_label = tk.Label(main_frame)
channel_entry_label["text"] = "Channel Number:"
channel_entry_label.grid(row=curr_row, column=0, sticky=tk.W)
self.channel_entry = tk.Spinbox(
main_frame, from_=self.first_chan_num, to=self.last_chan_num,
validate='key', validatecommand=(channel_vcmd, '%P'))
self.channel_entry.grid(row=curr_row, column=1, sticky=tk.W)
curr_row += 1
freq_label = tk.Label(main_frame)
freq_label["text"] = "Frequency:"
freq_label.grid(row=curr_row, column=0, sticky=tk.W)
self.freq_entry = tk.Entry(
main_frame, validate='key', validatecommand=(float_vcmd, '%P'))
self.freq_entry.grid(row=curr_row, column=1, sticky=tk.W)
self.freq_entry.insert(0, "100000")
curr_row += 1
duty_cycle_label = tk.Label(main_frame)
duty_cycle_label["text"] = "Duty Cycle (0-1):"
duty_cycle_label.grid(row=curr_row, column=0, sticky=tk.W)
self.duty_cycle_entry = tk.Entry(
main_frame, validate='key', validatecommand=(float_vcmd, '%P'))
self.duty_cycle_entry.grid(row=curr_row, column=1, sticky=tk.W)
self.duty_cycle_entry.insert(0, "0.5")
curr_row += 1
update_button = tk.Button(main_frame)
update_button["text"] = "Update"
update_button["command"] = self.update_output
update_button.grid(row=curr_row, column=0,
columnspan=2, padx=3, pady=3)
curr_row += 1
actual_freq_left_label = tk.Label(main_frame)
actual_freq_left_label["text"] = "Actual Frequency:"
actual_freq_left_label.grid(row=curr_row, column=0, sticky=tk.W)
self.actual_freq_label = tk.Label(main_frame)
self.actual_freq_label.grid(row=curr_row, column=1, sticky=tk.W)
curr_row += 1
actual_duty_cycle_left_label = tk.Label(main_frame)
actual_duty_cycle_left_label["text"] = "Actual Duty Cycle:"
actual_duty_cycle_left_label.grid(
row=curr_row, column=0, sticky=tk.W)
self.actual_duty_cycle_label = tk.Label(main_frame)
self.actual_duty_cycle_label.grid(
row=curr_row, column=1, sticky=tk.W)
button_frame = tk.Frame(self)
button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)
quit_button = tk.Button(button_frame)
quit_button["text"] = "Quit"
quit_button["command"] = self.exit
quit_button.grid(row=0, column=1, padx=3, pady=3)
if __name__ == "__main__":
# Start the example
PulseOutStart01(master=tk.Tk()).mainloop()
|
|
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import dictfilter
from trakt.core.pagination import PaginationIterator
from trakt.interfaces.base import Interface, authenticated
from trakt.mapper.progress import ProgressMapper
from trakt.mapper.summary import SummaryMapper
import requests
class ShowsInterface(Interface):
path = 'shows'
def get(self, id, extended=None, **kwargs):
response = self.http.get(str(id), query={
'extended': extended
})
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
return SummaryMapper.show(self.client, item)
def recommended(self, period=None, extended=None, page=None, per_page=None, **kwargs):
# Build parameters
params = []
if period:
params.append(period)
# Build query
query = {
'extended': extended,
'page': page,
'limit': per_page
}
# Send request
response = self.http.get(
'recommended',
params=params,
query=query,
**dictfilter(kwargs, get=[
'exceptions'
], pop=[
'pagination'
])
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, PaginationIterator):
return items.with_mapper(lambda items: SummaryMapper.shows(self.client, items))
if isinstance(items, requests.Response):
return items
return SummaryMapper.shows(self.client, items)
def trending(self, extended=None, page=None, per_page=None, **kwargs):
response = self.http.get('trending', query={
'extended': extended,
'page': page,
'limit': per_page
}, **dictfilter(kwargs, get=[
'exceptions'
], pop=[
'pagination'
]))
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, PaginationIterator):
return items.with_mapper(lambda items: SummaryMapper.shows(self.client, items))
if isinstance(items, requests.Response):
return items
return SummaryMapper.shows(self.client, items)
def popular(self, extended=None, page=None, per_page=None, **kwargs):
response = self.http.get('popular', query={
'extended': extended,
'page': page,
'limit': per_page
}, **dictfilter(kwargs, get=[
'exceptions'
], pop=[
'pagination'
]))
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, PaginationIterator):
return items.with_mapper(lambda items: SummaryMapper.shows(self.client, items))
if isinstance(items, requests.Response):
return items
return SummaryMapper.shows(self.client, items)
def next_episode(self, id, extended=None, **kwargs):
response = self.http.get(str(id), 'next_episode', query={
'extended': extended
})
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
return SummaryMapper.episode(self.client, item)
def last_episode(self, id, extended=None, **kwargs):
response = self.http.get(str(id), 'last_episode', query={
'extended': extended
})
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
return SummaryMapper.episode(self.client, item)
def seasons(self, id, extended=None, **kwargs):
response = self.http.get(str(id), [
'seasons'
], query={
'extended': extended
})
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
return SummaryMapper.seasons(self.client, items)
def season(self, id, season, extended=None, **kwargs):
response = self.http.get(str(id), [
'seasons', str(season)
], query={
'extended': extended
})
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
return SummaryMapper.episodes(self.client, items)
def episode(self, id, season, episode, extended=None, **kwargs):
response = self.http.get(str(id), [
'seasons', str(season),
'episodes', str(episode)
], query={
'extended': extended
})
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
return SummaryMapper.episode(self.client, item)
@authenticated
def progress(self, progress_type, id, hidden=False, specials=False, count_specials=True, **kwargs):
query = {
'hidden': hidden,
'specials': specials,
'count_specials': count_specials
}
response = self.http.get(str(id), [
'progress', progress_type
], query=query, **dictfilter(kwargs, pop=[
'authenticated',
'validate_token'
]))
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
return ProgressMapper.progress(self.client, progress_type, item)
@authenticated
def progress_collection(self, id, hidden=False, specials=False, count_specials=True, **kwargs):
return self.progress('collection', id, hidden, specials, count_specials, **kwargs)
@authenticated
def progress_watched(self, id, hidden=False, specials=False, count_specials=True, **kwargs):
return self.progress('watched', id, hidden, specials, count_specials, **kwargs)
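# A rough usage sketch (hedged): assuming a Trakt client configured elsewhere,
# the 'shows' interface above can be reached roughly like this; the show id is
# hypothetical and pagination mirrors the PaginationIterator handling above.
#
#     from trakt import Trakt
#     show = Trakt['shows'].get('game-of-thrones', extended='full')
#     for show in Trakt['shows'].trending(per_page=10, pagination=True):
#         ...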
|
|
import time
import errno
import socket
import threading
import traceback
from pyroute2 import config
from pyroute2.common import basestring
from pyroute2.common import reduce
from pyroute2.common import dqn2int
from pyroute2.netlink import NetlinkError
from pyroute2.netlink.rtnl.req import IPLinkRequest
from pyroute2.netlink.rtnl.ifinfmsg import IFF_MASK
from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg
from pyroute2.ipdb.transactional import Transactional
from pyroute2.ipdb.transactional import update
from pyroute2.ipdb.linkedset import LinkedSet
from pyroute2.ipdb.linkedset import IPaddrSet
from pyroute2.ipdb.common import CreateException
from pyroute2.ipdb.common import CommitException
from pyroute2.ipdb.common import SYNC_TIMEOUT
def _get_data_fields():
ret = []
for data in ('bridge_data',
'bond_data',
'tuntap_data',
'vxlan_data',
'gre_data',
'macvlan_data',
'macvtap_data',
'ipvlan_data'):
msg = getattr(ifinfmsg.ifinfo, data)
ret += [msg.nla2name(i[0]) for i in msg.nla_map]
return ret
class Interface(Transactional):
'''
Objects of this class represent a network interface and
all related objects:
* addresses
* (todo) neighbours
* (todo) routes
Interfaces provide a transactional model and can act as
context managers. Any attribute change implicitly
starts a transaction. The transaction can be managed
with three methods:
* review() -- review changes
* rollback() -- drop all the changes
* commit() -- try to apply changes
If anything goes wrong during transaction commit,
it will be rolled back automatically and an
exception will be raised. The failed transaction review
will be attached to the exception.
'''
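# A rough usage sketch of the transactional model described above; the
# interface name and address are hypothetical:
#
#     with ipdb.interfaces['eth0'] as i:      # implicit transaction
#         i.add_ip('10.0.0.1/24')
#         i.up()
#
#     i = ipdb.interfaces['eth0']             # explicit transaction
#     i.begin()
#     i.add_ip('10.0.0.1/24')
#     i.review()                              # inspect pending changes
#     i.commit()                              # or i.rollback()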
_fields_cmp = {'flags': lambda x, y: x & y & IFF_MASK == y & IFF_MASK}
_virtual_fields = ['ipdb_scope', 'ipdb_priority']
_xfields = {'common': [ifinfmsg.nla2name(i[0]) for i
in ifinfmsg.nla_map]}
_xfields['common'].append('index')
_xfields['common'].append('flags')
_xfields['common'].append('mask')
_xfields['common'].append('change')
_xfields['common'].append('kind')
_xfields['common'].append('peer')
_xfields['common'].append('vlan_id')
_xfields['common'].append('bond_mode')
_xfields['common'].extend(_get_data_fields())
_fields = reduce(lambda x, y: x + y, _xfields.values())
_fields.extend(_virtual_fields)
def __init__(self, ipdb, mode=None, parent=None, uid=None):
'''
Parameters:
* ipdb -- ipdb() reference
* mode -- transaction mode
'''
Transactional.__init__(self, ipdb, mode)
self.cleanup = ('header',
'linkinfo',
'af_spec',
'attrs',
'event',
'map',
'stats',
'stats64',
'__align')
self.ingress = None
self.egress = None
self.nlmsg = None
self._exception = None
self._tb = None
self._load_event = threading.Event()
self._linked_sets.add('ipaddr')
self._linked_sets.add('ports')
self._freeze = None
# 8<-----------------------------------
# local setup: direct state is required
with self._direct_state:
for i in self._fields:
self[i] = None
for i in ('change', 'mask'):
del self[i]
self['ipaddr'] = IPaddrSet()
self['ports'] = LinkedSet()
self['ipdb_priority'] = 0
# 8<-----------------------------------
def __hash__(self):
return self['index']
@property
def if_master(self):
'''
[property] Link to the parent interface -- if it exists
'''
return self.get('master', None)
def detach(self):
self.ipdb.detach(self['ifname'], self['index'], self.nlmsg)
return self
def freeze(self):
dump = self.pick()
def cb(ipdb, msg, action):
if msg.get('index', -1) == dump['index']:
try:
# important: that's a rollback, so do not
# try to revert changes in the case of failure
self.commit(transaction=dump, rollback=True)
except Exception:
pass
self._freeze = self.ipdb.register_callback(cb)
return self
def unfreeze(self):
self.ipdb.unregister_callback(self._freeze)
self._freeze = None
return self
def load(self, data):
'''
Load the data from a dictionary to an existing
transaction. Requires `commit()` call, or must be
called from within a `with` statement.
Sample::
data = json.loads(...)
with ipdb.interfaces['dummy1'] as i:
i.load(data)
Sample, mode `explicit`::
data = json.loads(...)
i = ipdb.interfaces['dummy1']
i.begin()
i.load(data)
i.commit()
'''
for key in data:
if key == 'ipaddr':
for addr in self['ipaddr']:
self.del_ip(*addr)
for addr in data[key]:
if isinstance(addr, basestring):
addr = (addr, )
self.add_ip(*addr)
elif key == 'ports':
for port in self['ports']:
self.del_port(port)
for port in data[key]:
self.add_port(port)
elif key == 'neighbours':
# ignore neighbours on load
pass
else:
self[key] = data[key]
return self
def make_transaction(self, data):
'''
Create a new transaction instance from a dictionary.
One can apply it the with `commit(transaction=...)`
call.
Sample::
data = json.loads(...)
t = ipdb.interfaces['dummy1'].make_transaction(data)
ipdb.interfaces['dummy1'].commit(transaction=t)
'''
with self._write_lock:
template = self.__class__(ipdb=self.ipdb, mode='snapshot')
template.load_dict(data)
return template
def load_dict(self, data):
'''
Update the interface info from a dictionary.
This call always bypasses open transactions, loading
changes directly into the interface data.
'''
with self._direct_state:
self.load(data)
def load_netlink(self, dev):
'''
Update the interface info from RTM_NEWLINK message.
This call always bypasses open transactions, loading
changes directly into the interface data.
'''
with self._direct_state:
if self['ipdb_scope'] == 'locked':
# do not touch locked interfaces
return
if self['ipdb_scope'] in ('shadow', 'create'):
# ignore non-broadcast messages
if dev['header']['sequence_number'] != 0:
return
# ignore ghost RTM_NEWLINK messages
if (config.kernel[0] < 3) and \
(not dev.get_attr('IFLA_AF_SPEC')):
return
self['ipdb_scope'] = 'system'
if self.ipdb.debug:
self.nlmsg = dev
for (name, value) in dev.items():
self[name] = value
for item in dev['attrs']:
name, value = item[:2]
norm = ifinfmsg.nla2name(name)
self[norm] = value
# load interface kind
linkinfo = dev.get_attr('IFLA_LINKINFO')
if linkinfo is not None:
kind = linkinfo.get_attr('IFLA_INFO_KIND')
if kind is not None:
self['kind'] = kind
if kind == 'vlan':
data = linkinfo.get_attr('IFLA_INFO_DATA')
self['vlan_id'] = data.get_attr('IFLA_VLAN_ID')
if kind in ('vxlan', 'macvlan', 'macvtap',
'gre', 'gretap', 'ipvlan'):
data = linkinfo.get_attr('IFLA_INFO_DATA')
for nla in data.get('attrs', []):
norm = ifinfmsg.nla2name(nla[0])
self[norm] = nla[1]
# get OVS master and override IFLA_MASTER value
try:
master = linkinfo.get_attr('IFLA_INFO_OVS_MASTER')
if master:
self['master'] = self.ipdb.interfaces[master].index
except (AttributeError, KeyError):
pass
# the rest is possible only when interface
# is used in IPDB, not standalone
if self.ipdb is not None:
self['ipaddr'] = self.ipdb.ipaddr[self['index']]
self['neighbours'] = self.ipdb.neighbours[self['index']]
# finally, cleanup all not needed
for item in self.cleanup:
if item in self:
del self[item]
self.sync()
def sync(self):
self._load_event.set()
def wait_ip(self, *argv, **kwarg):
return self['ipaddr'].wait_ip(*argv, **kwarg)
@update
def add_ip(self, direct, ip,
mask=None,
broadcast=None,
anycast=None,
scope=None):
'''
Add IP address to an interface
Keyword arguments:
* mask
* broadcast
* anycast
* scope
'''
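# Both calling conventions are accepted (the address below is hypothetical):
#     iface.add_ip('10.0.0.1/24')    or    iface.add_ip('10.0.0.1', 24)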
# split mask
if mask is None:
ip, mask = ip.split('/')
if mask.find('.') > -1:
mask = dqn2int(mask)
else:
mask = int(mask, 0)
elif isinstance(mask, basestring):
mask = dqn2int(mask)
# FIXME: make it more generic
# skip IPv6 link-local addresses
if ip[:4] == 'fe80' and mask == 64:
return self
if not direct:
# if it is an interface object, route any change
# to the last transaction
transaction = self.last()
transaction.add_ip(ip, mask, broadcast, anycast, scope)
else:
# if it is a transaction or an interface update, apply the change
self['ipaddr'].unlink((ip, mask))
request = {}
if broadcast is not None:
request['broadcast'] = broadcast
if anycast is not None:
request['anycast'] = anycast
if scope is not None:
request['scope'] = scope
self['ipaddr'].add((ip, mask), raw=request)
return self
@update
def del_ip(self, direct, ip, mask=None):
'''
Delete IP address from an interface
'''
if mask is None:
ip, mask = ip.split('/')
if mask.find('.') > -1:
mask = dqn2int(mask)
else:
mask = int(mask, 0)
if not direct:
transaction = self.last()
if (ip, mask) in transaction['ipaddr']:
transaction.del_ip(ip, mask)
else:
self['ipaddr'].unlink((ip, mask))
self['ipaddr'].remove((ip, mask))
return self
@update
def add_port(self, direct, port):
'''
Add a slave port to a bridge or bonding
'''
if isinstance(port, Interface):
port = port['index']
if not direct:
transaction = self.last()
transaction.add_port(port)
else:
self['ports'].unlink(port)
self['ports'].add(port)
return self
@update
def del_port(self, direct, port):
'''
Remove a slave port from a bridge or bonding
'''
if isinstance(port, Interface):
port = port['index']
if not direct:
transaction = self.last()
if port in transaction['ports']:
transaction.del_port(port)
else:
self['ports'].unlink(port)
self['ports'].remove(port)
return self
def reload(self):
'''
Reload interface information
'''
countdown = 3
while countdown:
links = self.nl.get_links(self['index'])
if links:
self.load_netlink(links[0])
break
else:
countdown -= 1
time.sleep(1)
return self
def filter(self, ftype):
ret = {}
for key in self:
if key in self._xfields[ftype]:
ret[key] = self[key]
return ret
def review(self):
ret = super(Interface, self).review()
if self['ipdb_scope'] == 'create':
last = self.last()
ret['+ipaddr'] = last['ipaddr']
ret['+ports'] = last['ports']
del ret['ports']
del ret['ipaddr']
return ret
def _commit_real_ip(self):
for _ in range(3):
try:
return set([(x.get_attr('IFA_ADDRESS'),
x.get('prefixlen')) for x
in self.nl.get_addr(index=self.index)])
except NetlinkError as x:
if x.code == errno.EBUSY:
time.sleep(0.5)
else:
raise
def _commit_add_ip(self, addrs, transaction):
for i in addrs:
# Ignore link-local IPv6 addresses
if i[0][:4] == 'fe80' and i[1] == 64:
continue
# Try to fetch additional address attributes
try:
kwarg = dict([k for k in transaction.ipaddr[i].items()
if k[0] in ('broadcast',
'anycast',
'scope')])
except KeyError:
kwarg = None
# feed the address to the OS
self.ipdb.update_addr(
self.nl.addr('add', self['index'], i[0], i[1],
**kwarg if kwarg else {}), 'add')
# wait for feedback from the OS; we do not pass the
# mask here because we are waiting for this specific
# address, not for just any address on the network
self.wait_ip(i[0], timeout=SYNC_TIMEOUT)
# 8<--------------------------------------
# FIXME: kernel bug, sometimes `addr add` for
# bond interfaces returns success, but actually
# does nothing
if self['kind'] == 'bond':
while True:
try:
# dirtiest hack, but we have to use it here
time.sleep(0.1)
self.nl.addr('add', self['index'], i[0], i[1])
# continue to try to add the address
# until the kernel reports `file exists`
#
# a crude solution, but it should help
except NetlinkError as e:
if e.code == errno.EEXIST:
break
else:
raise
except Exception:
raise
def commit(self, tid=None, transaction=None, rollback=False, newif=False):
'''
Commit transaction. In the case of exception all
changes applied during commit will be reverted.
'''
error = None
added = None
removed = None
drop = True
if tid:
transaction = self._transactions[tid]
else:
if transaction:
drop = False
else:
transaction = self.last()
wd = None
with self._write_lock:
# if the interface does not exist, create it first ;)
if self['ipdb_scope'] != 'system':
request = IPLinkRequest(self.filter('common'))
# create watchdog
wd = self.ipdb.watchdog(ifname=self['ifname'])
newif = True
try:
# 8<----------------------------------------------------
# ACHTUNG: hack for old platforms
if request.get('address', None) == '00:00:00:00:00:00':
del request['address']
del request['broadcast']
# 8<----------------------------------------------------
try:
self.nl.link('add', **request)
except NetlinkError as x:
# File exists
if x.code == errno.EEXIST:
# A bit special case, could be one of two cases:
#
# 1. A race condition between two different IPDB
# processes
# 2. An attempt to create dummy0, gre0, bond0 when
# the corresponding module is not loaded. Being
# loaded, the module creates a default interface
# by itself, causing the request to fail
#
# The exception in that case can cause DB
# inconsistency, since not only the interface
# creation but also IP address changes etc. may
# already be queued.
#
# So we ignore this particular exception and try to
# continue, treating the interface as created by us.
pass
# Operation not supported
elif x.code == errno.EOPNOTSUPP and \
request.get('index', 0) != 0:
# ACHTUNG: hack for old platforms
request = IPLinkRequest({'ifname': self['ifname'],
'kind': self['kind'],
'index': 0})
self.nl.link('add', **request)
else:
raise
except Exception as e:
# on failure, invalidate the interface and detach it
# from the parent
# 1. drop the IPRoute() link
self.nl = None
# 2. clean up ipdb
self.detach()
# 3. invalidate the interface
with self._direct_state:
for i in tuple(self.keys()):
del self[i]
# 4. the rest
self._mode = 'invalid'
self._exception = e
self._tb = traceback.format_exc()
# raise the exception
raise
if wd is not None:
wd.wait()
if self['index'] == 0:
# This is an interface creation time issue on
# old or compat platforms. The interface index
# may not be known yet, but we cannot continue
# without it. It will be updated anyway, but
# it is better to force the lookup.
ix = self.nl.link_lookup(ifname=self['ifname'])
if ix:
self['index'] = ix[0]
else:
raise CreateException()
# now we have our index and IP set and all other stuff
snapshot = self.pick()
try:
removed = snapshot - transaction
added = transaction - snapshot
# 8<---------------------------------------------
# Interface slaves
self['ports'].set_target(transaction['ports'])
for i in removed['ports']:
# detach the port
port = self.ipdb.interfaces[i]
port.set_target('master', None)
port.mirror_target('master', 'link')
self.nl.link('set', index=port['index'], master=0)
for i in added['ports']:
# enslave the port
port = self.ipdb.interfaces[i]
port.set_target('master', self['index'])
port.mirror_target('master', 'link')
self.nl.link('set',
index=port['index'],
master=self['index'])
if removed['ports'] or added['ports']:
for link in self.nl.get_links(
*(removed['ports'] | added['ports'])):
self.ipdb.device_put(link)
self['ports'].target.wait(SYNC_TIMEOUT)
if not self['ports'].target.is_set():
raise CommitException('ports target is not set')
# RHEL 6.5 compat fix -- an explicit timeout
# it gives a time for all the messages to pass
if not self.ipdb.nl.capabilities['create_bridge']:
time.sleep(1)
# wait for proper targets on ports
for i in list(added['ports']) + list(removed['ports']):
port = self.ipdb.interfaces[i]
target = port._local_targets['master']
target.wait(SYNC_TIMEOUT)
del port._local_targets['master']
del port._local_targets['link']
if not target.is_set():
raise CommitException('master target failed')
if i in added['ports']:
if port.if_master != self['index']:
raise CommitException('master set failed')
else:
if port.if_master == self['index']:
raise CommitException('master unset failed')
# 8<---------------------------------------------
# Interface changes
request = IPLinkRequest()
for key in added:
if (key in self._xfields['common']) and \
(key != 'kind'):
request[key] = added[key]
request['index'] = self['index']
# apply changes only if there is something to apply
if any([request[item] is not None for item in request
if item != 'index']):
self.nl.link('set', **request)
# hardcoded pause -- if the interface was moved
# across network namespaces
if 'net_ns_fd' in request:
while True:
# wait until the interface disappears
# from the main network namespace
try:
for link in self.nl.get_links(self['index']):
self.ipdb.device_put(link)
except NetlinkError as e:
if e.code == errno.ENODEV:
break
raise
except Exception:
raise
time.sleep(0.1)
# 8<---------------------------------------------
# IP address changes
#
# There is one corner case: if the interface didn't
# exist before commit(), the transaction may not
# contain automatic IPv6 addresses.
#
# So fetch here possible addresses and use it to
# extend the transaction
target = self._commit_real_ip().union(set(transaction['ipaddr']))
self['ipaddr'].set_target(target)
# 8<--------------------------------------
for i in removed['ipaddr']:
# Ignore link-local IPv6 addresses
if i[0][:4] == 'fe80' and i[1] == 64:
continue
# When you remove a primary IP addr, the whole
# subnetwork can be removed too. In that case the
# request will fail, but it is OK, no need to roll back
try:
self.ipdb.update_addr(
self.nl.addr('delete', self['index'], i[0], i[1]),
'remove')
except NetlinkError as x:
# bypass only errno 99, 'Cannot assign requested address'
if x.code != errno.EADDRNOTAVAIL:
raise
except socket.error as x:
# bypass illegal IP requests
if isinstance(x.args[0], basestring) and \
x.args[0].startswith('illegal IP'):
continue
raise
# 8<--------------------------------------
target = added['ipaddr']
for i in range(3): # just to be sure
self._commit_add_ip(target, transaction)
real = self._commit_real_ip()
if real >= set(transaction['ipaddr']):
break
else:
target = set(transaction['ipaddr']) - real
else:
raise CommitException('ipaddr setup error', i)
# 8<--------------------------------------
if removed['ipaddr'] or added['ipaddr']:
# 8<--------------------------------------
# bond and bridge interfaces do not send
# IPv6 address updates while they are down
#
# besides that, bridge interfaces are
# down by default, so they never send
# address updates from the beginning
#
# so if we need, force address load
#
# FIXME: probably, we should handle other
# types as well
if self['kind'] in ('bond', 'bridge', 'veth'):
self.ipdb.update_addr(self.nl.get_addr(), 'add')
# 8<--------------------------------------
self['ipaddr'].target.wait(SYNC_TIMEOUT)
if not self['ipaddr'].target.is_set():
raise CommitException('ipaddr target is not set')
# 8<---------------------------------------------
# reload interface to hit targets
if transaction._targets:
try:
self.reload()
except NetlinkError as e:
if e.code == errno.ENODEV: # No such device
if ('net_ns_fd' in added) or \
('net_ns_pid' in added):
# it means, that the device was moved
# to another netns; just give up
if drop:
self.drop(transaction)
return self
# wait for targets
transaction._wait_all_targets()
# 8<---------------------------------------------
# Interface removal
if (added.get('ipdb_scope') in ('shadow', 'remove')) or\
((added.get('ipdb_scope') == 'create') and rollback):
wd = self.ipdb.watchdog(action='RTM_DELLINK',
ifname=self['ifname'])
if added.get('ipdb_scope') in ('shadow', 'create'):
self.set_item('ipdb_scope', 'locked')
self.nl.link('delete', **self)
wd.wait()
if added.get('ipdb_scope') == 'shadow':
self.set_item('ipdb_scope', 'shadow')
if added['ipdb_scope'] == 'create':
self.load_dict(transaction)
if drop:
self.drop(transaction)
return self
# 8<---------------------------------------------
# Iterate callback chain
for ch in self._commit_hooks:
# An exception will rollback the transaction
ch(self.dump(), snapshot.dump(), transaction.dump())
# 8<---------------------------------------------
except Exception as e:
# something went wrong: roll the transaction back
if not rollback:
ret = self.commit(transaction=snapshot,
rollback=True,
newif=newif)
# if some error was returned by the internal
# closure, substitute the initial one
if isinstance(ret, Exception):
error = ret
else:
error = e
error.traceback = traceback.format_exc()
elif isinstance(e, NetlinkError) and \
getattr(e, 'code', 0) == errno.EPERM:
# It is <Operation not permitted>, caught in
# the rollback. So return it -- see ~5 lines above
e.traceback = traceback.format_exc()
return e
else:
# something went wrong during the automatic rollback.
# that's the worst case, but it is still possible,
# since we have no locks on the OS level.
self['ipaddr'].clear_target()
self['ports'].clear_target()
# reload all the database -- it can take a long time,
# but it is required since we have no idea, what is
# the result of the failure
#
# ACHTUNG: database reload is asynchronous, so after
# getting RuntimeError() from commit(), take a seat
# and rest for a while. It is an extreme case, it
# should not happen at all, and there is no sync.
for link in self.nl.get_links():
self.ipdb.device_put(link)
self.ipdb.update_addr(self.nl.get_addr())
x = RuntimeError()
x.cause = e
x.traceback = traceback.format_exc()
raise x
# if it is not a rollback turn
if drop and not rollback:
# drop last transaction in any case
self.drop(transaction)
# raise exception for failed transaction
if error is not None:
error.transaction = transaction
raise error
time.sleep(config.commit_barrier)
return self
def up(self):
'''
Shortcut: change the interface state to 'up'.
'''
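# flag bit 0 is IFF_UP, so setting it asks the kernel to bring the link up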
if self['flags'] is None:
self['flags'] = 1
else:
self['flags'] |= 1
return self
def down(self):
'''
Shortcut: change the interface state to 'down'.
'''
if self['flags'] is None:
self['flags'] = 0
else:
self['flags'] &= ~(self['flags'] & 1)
return self
def remove(self):
'''
Mark the interface for removal
'''
self['ipdb_scope'] = 'remove'
return self
def shadow(self):
'''
Remove the interface from the OS, but leave it in the
database. When one tries to re-create an interface with
the same name, all the old saved attributes will apply
to the new interface, incl. the MAC address and even the
interface index. Please be aware that the interface
index can be reused by the OS while the interface is in
the shadow state; in that case re-creation will fail.
'''
self['ipdb_scope'] = 'shadow'
return self
|
|
#!/usr/bin/env python
# Copyright 2012 Citrix Systems, Inc. Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. Citrix Systems, Inc.
# reserves all rights not expressly granted by the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Automatically generated by addcopyright.py at 04/03/2012
# -*- coding: utf-8 -*-
"""CloudStack Python utility library"""
import sys, os, subprocess, errno, re, time, glob
import urllib2
import xml.dom.minidom
import logging
import socket
# exit() error constants
E_GENERIC= 1
E_NOKVM = 2
E_NODEFROUTE = 3
E_DHCP = 4
E_NOPERSISTENTNET = 5
E_NETRECONFIGFAILED = 6
E_VIRTRECONFIGFAILED = 7
E_FWRECONFIGFAILED = 8
E_AGENTRECONFIGFAILED = 9
E_AGENTFAILEDTOSTART = 10
E_NOFQDN = 11
E_SELINUXENABLED = 12
try: E_USAGE = os.EX_USAGE
except AttributeError: E_USAGE = 64
E_NEEDSMANUALINTERVENTION = 13
E_INTERRUPTED = 14
E_SETUPFAILED = 15
E_UNHANDLEDEXCEPTION = 16
E_MISSINGDEP = 17
Unknown = 0
Fedora = 1
CentOS = 2
Ubuntu = 3
RHEL6 = 4
IPV4 = 4
IPV6 = 6
#=================== DISTRIBUTION DETECTION =================
if os.path.exists("/etc/fedora-release"): distro = Fedora
elif os.path.exists("/etc/centos-release"): distro = CentOS
elif os.path.exists("/etc/redhat-release"):
version = file("/etc/redhat-release").readline()
if version.find("Red Hat Enterprise Linux Server release 6") != -1:
distro = RHEL6
elif version.find("CentOS release") != -1:
distro = CentOS
else:
distro = CentOS
elif os.path.exists("/etc/legal") and "Ubuntu" in file("/etc/legal").read(-1): distro = Ubuntu
else: distro = Unknown
logFileName=None
# ================== LIBRARY UTILITY CODE=============
def setLogFile(logFile):
global logFileName
logFileName=logFile
def read_properties(propfile):
if not hasattr(propfile,"read"): propfile = file(propfile)
properties = propfile.read().splitlines()
properties = [ s.strip() for s in properties ]
properties = [ s for s in properties if
s and
not s.startswith("#") and
not s.startswith(";") ]
#[ logging.debug("Valid config file line: %s",s) for s in properties ]
proppairs = [ s.split("=",1) for s in properties ]
return dict(proppairs)
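# e.g. a properties file containing the (hypothetical) line "host=192.168.1.1"
# yields {'host': '192.168.1.1'}; lines starting with '#' or ';' are skipped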
def stderr(msgfmt,*args):
"""Print a message to stderr, optionally interpolating the arguments into it"""
msgfmt += "\n"
if logFileName != None:
sys.stderr = open(logFileName, 'a+')
if args: sys.stderr.write(msgfmt%args)
else: sys.stderr.write(msgfmt)
def exit(errno=E_GENERIC,message=None,*args):
"""Exit with an error status code, printing a message to stderr if specified"""
if message: stderr(message,*args)
sys.exit(errno)
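# resolve() maps each getaddrinfo() result to (address, address family):
# the sockaddr tuple has 2 elements for IPv4 and 4 for IPv6, so len(x[4]) + 2
# yields 4 or 6, matching the IPV4/IPV6 constants above.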
def resolve(host,port):
return [ (x[4][0],len(x[4])+2) for x in socket.getaddrinfo(host,port,socket.AF_UNSPEC,socket.SOCK_STREAM, 0, socket.AI_PASSIVE) ]
def resolves_to_ipv6(host,port):
return resolve(host,port)[0][1] == IPV6
### add check_call to Python 2.4 by patching the subprocess module at runtime
if hasattr(subprocess,"check_call"):
from subprocess import CalledProcessError, check_call
else:
class CalledProcessError(Exception):
def __init__(self, returncode, cmd):
self.returncode = returncode ; self.cmd = cmd
def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
subprocess.CalledProcessError = CalledProcessError
def check_call(*popenargs, **kwargs):
retcode = subprocess.call(*popenargs, **kwargs)
cmd = kwargs.get("args")
if cmd is None: cmd = popenargs[0]
if retcode: raise subprocess.CalledProcessError(retcode, cmd)
return retcode
subprocess.check_call = check_call
# python 2.4 does not have this
try:
any = any
all = all
except NameError:
def any(sequence):
for i in sequence:
if i: return True
return False
def all(sequence):
for i in sequence:
if not i: return False
return True
class Command:
"""This class simulates a shell command"""
def __init__(self,name,parent=None):
self.__name = name
self.__parent = parent
def __getattr__(self,name):
if name == "_print": name = "print"
return Command(name,self)
def __call__(self,*args,**kwargs):
cmd = self.__get_recursive_name() + list(args)
#print " ",cmd
kwargs = dict(kwargs)
if "stdout" not in kwargs: kwargs["stdout"] = subprocess.PIPE
if "stderr" not in kwargs: kwargs["stderr"] = subprocess.PIPE
popen = subprocess.Popen(cmd,**kwargs)
m = popen.communicate()
ret = popen.wait()
if ret:
e = CalledProcessError(ret,cmd)
e.stdout,e.stderr = m
raise e
class CommandOutput:
def __init__(self,stdout,stderr):
self.stdout = stdout
self.stderr = stderr
return CommandOutput(*m)
def __lt__(self,other):
cmd = self.__get_recursive_name()
#print " ",cmd,"<",other
popen = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
m = popen.communicate(other)
ret = popen.wait()
if ret:
e = CalledProcessError(ret,cmd)
e.stdout,e.stderr = m
raise e
class CommandOutput:
def __init__(self,stdout,stderr):
self.stdout = stdout
self.stderr = stderr
return CommandOutput(*m)
def __get_recursive_name(self,sep=None):
m = self
l = []
while m is not None:
l.append(m.__name)
m = m.__parent
l.reverse()
if sep: return sep.join(l)
else: return l
def __str__(self):
return '<Command %r>'%self.__get_recursive_name(sep=" ")
def __repr__(self): return self.__str__()
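# Usage sketch for Command (arguments are illustrative):
#     chkconfig("--list", "iptables")     runs ["chkconfig", "--list", "iptables"]
#     augtool._print("/files/etc/hosts")  runs ["augtool", "print", "/files/etc/hosts"]
#     augtool < script                    feeds the script to augtool's stdin
# Each call returns a CommandOutput with .stdout/.stderr, or raises CalledProcessError.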
kvmok = Command("kvm-ok")
getenforce = Command("/usr/sbin/getenforce")
ip = Command("ip")
service = Command("service")
chkconfig = Command("chkconfig")
updatercd = Command("update-rc.d")
ufw = Command("ufw")
iptables = Command("iptables")
iptablessave = Command("iptables-save")
augtool = Command("augtool")
ifconfig = Command("ifconfig")
ifdown = Command("ifdown")
ifup = Command("ifup")
brctl = Command("brctl")
uuidgen = Command("uuidgen")
def is_service_running(servicename):
try:
o = service(servicename,"status")
if distro is Ubuntu:
# status in ubuntu does not signal service status via return code
if "start/running" in o.stdout: return True
return False
else:
# retcode 0, service running
return True
except CalledProcessError,e:
# retcode nonzero, service not running
return False
def stop_service(servicename,force=False):
# This function is idempotent: N calls have the same result as N+1 calls.
if is_service_running(servicename) or force: service(servicename,"stop",stdout=None,stderr=None)
def disable_service(servicename):
# Stops AND disables the service
stop_service(servicename)
if distro is Ubuntu:
updatercd("-f",servicename,"remove",stdout=None,stderr=None)
else:
chkconfig("--del",servicename,stdout=None,stderr=None)
def start_service(servicename,force=False):
# This function is idempotent unless force is True: N calls have the same result as N+1 calls.
if not is_service_running(servicename) or force: service(servicename,"start",stdout=None,stderr=None)
def enable_service(servicename,forcestart=False):
# Enables the service and optionally force-starts it
if distro is Ubuntu:
updatercd("-f",servicename,"remove",stdout=None,stderr=None)
updatercd("-f",servicename,"start","2","3","4","5",".",stdout=None,stderr=None)
else:
chkconfig("--add",servicename,stdout=None,stderr=None)
chkconfig("--level","345",servicename,"on",stdout=None,stderr=None)
start_service(servicename,force=forcestart)
def replace_line(f,startswith,stanza,always_add=False):
lines = [ s.strip() for s in file(f).readlines() ]
newlines = []
replaced = False
for line in lines:
if line.startswith(startswith):
newlines.append(stanza)
replaced = True
else: newlines.append(line)
if not replaced and always_add: newlines.append(stanza)
newlines = [ s + '\n' for s in newlines ]
file(f,"w").writelines(newlines)
def replace_or_add_line(f,startswith,stanza):
return replace_line(f,startswith,stanza,always_add=True)
# ==================================== CHECK FUNCTIONS ==========================
# If they return without exception, it's okay. If they raise a CheckFailed exception, that means a condition
# (generally one that needs administrator intervention) was detected.
class CheckFailed(Exception): pass
#check function
def check_hostname():
"""If the hostname is a non-fqdn, fail with CalledProcessError. Else return 0."""
try: check_call(["hostname",'--fqdn'])
except CalledProcessError:
raise CheckFailed("This machine does not have an FQDN (fully-qualified domain name) for a hostname")
#check function
def check_kvm():
if distro in (Fedora,CentOS,RHEL6):
if os.path.exists("/dev/kvm"): return True
raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
else:
try:
kvmok()
return True
except CalledProcessError:
raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
except OSError,e:
if e.errno is errno.ENOENT: raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
raise
return True
raise AssertionError, "check_kvm() should have never reached this part"
def check_cgroups():
return glob.glob("/*/cpu.shares")
#check function
def check_selinux():
if distro not in [Fedora,CentOS,RHEL6]: return # no selinux outside of those
enforcing = False
config_enforcing = False
try:
output = getenforce().stdout.strip()
if "nforcing" in output:
enforcing = True
if any ( [ s.startswith("SELINUX=enforcing") for s in file("/etc/selinux/config").readlines() ] ):
config_enforcing = True
else:
config_enforcing = False
except (IOError,OSError),e:
if e.errno == 2: pass
else: raise CheckFailed("An unknown error (%s) took place while checking for SELinux"%str(e))
if enforcing:
raise CheckFailed('''SELinux is set to enforcing. There are two options:
1> Set it to permissive in /etc/selinux/config, then reboot the machine.
2> Type 'setenforce Permissive' on the command line, after which you can run this program again.
We strongly suggest option 1, which makes sure SELinux stays permissive after a system reboot.\n''')
if config_enforcing:
print "WARNING: We detected that your SELinux is not configured in permissive. to make sure cloudstack won't block by \
SELinux after system reboot, we strongly suggest you setting it in permissive in /etc/selinux/config, then reboot the machine."
def preflight_checks(do_check_kvm=True):
if distro is Ubuntu:
preflight_checks = [
(check_hostname,"Checking hostname"),
]
else:
preflight_checks = [
(check_hostname,"Checking hostname"),
(check_selinux,"Checking if SELinux is disabled"),
]
#preflight_checks.append( (check_cgroups,"Checking if the control groups /cgroup filesystem is mounted") )
if do_check_kvm: preflight_checks.append( (check_kvm,"Checking for KVM") )
return preflight_checks
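# The caller is expected to iterate over the returned (function, description)
# pairs, print each description and call the function; a CheckFailed aborts setup.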
# ========================== CONFIGURATION TASKS ================================
# A Task is an object whose run() method wraps execute(); execute() performs the configuration steps, reporting back to the caller as it goes by yielding status messages
# the done() method ought to return true if the task has run in the past
# the execute() method must implement the configuration act itself
# run() wraps the output of execute() within a Starting taskname and a Completed taskname message
# tasks have a name
class TaskFailed(Exception): pass
#def __init__(self,code,msg):
#Exception.__init__(self,msg)
#self.code = code
class ConfigTask:
name = "generic config task"
autoMode=False
def __init__(self): pass
def done(self):
"""Returns true if the config task has already been done in the past, false if it hasn't"""
return False
def execute(self):
"""Executes the configuration task. Must not be run if test() returned true.
Must yield strings that describe the steps in the task.
Raises TaskFailed if the task failed at some step.
"""
def run (self):
stderr("Starting %s"%self.name)
it = self.execute()
if not it:
pass # not a yielding iterable
else:
for msg in it: stderr(msg)
stderr("Completed %s"%self.name)
def setAutoMode(self, autoMode):
self.autoMode = autoMode
def isAutoMode(self):
return self.autoMode
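# A minimal, hypothetical ConfigTask following the contract above -- done()
# reports whether the work already happened, execute() yields progress messages
# and raises TaskFailed on error:
#
#     class SetupExampleFile(ConfigTask):
#         name = "example file setup"
#         def done(self):
#             return os.path.exists("/etc/example.conf")
#         def execute(self):
#             yield "Writing /etc/example.conf"
#             file("/etc/example.conf", "w").write("configured=yes\n")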
# ============== these are some configuration tasks ==================
class SetupNetworking(ConfigTask):
name = "network setup"
def __init__(self,brname, pubNic, prvNic):
ConfigTask.__init__(self)
self.brname = brname
self.pubNic = pubNic
self.prvNic = prvNic
self.runtime_state_changed = False
self.was_nm_service_running = None
self.was_net_service_running = None
if distro in (Fedora, CentOS, RHEL6):
self.nmservice = 'NetworkManager'
self.netservice = 'network'
else:
self.nmservice = 'network-manager'
self.netservice = 'networking'
def done(self):
try:
alreadysetup = False
if distro in (Fedora,CentOS, RHEL6):
if self.pubNic != None:
alreadysetup = alreadysetup or augtool._print("/files/etc/sysconfig/network-scripts/ifcfg-%s"%self.pubNic).stdout.strip()
if self.prvNic != None:
alreadysetup = alreadysetup or augtool._print("/files/etc/sysconfig/network-scripts/ifcfg-%s"%self.prvNic).stdout.strip()
if not alreadysetup:
alreadysetup = augtool._print("/files/etc/sysconfig/network-scripts/ifcfg-%s"%self.brname).stdout.strip()
else:
if self.pubNic != None:
alreadysetup = alreadysetup or augtool._print("/files/etc/network/interfaces/iface",self.pubNic).stdout.strip()
if self.prvNic != None:
alreadysetup = alreadysetup or augtool._print("/files/etc/network/interfaces/iface",self.prvNic).stdout.strip()
if not alreadysetup:
alreadysetup = augtool.match("/files/etc/network/interfaces/iface",self.brname).stdout.strip()
return alreadysetup
except OSError,e:
if e.errno is 2: raise TaskFailed("augtool has not been properly installed on this system")
raise
def restore_state(self):
if not self.runtime_state_changed: return
try:
o = ifconfig(self.brname)
bridge_exists = True
except CalledProcessError,e:
print e.stdout + e.stderr
bridge_exists = False
if bridge_exists:
ifconfig(self.brname,"0.0.0.0")
if hasattr(self,"old_net_device"):
ifdown(self.old_net_device)
ifup(self.old_net_device)
try: ifdown(self.brname)
except CalledProcessError: pass
try: ifconfig(self.brname,"down")
except CalledProcessError: pass
try: brctl("delbr",self.brname)
except CalledProcessError: pass
try: ifdown("--force",self.brname)
except CalledProcessError: pass
if self.was_net_service_running is None:
# we do nothing
pass
elif self.was_net_service_running == False:
stop_service(self.netservice,force=True)
time.sleep(1)
else:
# we altered service configuration
stop_service(self.netservice,force=True)
time.sleep(1)
try: start_service(self.netservice,force=True)
except CalledProcessError,e:
if e.returncode == 1: pass
else: raise
time.sleep(1)
if self.was_nm_service_running is None:
# we do nothing
pass
elif self.was_nm_service_running == False:
stop_service(self.nmservice,force=True)
time.sleep(1)
else:
# we altered service configuration
stop_service(self.nmservice,force=True)
time.sleep(1)
start_service(self.nmservice,force=True)
time.sleep(1)
self.runtime_state_changed = False
def execute(self):
yield "Determining default route"
routes = ip.route().stdout.splitlines()
defaultroute = [ x for x in routes if x.startswith("default") ]
if not defaultroute: raise TaskFailed("Your network configuration does not have a default route")
dev = defaultroute[0].split()[4]
yield "Default route assigned to device %s"%dev
self.old_net_device = dev
if distro in (Fedora, CentOS, RHEL6):
inconfigfile = "/".join(augtool.match("/files/etc/sysconfig/network-scripts/*/DEVICE",dev).stdout.strip().split("/")[:-1])
if not inconfigfile: raise TaskFailed("Device %s has not been set up in /etc/sysconfig/network-scripts"%dev)
pathtoconfigfile = inconfigfile[6:]
if distro in (Fedora, CentOS, RHEL6):
automatic = augtool.match("%s/ONBOOT"%inconfigfile,"yes").stdout.strip()
else:
automatic = augtool.match("/files/etc/network/interfaces/auto/*/",dev).stdout.strip()
if not automatic:
if distro is Fedora: raise TaskFailed("Device %s has not been set up in %s as automatic on boot"%(dev,pathtoconfigfile))
else: raise TaskFailed("Device %s has not been set up in /etc/network/interfaces as automatic on boot"%dev)
if distro not in (Fedora , CentOS, RHEL6):
inconfigfile = augtool.match("/files/etc/network/interfaces/iface",dev).stdout.strip()
if not inconfigfile: raise TaskFailed("Device %s has not been set up in /etc/network/interfaces"%dev)
if distro in (Fedora, CentOS, RHEL6):
isstatic = augtool.match(inconfigfile + "/BOOTPROTO","none").stdout.strip()
if not isstatic: isstatic = augtool.match(inconfigfile + "/BOOTPROTO","static").stdout.strip()
else:
isstatic = augtool.match(inconfigfile + "/method","static").stdout.strip()
if not isstatic:
if distro in (Fedora, CentOS, RHEL6): raise TaskFailed("Device %s has not been set up as a static device in %s"%(dev,pathtoconfigfile))
else: raise TaskFailed("Device %s has not been set up as a static device in /etc/network/interfaces"%dev)
if is_service_running(self.nmservice):
self.was_nm_service_running = True
yield "Stopping NetworkManager to avoid automatic network reconfiguration"
disable_service(self.nmservice)
else:
self.was_nm_service_running = False
if is_service_running(self.netservice):
self.was_net_service_running = True
else:
self.was_net_service_running = False
yield "Creating Cloud bridging device and making device %s member of this bridge"%dev
if distro in (Fedora, CentOS, RHEL6):
ifcfgtext = file(pathtoconfigfile).read()
newf = "/etc/sysconfig/network-scripts/ifcfg-%s"%self.brname
#def restore():
#try: os.unlink(newf)
#except OSError,e:
#if errno == 2: pass
#raise
#try: file(pathtoconfigfile,"w").write(ifcfgtext)
#except OSError,e: raise
f = file(newf,"w") ; f.write(ifcfgtext) ; f.flush() ; f.close()
innewconfigfile = "/files" + newf
script = """set %s/DEVICE %s
set %s/NAME %s
set %s/BRIDGE_PORTS %s
set %s/TYPE Bridge
rm %s/HWADDR
rm %s/UUID
rm %s/HWADDR
rm %s/IPADDR
rm %s/DEFROUTE
rm %s/NETMASK
rm %s/GATEWAY
rm %s/BROADCAST
rm %s/NETWORK
set %s/BRIDGE %s
save"""%(innewconfigfile,self.brname,innewconfigfile,self.brname,innewconfigfile,dev,
innewconfigfile,innewconfigfile,innewconfigfile,innewconfigfile,
inconfigfile,inconfigfile,inconfigfile,inconfigfile,inconfigfile,inconfigfile,
inconfigfile,self.brname)
yield "Executing the following reconfiguration script:\n%s"%script
try:
returned = augtool < script
if "Saved 2 file" not in returned.stdout:
print returned.stdout + returned.stderr
#restore()
raise TaskFailed("Network reconfiguration failed.")
else:
yield "Network reconfiguration complete"
except CalledProcessError,e:
#restore()
print e.stdout + e.stderr
raise TaskFailed("Network reconfiguration failed")
else: # Not fedora
backup = file("/etc/network/interfaces").read(-1)
#restore = lambda: file("/etc/network/interfaces","w").write(backup)
script = """set %s %s
set %s %s
set %s/bridge_ports %s
save"""%(automatic,self.brname,inconfigfile,self.brname,inconfigfile,dev)
yield "Executing the following reconfiguration script:\n%s"%script
try:
returned = augtool < script
if "Saved 1 file" not in returned.stdout:
#restore()
raise TaskFailed("Network reconfiguration failed.")
else:
yield "Network reconfiguration complete"
except CalledProcessError,e:
#restore()
print e.stdout + e.stderr
raise TaskFailed("Network reconfiguration failed")
yield "We are going to restart network services now, to make the network changes take effect. Hit ENTER when you are ready."
if self.isAutoMode(): pass
else:
raw_input()
# if we reach here and something goes wrong, we should attempt to revert the running state
# otherwise there is no point
self.runtime_state_changed = True
yield "Enabling and restarting non-NetworkManager networking"
if distro is Ubuntu: ifup(self.brname,stdout=None,stderr=None)
stop_service(self.netservice)
try: enable_service(self.netservice,forcestart=True)
except CalledProcessError,e:
if e.returncode == 1: pass
else: raise
yield "Verifying that the bridge is up"
try:
o = ifconfig(self.brname)
except CalledProcessError,e:
print e.stdout + e.stderr
raise TaskFailed("The bridge could not be set up properly")
yield "Networking restart done"
class SetupCgConfig(ConfigTask):
name = "control groups configuration"
def done(self):
try:
return "group virt" in file("/etc/cgconfig.conf","r").read(-1)
except IOError,e:
if e.errno is 2: raise TaskFailed("cgconfig has not been properly installed on this system")
raise
def execute(self):
cgconfig = file("/etc/cgconfig.conf","r").read(-1)
cgconfig = cgconfig + """
group virt {
cpu {
cpu.shares = 9216;
}
}
"""
file("/etc/cgconfig.conf","w").write(cgconfig)
stop_service("cgconfig")
enable_service("cgconfig",forcestart=True)
class SetupCgRules(ConfigTask):
name = "control group rules setup"
cfgline = "root:/usr/sbin/libvirtd cpu virt/"
def done(self):
try:
return self.cfgline in file("/etc/cgrules.conf","r").read(-1)
except IOError,e:
if e.errno is 2: raise TaskFailed("cgrulesd has not been properly installed on this system")
raise
def execute(self):
cgrules = file("/etc/cgrules.conf","r").read(-1)
cgrules = cgrules + "\n" + self.cfgline + "\n"
file("/etc/cgrules.conf","w").write(cgrules)
stop_service("cgred")
enable_service("cgred")
class SetupCgroupControllers(ConfigTask):
name = "qemu cgroup controllers setup"
cfgline = "cgroup_controllers = [ \"cpu\" ]"
filename = "/etc/libvirt/qemu.conf"
def done(self):
try:
return self.cfgline in file(self.filename,"r").read(-1)
except IOError,e:
if e.errno is 2: raise TaskFailed("qemu has not been properly installed on this system")
raise
def execute(self):
libvirtqemu = file(self.filename,"r").read(-1)
libvirtqemu = libvirtqemu + "\n" + self.cfgline + "\n"
file("/etc/libvirt/qemu.conf","w").write(libvirtqemu)
class SetupSecurityDriver(ConfigTask):
name = "security driver setup"
cfgline = "security_driver = \"none\""
filename = "/etc/libvirt/qemu.conf"
def done(self):
try:
return self.cfgline in file(self.filename,"r").read(-1)
except IOError,e:
if e.errno is 2: raise TaskFailed("qemu has not been properly installed on this system")
raise
def execute(self):
libvirtqemu = file(self.filename,"r").read(-1)
libvirtqemu = libvirtqemu + "\n" + self.cfgline + "\n"
file("/etc/libvirt/qemu.conf","w").write(libvirtqemu)
class SetupLibvirt(ConfigTask):
name = "libvirt setup"
cfgline = "export CGROUP_DAEMON='cpu:/virt'"
def done(self):
try:
if distro in (Fedora,CentOS, RHEL6): libvirtfile = "/etc/sysconfig/libvirtd"
elif distro is Ubuntu: libvirtfile = "/etc/default/libvirt-bin"
else: raise AssertionError, "We should not reach this"
return self.cfgline in file(libvirtfile,"r").read(-1)
except IOError,e:
if e.errno is 2: raise TaskFailed("libvirt has not been properly installed on this system")
raise
def execute(self):
if distro in (Fedora,CentOS, RHEL6): libvirtfile = "/etc/sysconfig/libvirtd"
elif distro is Ubuntu: libvirtfile = "/etc/default/libvirt-bin"
else: raise AssertionError, "We should not reach this"
libvirtbin = file(libvirtfile,"r").read(-1)
libvirtbin = libvirtbin + "\n" + self.cfgline + "\n"
file(libvirtfile,"w").write(libvirtbin)
if distro in (CentOS, Fedora, RHEL6): svc = "libvirtd"
else: svc = "libvirt-bin"
stop_service(svc)
enable_service(svc)
class SetupLiveMigration(ConfigTask):
name = "live migration setup"
stanzas = (
"listen_tcp=1",
'tcp_port="16509"',
'auth_tcp="none"',
"listen_tls=0",
)
def done(self):
try:
lines = [ s.strip() for s in file("/etc/libvirt/libvirtd.conf").readlines() ]
if all( [ stanza in lines for stanza in self.stanzas ] ): return True
except IOError,e:
if e.errno is 2: raise TaskFailed("libvirt has not been properly installed on this system")
raise
def execute(self):
for stanza in self.stanzas:
startswith = stanza.split("=")[0] + '='
replace_or_add_line("/etc/libvirt/libvirtd.conf",startswith,stanza)
if distro in (Fedora, RHEL6):
replace_or_add_line("/etc/sysconfig/libvirtd","LIBVIRTD_ARGS=","LIBVIRTD_ARGS=-l")
elif distro is Ubuntu:
if os.path.exists("/etc/init/libvirt-bin.conf"):
replace_line("/etc/init/libvirt-bin.conf", "exec /usr/sbin/libvirtd","exec /usr/sbin/libvirtd -d -l")
else:
replace_or_add_line("/etc/default/libvirt-bin","libvirtd_opts=","libvirtd_opts='-l -d'")
else:
raise AssertionError("Unsupported distribution")
if distro in (CentOS, Fedora, RHEL6): svc = "libvirtd"
else: svc = "libvirt-bin"
stop_service(svc)
enable_service(svc)
class SetupRequiredServices(ConfigTask):
name = "required services setup"
def done(self):
if distro in (Fedora, RHEL6): nfsrelated = "rpcbind nfslock"
elif distro is CentOS: nfsrelated = "portmap nfslock"
else: return True
return all( [ is_service_running(svc) for svc in nfsrelated.split() ] )
def execute(self):
if distro in (Fedora, RHEL6): nfsrelated = "rpcbind nfslock"
elif distro is CentOS: nfsrelated = "portmap nfslock"
else: raise AssertionError("Unsupported distribution")
for svc in nfsrelated.split(): enable_service(svc)
class SetupFirewall(ConfigTask):
name = "firewall setup"
def done(self):
if distro in (Fedora, CentOS,RHEL6):
if not os.path.exists("/etc/sysconfig/iptables"): return True
if ":on" not in chkconfig("--list","iptables").stdout: return True
else:
if "Status: active" not in ufw.status().stdout: return True
if not os.path.exists("/etc/ufw/before.rules"): return True
rule = "-p tcp -m tcp --dport 16509 -j ACCEPT"
if rule in iptablessave().stdout: return True
return False
def execute(self):
ports = "22 1798 16509".split()
if distro in (Fedora , CentOS, RHEL6):
for p in ports: iptables("-I","INPUT","1","-p","tcp","--dport",p,'-j','ACCEPT')
o = service.iptables.save() ; print o.stdout + o.stderr
else:
for p in ports: ufw.allow(p)
class SetupFirewall2(ConfigTask):
# this closes bug 4371
name = "additional firewall setup"
def __init__(self,brname):
ConfigTask.__init__(self)
self.brname = brname
def done(self):
if distro in (Fedora, CentOS, RHEL6):
if not os.path.exists("/etc/sysconfig/iptables"): return True
if ":on" not in chkconfig("--list","iptables").stdout: return True
return False
else:
if "Status: active" not in ufw.status().stdout: return True
if not os.path.exists("/etc/ufw/before.rules"): return True
return False
def execute(self):
yield "Permitting traffic in the bridge interface, migration port and for VNC ports"
if distro in (Fedora , CentOS, RHEL6):
for rule in (
"-I INPUT 1 -p tcp --dport 5900:6100 -j ACCEPT",
"-I INPUT 1 -p tcp --dport 49152:49216 -j ACCEPT",
):
args = rule.split()
o = iptables(*args)
service.iptables.save(stdout=None,stderr=None)
else:
ufw.allow.proto.tcp("from","any","to","any","port","5900:6100")
ufw.allow.proto.tcp("from","any","to","any","port","49152:49216")
stop_service("ufw")
start_service("ufw")
# Tasks according to distribution -- at some point we will split them in separate modules
def config_tasks(brname, pubNic, prvNic):
if distro is CentOS:
config_tasks = (
SetupNetworking(brname, pubNic, prvNic),
SetupLibvirt(),
SetupRequiredServices(),
SetupFirewall(),
SetupFirewall2(brname),
)
elif distro in (Ubuntu,Fedora, RHEL6):
config_tasks = (
SetupNetworking(brname, pubNic, prvNic),
SetupCgConfig(),
SetupCgRules(),
SetupCgroupControllers(),
SetupSecurityDriver(),
SetupLibvirt(),
SetupLiveMigration(),
SetupRequiredServices(),
SetupFirewall(),
SetupFirewall2(brname),
)
else:
raise AssertionError("Unknown distribution")
return config_tasks
def backup_etc(targetdir):
if not targetdir.endswith("/"): targetdir += "/"
check_call( ["mkdir","-p",targetdir] )
rsynccall = ["rsync","-ax","--delete"] + ["/etc/",targetdir]
check_call( rsynccall )
def restore_etc(targetdir):
if not targetdir.endswith("/"): targetdir += "/"
rsynccall = ["rsync","-ax","--delete"] + [targetdir,"/etc/"]
check_call( rsynccall )
def remove_backup(targetdir):
check_call( ["rm","-rf",targetdir] )
def list_zonespods(host):
text = urllib2.urlopen('http://%s:8096/client/api?command=listPods'%host).read(-1)
dom = xml.dom.minidom.parseString(text)
x = [ (zonename,podname)
for pod in dom.childNodes[0].childNodes
for podname in [ x.childNodes[0].wholeText for x in pod.childNodes if x.tagName == "name" ]
for zonename in [ x.childNodes[0].wholeText for x in pod.childNodes if x.tagName == "zonename" ]
]
return x
def prompt_for_hostpods(zonespods):
"""Ask user to select one from those zonespods
Returns (zone,pod) or None if the user made the default selection."""
while True:
stderr("Type the number of the zone and pod combination this host belongs to (hit ENTER to skip this step)")
print " N) ZONE, POD"
print "================"
for n,(z,p) in enumerate(zonespods):
print "%3d) %s, %s"%(n,z,p)
print "================"
print "> ",
zoneandpod = raw_input().strip()
if not zoneandpod:
# we go with default, do not touch anything, just break
return None
try:
            # if parsing the input as an int fails, report the error and retry
            zoneandpod = int(zoneandpod)
            if zoneandpod >= len(zonespods) or zoneandpod < 0: raise ValueError("%s out of bounds" % zoneandpod)
        except ValueError as e:
            stderr(str(e))
            continue # re-ask
        # the int now indexes a valid zone and pod entry in the zonespods list
return zonespods[zoneandpod]
# this configures the agent
def device_exist(devName):
try:
alreadysetup = False
if distro in (Fedora,CentOS, RHEL6):
alreadysetup = augtool._print("/files/etc/sysconfig/network-scripts/ifcfg-%s"%devName).stdout.strip()
else:
alreadysetup = augtool.match("/files/etc/network/interfaces/iface",devName).stdout.strip()
return alreadysetup
except OSError,e:
return False
def setup_agent_config(configfile, host, zone, pod, cluster, guid, pubNic, prvNic):
stderr("Examining Agent configuration")
fn = configfile
text = file(fn).read(-1)
lines = [ s.strip() for s in text.splitlines() ]
confopts = dict([ m.split("=",1) for m in lines if "=" in m and not m.startswith("#") ])
confposes = dict([ (m.split("=",1)[0],n) for n,m in enumerate(lines) if "=" in m and not m.startswith("#") ])
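    # Hypothetical illustration (values made up for this comment): for lines
    # ["host=192.168.1.1", "# a comment", "zone=zone1"], confopts becomes
    # {"host": "192.168.1.1", "zone": "zone1"} and confposes {"host": 0, "zone": 2},
    # so edited options can later be written back at their original line positions.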
if guid != None:
confopts['guid'] = guid
else:
if not "guid" in confopts:
stderr("Generating GUID for this Agent")
confopts['guid'] = uuidgen().stdout.strip()
if host == None:
try: host = confopts["host"]
except KeyError: host = "localhost"
stderr("Please enter the host name of the management server that this agent will connect to: (just hit ENTER to go with %s)",host)
print "> ",
newhost = raw_input().strip()
if newhost: host = newhost
confopts["host"] = host
if pubNic != None and device_exist(pubNic):
confopts["public.network.device"] = pubNic
if prvNic == None or not device_exist(prvNic):
confopts["private.network.device"] = pubNic
if prvNic != None and device_exist(prvNic):
confopts["private.network.device"] = prvNic
if pubNic == None or not device_exist(pubNic):
confopts["public.network.device"] = prvNic
stderr("Querying %s for zones and pods",host)
try:
if zone == None or pod == None:
x = list_zonespods(confopts['host'])
zoneandpod = prompt_for_hostpods(x)
if zoneandpod:
confopts["zone"],confopts["pod"] = zoneandpod
stderr("You selected zone %s pod %s",confopts["zone"],confopts["pod"])
else:
stderr("Skipped -- using the previous zone %s pod %s",confopts["zone"],confopts["pod"])
else:
confopts["zone"] = zone
confopts["pod"] = pod
confopts["cluster"] = cluster
except (urllib2.URLError,urllib2.HTTPError),e:
stderr("Query failed: %s. Defaulting to zone %s pod %s",str(e),confopts["zone"],confopts["pod"])
for opt,val in confopts.items():
line = "=".join([opt,val])
if opt not in confposes: lines.append(line)
else: lines[confposes[opt]] = line
text = "\n".join(lines)
file(fn,"w").write(text)
def setup_consoleproxy_config(configfile, host, zone, pod):
stderr("Examining Console Proxy configuration")
fn = configfile
text = file(fn).read(-1)
lines = [ s.strip() for s in text.splitlines() ]
confopts = dict([ m.split("=",1) for m in lines if "=" in m and not m.startswith("#") ])
confposes = dict([ (m.split("=",1)[0],n) for n,m in enumerate(lines) if "=" in m and not m.startswith("#") ])
if not "guid" in confopts:
stderr("Generating GUID for this Console Proxy")
confopts['guid'] = uuidgen().stdout.strip()
if host == None:
try: host = confopts["host"]
except KeyError: host = "localhost"
stderr("Please enter the host name of the management server that this console-proxy will connect to: (just hit ENTER to go with %s)",host)
print "> ",
newhost = raw_input().strip()
if newhost: host = newhost
confopts["host"] = host
stderr("Querying %s for zones and pods",host)
try:
if zone == None or pod == None:
x = list_zonespods(confopts['host'])
zoneandpod = prompt_for_hostpods(x)
if zoneandpod:
confopts["zone"],confopts["pod"] = zoneandpod
stderr("You selected zone %s pod %s",confopts["zone"],confopts["pod"])
else:
stderr("Skipped -- using the previous zone %s pod %s",confopts["zone"],confopts["pod"])
else:
confopts["zone"] = zone
confopts["pod"] = pod
except (urllib2.URLError,urllib2.HTTPError),e:
stderr("Query failed: %s. Defaulting to zone %s pod %s",str(e),confopts["zone"],confopts["pod"])
for opt,val in confopts.items():
line = "=".join([opt,val])
if opt not in confposes: lines.append(line)
else: lines[confposes[opt]] = line
text = "\n".join(lines)
file(fn,"w").write(text)
# =========================== DATABASE MIGRATION SUPPORT CODE ===================
# Migrator, Migratee and Evolvers -- this is the generic infrastructure.
class MigratorException(Exception): pass
class NoMigrationPath(MigratorException): pass
class NoMigrator(MigratorException): pass
INITIAL_LEVEL = '-'
class Migrator:
"""Migrator class.
The migrator gets a list of Python objects, and discovers MigrationSteps in it. It then sorts the steps into a chain, based on the attributes from_level and to_level in each one of the steps.
When the migrator's run(context) is called, the chain of steps is applied sequentially on the context supplied to run(), in the order of the chain of steps found at discovery time. See the documentation for the MigrationStep class for information on how that happens.
"""
def __init__(self,evolver_source):
self.discover_evolvers(evolver_source)
self.sort_evolvers()
def discover_evolvers(self,source):
self.evolvers = []
for val in source:
if hasattr(val,"from_level") and hasattr(val,"to_level") and val.to_level:
self.evolvers.append(val)
def sort_evolvers(self):
new = []
while self.evolvers:
if not new:
try: idx= [ i for i,s in enumerate(self.evolvers)
if s.from_level == INITIAL_LEVEL ][0] # initial evolver
                except IndexError:
                    raise IndexError("no initial evolver (from_level == INITIAL_LEVEL) could be found")
else:
try: idx= [ i for i,s in enumerate(self.evolvers)
if new[-1].to_level == s.from_level ][0]
                except IndexError:
                    raise IndexError("no evolver could be found to evolve from level %s" % new[-1].to_level)
new.append(self.evolvers.pop(idx))
self.evolvers = new
def get_evolver_chain(self):
return [ (s.from_level, s.to_level, s) for s in self.evolvers ]
def get_evolver_by_starting_level(self,level):
try: return [ s for s in self.evolvers if s.from_level == level][0]
        except IndexError: raise NoMigrator("No evolver knows how to evolve the database from schema level %r" % level)
def get_evolver_by_ending_level(self,level):
try: return [ s for s in self.evolvers if s.to_level == level][0]
        except IndexError: raise NoMigrator("No evolver knows how to evolve the database to schema level %r" % level)
def run(self, context, dryrun = False, starting_level = None, ending_level = None):
"""Runs each one of the steps in sequence, passing the migration context to each. At the end of the process, context.commit() is called to save the changes, or context.rollback() is called if dryrun = True.
        If starting_level is not specified, context.get_schema_level() is used to find out what level the context is at, and starting_level is set to that.
If ending_level is not specified, then the evolvers will run till the end of the chain."""
        assert dryrun is False # NOT IMPLEMENTED; probably to be implemented by asking the context itself to remember its state
starting_level = starting_level or context.get_schema_level() or self.evolvers[0].from_level
ending_level = ending_level or self.evolvers[-1].to_level
evolution_path = self.evolvers
idx = evolution_path.index(self.get_evolver_by_starting_level(starting_level))
evolution_path = evolution_path[idx:]
try: idx = evolution_path.index(self.get_evolver_by_ending_level(ending_level))
except ValueError:
            raise NoMigrationPath("No evolution path from schema level %r to schema level %r"
                                  % (starting_level, ending_level))
evolution_path = evolution_path[:idx+1]
logging.info("Starting migration on %s"%context)
for ec in evolution_path:
assert ec.from_level == context.get_schema_level()
evolver = ec(context=context)
logging.info("%s (from level %s to level %s)",
evolver,
evolver.from_level,
evolver.to_level)
#try:
evolver.run()
#except:
#context.rollback()
#raise
context.set_schema_level(evolver.to_level)
#context.commit()
logging.info("%s is now at level %s",context,context.get_schema_level())
#if dryrun: # implement me with backup and restore
#logging.info("Rolling back changes on %s",context)
#context.rollback()
#else:
#logging.info("Committing changes on %s",context)
#context.commit()
logging.info("Migration finished")
class MigrationStep:
"""Base MigrationStep class, aka evolver.
You develop your own steps, and then pass a list of those steps to the
Migrator instance that will run them in order.
When the migrator runs, it will take the list of steps you gave him,
and, for each step:
a) instantiate it, passing the context you gave to the migrator
into the step's __init__().
b) run() the method in the migration step.
As you can see, the default MigrationStep constructor makes the passed
context available as self.context in the methods of your step.
Each step has two member vars that determine in which order they
are run, and if they need to run:
- from_level = the schema level that the database should be at,
before running the evolver
                   The value INITIAL_LEVEL ('-') has special meaning here:
                   it marks the first evolver to run when the
                   database does not yet have a schema level.
- to_level = the schema level number that the database will be at
after the evolver has run
"""
# Implement these attributes in your steps
from_level = None
to_level = None
def __init__(self,context):
self.context = context
def run(self):
raise NotImplementedError
class MigrationContext:
def __init__(self): pass
def commit(self):raise NotImplementedError
def rollback(self):raise NotImplementedError
def get_schema_level(self):raise NotImplementedError
def set_schema_level(self,l):raise NotImplementedError
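# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original migration code).
# A minimal, hypothetical example of how the Migrator chains MigrationStep
# subclasses via from_level/to_level; _DictContext, _StepA and _StepB are
# made-up names used only to show the wiring.
class _DictContext(MigrationContext):
    """In-memory context that just remembers its schema level."""
    def __init__(self):
        self.level = INITIAL_LEVEL
    def get_schema_level(self): return self.level
    def set_schema_level(self, l): self.level = l
    def commit(self): pass
    def rollback(self): pass

class _StepA(MigrationStep):
    from_level = INITIAL_LEVEL
    to_level = "1"
    def run(self): pass   # a real step would alter the schema here

class _StepB(MigrationStep):
    from_level = "1"
    to_level = "2"
    def run(self): pass   # a real step would alter the schema here

if __name__ == "__main__":
    ctx = _DictContext()
    # The Migrator discovers the steps, sorts them into a chain ('-' -> '1' -> '2')
    # and applies them in order, updating the context's schema level after each one.
    Migrator([_StepB, _StepA]).run(ctx)
    assert ctx.get_schema_level() == "2"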
|
|
"""
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
import re
try:
import MySQLdb as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
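# For example, (1, 2, 1, 'gamma') sorts *after* (1, 2, 1, 'final', 2) because
# 'gamma' > 'final' alphabetically, even though the gamma prerelease predates
# 1.2.1p2.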
version = Database.version_info
if (version < (1,2,1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions
from MySQLdb.constants import FIELD_TYPE, FLAG
from django.db.backends import *
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
# Raise exceptions for database warnings if DEBUG is on
from django.conf import settings
if settings.DEBUG:
from warnings import filterwarnings
filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# MySQLdb-1.2.1 supports the Python boolean type, and only uses datetime
# module for time-related columns; older versions could have used mx.DateTime
# or strings if there were no datetime module. However, MySQLdb still returns
# TIME columns as timedelta -- they are more like timedelta in terms of actual
# behavior as they are signed and include days -- and Django expects time, so
# we still need to override that.
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: util.typecast_time,
FIELD_TYPE.DECIMAL: util.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
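# For example, server_version_re.match('5.0.24a-standard').groups() yields
# ('5', '0', '24').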
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard util.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
update_can_self_select = False
class DatabaseOperations(BaseDatabaseOperations):
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
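    # For example, date_trunc_sql('month', 'created') returns
    # "CAST(DATE_FORMAT(created, '%%Y-%%m-01 00:00:00') AS DATETIME)";
    # the doubled percent signs survive until cursor parameter substitution.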
def drop_foreignkey_sql(self):
return "DROP FOREIGN KEY"
def fulltext_search_sql(self, field_name):
return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615L
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
# 'ALTER TABLE table AUTO_INCREMENT = 1;'... style SQL statements
# to reset sequence indices
sql.extend(["%s %s %s %s %s;" % \
(style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_TABLE(self.quote_name(sequence['table'])),
style.SQL_KEYWORD('AUTO_INCREMENT'),
style.SQL_FIELD('= 1'),
) for sequence in sequences])
return sql
else:
return []
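    # Illustrative output for tables=['foo'] and sequences=[{'table': 'foo'}]
    # (ignoring the style colorization):
    #   SET FOREIGN_KEY_CHECKS = 0;
    #   TRUNCATE `foo`;
    #   SET FOREIGN_KEY_CHECKS = 1;
    #   ALTER TABLE `foo` AUTO_INCREMENT = 1;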
def value_to_db_datetime(self, value):
# MySQL doesn't support microseconds
if value is None:
return None
return unicode(value.replace(microsecond=0))
def value_to_db_time(self, value):
# MySQL doesn't support microseconds
if value is None:
return None
return unicode(value.replace(microsecond=0))
def year_lookup_bounds(self, value):
# Again, no microseconds
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.99'
return [first % value, second % value]
class DatabaseWrapper(BaseDatabaseWrapper):
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
def __init__(self, **kwargs):
super(DatabaseWrapper, self).__init__(**kwargs)
self.server_version = None
self.features = DatabaseFeatures()
self.ops = DatabaseOperations()
self.client = DatabaseClient()
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation()
def _valid_connection(self):
if self.connection is not None:
try:
self.connection.ping()
return True
except DatabaseError:
self.connection.close()
self.connection = None
return False
def _cursor(self, settings):
if not self._valid_connection():
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
'use_unicode': True,
}
if settings.DATABASE_USER:
kwargs['user'] = settings.DATABASE_USER
if settings.DATABASE_NAME:
kwargs['db'] = settings.DATABASE_NAME
if settings.DATABASE_PASSWORD:
kwargs['passwd'] = settings.DATABASE_PASSWORD
if settings.DATABASE_HOST.startswith('/'):
kwargs['unix_socket'] = settings.DATABASE_HOST
elif settings.DATABASE_HOST:
kwargs['host'] = settings.DATABASE_HOST
if settings.DATABASE_PORT:
kwargs['port'] = int(settings.DATABASE_PORT)
kwargs.update(self.options)
self.connection = Database.connect(**kwargs)
cursor = self.connection.cursor()
return cursor
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def get_server_version(self):
if not self.server_version:
if not self._valid_connection():
self.cursor()
m = server_version_re.match(self.connection.get_server_info())
if not m:
raise Exception('Unable to determine MySQL version from version string %r' % self.connection.get_server_info())
self.server_version = tuple([int(x) for x in m.groups()])
return self.server_version
|
|
import matplotlib
#matplotlib.use('PS')
matplotlib.use('Agg')
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
import pylab as P
import os
import numpy as N
from matplotlib import cm
#Sami's repository
import db.sqlite as sq
import sandbox.MyTools as M
def plotMergerFractions(query,
xlabel, ylabel,
output, out_folder,
mergetimelimit=0.25,
ymin=-0.2, ymax=1.0,
xmin=-8, xmax=4.1,
xbins=70, ybins=70,
title='All galaxies in $2 \leq z < 4$'):
#get data, all galaxies
data = sq.get_data_sqliteSMNfunctions(path, db, query)
x = data[:, 0]
f775w = data[:, 1]
f850lp = data[:, 2]
uvcolor = f775w - f850lp
tmerge = data[:, 3]
tmajor = data[:, 4]
#masks
nomergeMask = tmerge < 0.0
majorsMask = (tmajor > 0.0) & (tmajor <= mergetimelimit)
majorsMask2 = (tmajor > mergetimelimit)
mergersMask = (tmerge > 0.0) & (tmerge <= mergetimelimit) &\
(majorsMask == False) & (majorsMask2 == False)
mergersMask2 = (nomergeMask == False) & (majorsMask == False) &\
(mergersMask == False) & (majorsMask2 == False)
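    # The masks partition the sample: nomergeMask -> never merged (tmerge < 0);
    # majorsMask -> major merger within mergetimelimit; majorsMask2 -> major
    # merger longer ago; mergersMask -> recent merger with no major-merger
    # history; mergersMask2 -> the remaining, older mergers.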
#KDE
mu = M.AnaKDE([N.log10(x[nomergeMask]), uvcolor[nomergeMask]])
x_vec, y_vec, zm, lvls, d0, d1 = mu.contour(N.linspace(xmin, xmax, xbins),
N.linspace(ymin, ymax, ybins),
return_data=True)
#make the figure
# fig = P.figure()
fig = P.figure(figsize=(10, 10))
fig.subplots_adjust(left=0.09, bottom=0.08,
right=0.92, top=0.94)
ax1 = fig.add_subplot(111)
#make contours
cont = ax1.contour(x_vec, y_vec, zm, linewidths=1.3,
colors='black',
levels=N.linspace(0.2, N.max(zm), 5))
#plot scatters
ax1.scatter(N.log10(x[nomergeMask]), uvcolor[nomergeMask],
s=1, c='k', marker='s',
label='Never merged')
s2 = ax1.scatter(N.log10(x[mergersMask]), uvcolor[mergersMask],
s=18, c=1000. * tmerge[mergersMask], marker='^',
cmap=cm.get_cmap('jet'), edgecolor='none',
label='Minor Merger: $T \leq %.0f$ Myr' % (mergetimelimit * 1000.),
alpha=0.2)
s1 = ax1.scatter(N.log10(x[majorsMask]), uvcolor[majorsMask],
s=25, c=1000. * tmajor[majorsMask], marker='o',
cmap=cm.get_cmap('jet'), edgecolor='none',
label='Major Merger: $T \leq %.0f$ Myr' % (mergetimelimit * 1000.),
alpha=0.2)
s1 = ax1.scatter(N.log10(x[majorsMask]), uvcolor[majorsMask],
s=1, c=1000. * tmajor[majorsMask], marker='o',
cmap=cm.get_cmap('jet'), edgecolor='none',
alpha=1.0, visible=False)
c1 = fig.colorbar(s1, shrink=0.8, fraction=0.03)
c1.set_label('Time since merger [Myr]')
#labels
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
#limits
ax1.set_ylim(ymin, ymax)
ax1.set_xlim(xmin, xmax)
#add annotate
P.text(0.5, 1.03, title,
horizontalalignment='center',
verticalalignment='center',
transform=ax1.transAxes)
#make grid
ax1.grid()
#legend and save
P.legend(loc='upper left', scatterpoints=1, shadow=True,
fancybox=True, markerscale=2)
P.savefig(out_folder + output)
def plotMergerFractionsMultiplot(query,
xlabel, ylabel,
output, out_folder,
mergetimelimit=0.25,
ymin=-0.2, ymax=0.8,
xmin=-9, xmax=4.1,
xbins=50, ybins=50,
title=''):
#get data, all galaxies
data = sq.get_data_sqliteSMNfunctions(path, db, query)
x = data[:, 0]
f775w = data[:, 1]
f850lp = data[:, 2]
uvcolor = f775w - f850lp
tmerge = data[:, 3]
tmajor = data[:, 4]
#masks
nomergeMask = tmerge < 0.0
majorsMask = (tmajor > 0.0) & (tmajor <= mergetimelimit)
majorsMask2 = (tmajor > mergetimelimit)
mergersMask = (tmerge > 0.0) & (tmerge <= mergetimelimit) &\
(majorsMask == False) & (majorsMask2 == False)
mergersMask2 = (nomergeMask == False) & (majorsMask == False) &\
(mergersMask == False) & (majorsMask2 == False)
#KDE
mu = M.AnaKDE([N.log10(x[nomergeMask]), uvcolor[nomergeMask]])
x_vec, y_vec, zm, lvls, d0, d1 = mu.contour(N.linspace(xmin, xmax, xbins),
N.linspace(ymin, ymax, ybins),
return_data=True)
#make the figure
# fig = P.figure()
fig = P.figure(figsize=(10, 10))
fig.subplots_adjust(left=0.09, bottom=0.08,
right=0.93, top=0.95,
wspace=0.0, hspace=0.0)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
#make contours
lv = N.linspace(0.2, N.max(zm), 4)
cont = ax1.contour(x_vec, y_vec, zm, linewidths=0.9,
levels=lv, colors='black')
cont = ax2.contour(x_vec, y_vec, zm, linewidths=0.9,
levels=lv, colors='black')
cont = ax3.contour(x_vec, y_vec, zm, linewidths=0.9,
levels=lv, colors='black')
cont = ax4.contour(x_vec, y_vec, zm, linewidths=0.9,
levels=lv, colors='black')
#plot scatters
s1 = ax1.scatter(N.log10(x[majorsMask]), uvcolor[majorsMask],
s=4, c=1000. * tmajor[majorsMask], marker='o',
cmap=cm.get_cmap('jet'), edgecolor='none',
label='Major Merger: $T \leq %.0f$ Myr' % (mergetimelimit * 1000.),
alpha=0.25)
s2 = ax2.scatter(N.log10(x[mergersMask]), uvcolor[mergersMask],
s=6, c=1000. * tmerge[mergersMask], marker='^',
cmap=cm.get_cmap('jet'), edgecolor='none',
label='Minor Merger: $T \leq %.0f$ Myr' % (mergetimelimit * 1000.),
alpha=0.25)
s2 = ax2.scatter(N.log10(x[mergersMask]), uvcolor[mergersMask],
s=6, c=1000. * tmerge[mergersMask], marker='^',
cmap=cm.get_cmap('jet'), edgecolor='none',
visible=False)
#masks
mergetimelimit *= 2.
majorsMask = (tmajor > 0.0) & (tmajor <= mergetimelimit)
majorsMask2 = (tmajor > mergetimelimit)
mergersMask = (tmerge > 0.0) & (tmerge <= mergetimelimit) &\
(majorsMask == False) & (majorsMask2 == False)
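    # Bottom-row panels (ax3, ax4) repeat the scatter with the merger-time
    # limit doubled, so their time-since-merger colour coding spans up to
    # twice the window used in the top row.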
s3 = ax3.scatter(N.log10(x[majorsMask]), uvcolor[majorsMask],
s=4, c=1000. * tmajor[majorsMask], marker='o',
cmap=cm.get_cmap('jet'), edgecolor='none',
label='Major Merger: $T \leq %.0f$ Myr' % (mergetimelimit * 1000.),
alpha=0.25)
s4 = ax4.scatter(N.log10(x[mergersMask]), uvcolor[mergersMask],
s=6, c=1000. * tmerge[mergersMask], marker='^',
cmap=cm.get_cmap('jet'), edgecolor='none',
label='Minor Merger: $T \leq %.0f$ Myr' % (mergetimelimit * 1000.),
alpha=0.25)
s4 = ax4.scatter(N.log10(x[mergersMask]), uvcolor[mergersMask],
s=6, c=1000. * tmerge[mergersMask], marker='^',
cmap=cm.get_cmap('jet'), edgecolor='none',
visible=False)
c1 = fig.colorbar(s2, ax=ax2, shrink=0.7, fraction=0.05)
c2 = fig.colorbar(s4, ax=ax4, shrink=0.7, fraction=0.05)
c1.set_label('Time since merger [Myr]')
c2.set_label('Time since merger [Myr]')
#add annotate
P.text(1.0, 1.04, title,
horizontalalignment='center',
verticalalignment='center',
transform=ax1.transAxes)
#labels
ax3.set_xlabel(xlabel)
ax4.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
ax3.set_ylabel(ylabel)
ax2.set_yticklabels([])
ax4.set_yticklabels([])
ax1.set_xticklabels([])
ax2.set_xticklabels([])
#limits
ax1.set_ylim(ymin, ymax)
ax1.set_xlim(xmin, xmax)
ax2.set_ylim(ymin, ymax)
ax2.set_xlim(xmin, xmax)
ax3.set_ylim(ymin, ymax)
ax3.set_xlim(xmin, xmax)
ax4.set_ylim(ymin, ymax)
ax4.set_xlim(xmin, xmax)
#make grid
ax1.grid()
ax2.grid()
ax3.grid()
ax4.grid()
#legend and save
ax1.legend(loc='upper left', scatterpoints=1,
shadow=True, fancybox=True, markerscale=3)
ax2.legend(loc='upper left', scatterpoints=1,
shadow=True, fancybox=True, markerscale=3)
ax3.legend(loc='upper left', scatterpoints=1,
shadow=True, fancybox=True, markerscale=3)
ax4.legend(loc='upper left', scatterpoints=1,
shadow=True, fancybox=True, markerscale=3)
P.savefig(out_folder + output)
if __name__ == '__main__':
#find the home directory, because the output is to dropbox
#and my user name is not always the same, this hack is required.
hm = os.getenv('HOME')
#constants
path = hm + '/Dropbox/Research/Herschel/runs/reds_zero_dust_evolve/'
out_folder = hm + '/Dropbox/Research/Herschel/plots/mergers/'
db = 'sams.db'
type = '.png'
print 'Begin plotting'
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch1_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch1_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{3.4}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC1Merger' + type,
out_folder)
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch2_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch2_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{4.5}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC2Merger' + type,
out_folder)
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch3_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch3_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{5.8}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC3Merger' + type,
out_folder)
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch4_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch4_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{8.0}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC4Merger' + type,
out_folder)
###############################################################################
print 'IR bright galaxies only'
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch1_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 5e-3 and
FIR.irac_ch1_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{3.4}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC1MergerIRBright' + type,
out_folder, xmin=0.1,
title='Galaxies with $S_{250} > 5$mJy in $2 \leq z < 4$')
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch2_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 5e-3 and
FIR.irac_ch2_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{4.5}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC2MergerIRBright' + type,
out_folder, xmin=0.1,
title='Galaxies with $S_{250} > 5$mJy in $2 \leq z < 4$')
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch3_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 5e-3 and
FIR.irac_ch3_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{5.8}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC3MergerIRBright' + type,
out_folder, xmin=0.1,
title='Galaxies with $S_{250} > 5$mJy in $2 \leq z < 4$')
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch4_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 5e-3 and
FIR.irac_ch4_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{8.0}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC4MergerIRBright' + type,
out_folder, xmin=0.1,
title='Galaxies with $S_{250} > 5$mJy in $2 \leq z < 4$')
###############################################################################
##############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch1_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 5e-6 and
FIR.irac_ch1_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{3.4}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC1MergerIRBrightish' + type,
out_folder, xmin=0.1,
title='$\mathrm{Galaxies \ with \ } S_{250} > 5 \ \mu \mathrm{Jy\ in \ } 2 \leq z < 4$')
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch2_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 5e-6 and
FIR.irac_ch2_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{4.5}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC2MergerIRBrightish' + type,
out_folder, xmin=0.1,
title='$\mathrm{Galaxies \ with \ } S_{250} > 5 \ \mu \mathrm{Jy\ in \ } 2 \leq z < 4$')
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch3_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 5e-6 and
FIR.irac_ch3_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{5.8}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC3MergerIRBrightish' + type,
out_folder, xmin=0.1,
title='$\mathrm{Galaxies \ with \ } S_{250} > 5 \ \mu \mathrm{Jy\ in \ } 2 \leq z < 4$')
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch4_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 5e-6 and
FIR.irac_ch4_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{8.0}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractions(query, xlab, ylab, 'ColorColorIRAC4MergerIRBrightish' + type,
out_folder, xmin=0.1,
title='$\mathrm{Galaxies \ with \ } S_{250} > 5 \ \mu \mathrm{Jy\ in \ } 2 \leq z < 4$')
##############################################################################
# multiplots
print 'Starting multiplots'
################################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch1_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch1_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{3.4}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractionsMultiplot(query, xlab, ylab, 'ColorColorIRAC1Multi' + type,
out_folder)
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch2_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch2_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{4.5}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractionsMultiplot(query, xlab, ylab, 'ColorColorIRAC2Multi' + type,
out_folder)
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch3_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch3_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{5.8}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractionsMultiplot(query, xlab, ylab, 'ColorColorIRAC3Multi' + type,
out_folder)
##############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch4_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch4_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{8.0}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractionsMultiplot(query, xlab, ylab, 'ColorColorIRAC4Multi' + type,
out_folder)
##############################################################################
##############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch4_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch4_obs > 1e-15 and
galprop.mhalo > 11.5
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{8.0}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractionsMultiplot(query, xlab, ylab,
'ColorColorIRAC4MultiLDM' + type,
out_folder,
title='$\log_{10}(M_{\mathrm{DM}}) > 11.5$',
xmin=0.5, xmax=4.)
################################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch4_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 5e-3 and
FIR.irac_ch4_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{8.0}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractionsMultiplot(query, xlab, ylab,
'ColorColorIRAC4MultiLSPIRE' + type,
out_folder,
title='$S_{250} > 5\ \mathrm{mJy}$',
xmin=0.5, xmax=4.0)
################################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch4_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.0 and
FIR.z < 2.5 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch4_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{8.0}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractionsMultiplot(query, xlab, ylab,
'ColorColorIRAC4MultiLz2' + type,
out_folder,
title='$2.0 \leq z < 2.5$')
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch4_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 2.5 and
FIR.z < 3.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch4_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{8.0}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractionsMultiplot(query, xlab, ylab,
'ColorColorIRAC4MultiLz25' + type,
out_folder,
title='$2.5 \leq z < 3.0$')
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch4_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 3.0 and
FIR.z < 3.5 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch4_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{8.0}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractionsMultiplot(query, xlab, ylab,
'ColorColorIRAC4MultiLz3' + type,
out_folder,
title='$3.0 \leq z < 3.5$')
###############################################################################
query = '''select FIR.spire250_obs / FIR.irac_ch4_obs,
galphotdust.f775w, galphotdust.f850lp,
galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
FIR.z >= 3.5 and
FIR.z < 4.0 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id and
FIR.spire250_obs < 1e6 and
galphotdust.f775w < 33 and
galphotdust.f850lp < 33 and
FIR.spire250_obs > 1e-15 and
FIR.irac_ch4_obs > 1e-15
'''
xlab = r'$\log_{10}\left ( \frac{S_{250}}{S_{8.0}} \right )$'
ylab = r'$\mathrm{F775W} - \mathrm{F850lp}$'
plotMergerFractionsMultiplot(query, xlab, ylab,
'ColorColorIRAC4MultiLz35' + type,
out_folder,
title='$3.5 \leq z < 4.0$')
###############################################################################
print 'All done'
|
|
import sys
import os
import io
from jinja2 import Environment, FileSystemLoader, FunctionLoader
import urllib.parse
import base64
import copy
import gc
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
import scipy.stats as stats
# pip install https://github.com/Geosyntec/python-pdfkit/archive/master.zip
import pdfkit
from ..utils import (html_template, css_template)
import wqio
sns.set(style='ticks', context='paper')
mpl.rcParams['text.usetex'] = False
mpl.rcParams['lines.markeredgewidth'] = .5
mpl.rcParams['font.family'] = ['sans-serif']
mpl.rcParams['mathtext.default'] = 'regular'
def make_table(loc):
# make table
singlevarfmtr = '{0:.3f}'
doublevarfmtr = '{0:.3f}; {1:.3f}'
multilinefmtr = '{0:.3f}\n({1:.3f}; {2:.3f})'
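    # e.g. multilinefmtr.format(1.234, 0.9, 1.5) -> '1.234\n(0.900; 1.500)'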
if loc.logmean is None:
logmean = np.nan
else:
logmean = loc.logmean
if loc.geomean is None:
geomean = np.nan
else:
geomean = loc.geomean
if loc.logstd is None:
logstd = np.nan
else:
logstd = loc.logstd
if loc.logmean_conf_interval is None:
logmean_conf_interval = [np.nan, np.nan]
else:
logmean_conf_interval = loc.logmean_conf_interval
if loc.geomean_conf_interval is None:
geomean_conf_interval = [np.nan, np.nan]
else:
geomean_conf_interval = loc.geomean_conf_interval
rows = [
['Count', singlevarfmtr.format(loc.N)],
['Number of NDs', singlevarfmtr.format(loc.ND)],
['Min; Max ({})'.format(loc.definition['unit']),
doublevarfmtr.format(loc.min,loc.max)],
['Mean ({})\n(95% confidence interval)'.format(loc.definition['unit']),
multilinefmtr.format(
loc.mean, *loc.mean_conf_interval)],
['Standard Deviation ({})'.format(loc.definition['unit']),
singlevarfmtr.format(loc.std)],
['Log. Mean\n(95% confidence interval)', multilinefmtr.format(
logmean, *logmean_conf_interval).replace('nan', '-')],
['Log. Standard Deviation', singlevarfmtr.format(logstd).replace('nan', '-')],
['Geo. Mean ({})\n(95% confidence interval)'.format(loc.definition['unit']),
multilinefmtr.format(
geomean, *geomean_conf_interval).replace('nan', '-')],
['Coeff. of Variation', singlevarfmtr.format(loc.cov)],
['Skewness', singlevarfmtr.format(loc.skew)],
['Median ({})\n(95% confidence interval)'.format(loc.definition['unit']),
multilinefmtr.format(
loc.median, *loc.median_conf_interval)],
['Quartiles ({})'.format(loc.definition['unit']),
doublevarfmtr.format(loc.pctl25, loc.pctl75)],
]
return pd.DataFrame(rows, columns=['Statistic', 'Result'])
def make_report(loc, savename, analyte=None, geolocation=None, statplot_options={}, useROS=False):
""" Produces a statistical report for the specified analyte.
Parameters
----------
loc : wqio.Location
The Location object to be summarized.
savename : str
Filename/path of the output pdf
    analyte : str, optional
        Optional name for the analyte in the ``loc``'s data.
    geolocation : str, optional
        Optional name of the monitoring location associated with ``loc``.
    statplot_options : dict, optional
        Dictionary of keyword arguments to be passed to
        wqio.Location.statplot
    useROS : bool, optional
        If True, plot the ROS-modeled (extrapolated) values for
        censored results.
Returns
-------
None
See also
--------
wqio.Location
wqio.Location.statplot
"""
if loc.full_data.shape[0] >= 3:
if analyte is None:
analyte = loc.definition.get("analyte", "unknown")
if geolocation is None:
geolocation = loc.definition.get("geolocation", "unknown")
unit = loc.definition['unit']
thershold = loc.definition['thershold']
if 'ylabel' not in statplot_options:
statplot_options['ylabel'] = analyte + ' ' + '(' + unit + ')'
if 'xlabel' not in statplot_options:
statplot_options['xlabel'] = 'Monitoring Location' #used to be geolocation
# make the table
table = make_table(loc)
table_html = table.to_html(index=False, justify='left').replace('\\n', '\n')
# wqio figure - !can move args to main func later!
fig = loc.statplot(**statplot_options)
ax1, ax2 = fig.get_axes()
ax1xlim = ax1.get_xlim()
ax2xlim = ax2.get_xlim()
if loc.dataframe[loc.dataframe[loc.cencol]].shape[0] > 0 and useROS:
# print(loc.dataframe.head())
qntls, ranked = stats.probplot(loc.data, fit=False)
xvalues = stats.norm.cdf(qntls) * 100
figdata = loc.dataframe.sort(columns='modeled')
figdata['xvalues'] = xvalues
figdata = figdata[figdata[loc.cencol]]
ax2.plot(figdata.xvalues, figdata['modeled'], linestyle='', marker='s',
color='tomato', label='Extrapolated values')
ax2.plot(ax2xlim, [thershold]*2, color=sns.color_palette()[-1], label='Threshold')
handles, labels = ax2.get_legend_handles_labels()
labels[0] = 'Data'
ax2.legend(handles, labels, loc='best')
ax2.set_xlabel('Percent less than value')
ax1.set_xlim(ax1xlim)
ax2.set_xlim(ax2xlim)
ax2ylim = ax2.get_ylim()
ax1.set_ylim(ax2ylim)
fig.tight_layout()
# force figure to a byte object in memory then encode
boxplot_img = io.BytesIO()
fig.savefig(boxplot_img, format="png", dpi=300)
boxplot_img.seek(0)
boxplot_uri = ('data:image/png;base64,'
+ urllib.parse.quote(base64.b64encode(boxplot_img.read())))
# box plot legend
figl, axl = plt.subplots(1,1, figsize=(7,10))
img = mpimg.imread('box.png')
axl.imshow(img)
axl.xaxis.set_visible(False)
axl.yaxis.set_visible(False)
sns.despine(ax=axl, top=True, right=True, left=True, bottom=True)
legend_img = io.BytesIO()
figl.savefig(legend_img, format="png", dpi=300, bbox_inches='tight')
legend_img.seek(0)
legend_uri = ('data:image/png;base64,'
+ urllib.parse.quote(base64.b64encode(legend_img.read())))
# html magic
env = Environment(loader=FileSystemLoader(r'.\utils'))
template = env.from_string(html_template.getvalue())
# create pdf report
template_vars = {'analyte' : analyte,
'location': geolocation,
'analyte_table': table_html,
'legend': legend_uri,
'boxplot': boxplot_uri}
html_out = template.render(template_vars)
csst = copy.copy(css_template)
try:
print('Creating report {}'.format(savename))
pdf = pdfkit.from_string(html_out, savename, css=csst)
except OSError as e:
raise OSError('The tool cannot write to the destination path. '
'Please check that the destination pdf is not open.\n'
'Trace back:\n{}'.format(e))
plt.close(fig)
del boxplot_img
del figl
else:
        print('{} has fewer than 3 data points, skipping...'.format(savename))
print('\n')
gc.collect()
class PdfReport(object):
""" Class to generate generic 1-page reports from wqio objects.
Parameters
----------
path : str
Filepath to the CSV file containing input data.
analytecol : str (default = 'analyte')
Column in the input file that contains the analyte name.
rescol : str (default='res')
Column in the input file that contains the result values.
qualcol : str (default='qual')
Column in the input file that contains the data qualifiers
labeling data as right-censored (non-detect) or not.
ndvals : list of strings
List of values found in ``qualcol`` that flag data as being
right-censored (non-detect). Any value in ``qualcol`` that is
*not* in this list will be assumed to denote an uncensored
(detected value).
bsIter : int (default = 10000)
Number of iterations used to refined statistics via a bias-
corrected and accelerated (BCA) bootstrapping method.
useROS : bool (default is True)
Toggles the use of regression-on-order statistics to estimate
censored (non-detect) values when computing summary statistics.
Examples
--------
>>> import wqreports
>>> report = wqreports.PdfReport("~/data/arsenic.csv", ndvals=['U', 'UJ', '<'])
    >>> report.export_pdfs(...)
"""
def __init__(self, path, analytecol='analyte', rescol='res',
qualcol='qual', unitcol='unit', locationcol='location',
thersholdcol='threshold', ndvals=['U'], bsIter=5000,
useROS=False):
self.filepath = path
self.ndvals = ndvals
self.final_ndval = 'ND'
self.bsIter = bsIter
self.useROS = useROS
self.analytecol = analytecol
self.unitcol = unitcol
self.locationcol = locationcol
self.thersholdcol = thersholdcol
self.rescol = rescol
self.qualcol = qualcol
self._rawdata = None
self._cleandata = None
self._analytes = None
self._geolocations = None
self._thresholds = None
self._locations = None
@property
def rawdata(self):
""" Raw data as parsed by pandas.read_csv(self.filepath)
"""
if self._rawdata is None:
self._rawdata = pd.read_csv(
self.filepath,
dtype={
self.analytecol: str,
self.unitcol: str,
self.locationcol: str,
self.thersholdcol: np.float64,
self.rescol: np.float64,
self.qualcol: str,
})
return self._rawdata
@property
def cleandata(self):
""" Cleaned data with simpler qualifiers.
"""
if self._cleandata is None:
self._cleandata = (
self.rawdata
.replace({self.qualcol:{_: self.final_ndval for _ in self.ndvals}})
)
return self._cleandata
@property
def analytes(self):
""" Simple list of the analytes to be analyzed.
"""
if self._analytes is None:
self._analytes = self.cleandata[self.analytecol].unique().tolist()
self._analytes.sort()
return self._analytes
@property
def geolocations(self):
"""Simple list of the physical locations in the dataset.
"""
if self._geolocations is None:
self._geolocations = self.cleandata[self.locationcol].unique().tolist()
self._geolocations.sort()
return self._geolocations
@property
def thresholds(self):
"""Simple dictionary of thresholds per each analyte.
"""
if self._thresholds is None:
thresholds = (self.cleandata.loc[:,[self.analytecol, self.thersholdcol]]
.drop_duplicates())
tshape = thresholds.shape[0]
thresholds = thresholds.set_index(self.analytecol).loc[:,self.thersholdcol]
thresholds = thresholds.to_dict()
if tshape != len(thresholds):
                e = ('An analyte has more than one threshold value, please'
                     ' check the input data')
raise ValueError(e)
self._thresholds = thresholds
return self._thresholds
@property
def locations(self):
""" Simple list of wqio.Location objects for each analyte.
"""
if self._locations is None:
self._locations = {}
gb = self.cleandata.groupby([self.locationcol, self.analytecol])
for gl, a in gb.groups.keys():
loc = self._make_location(gl, a)
loc.definition.update({"analyte": a, "geolocation": gl})
self._locations[(gl, a)] = loc
return self._locations
def _make_location(self, location, analyte):
""" Make a wqio.Location from an analyte.
Parameters
----------
analyte : string
The pollutant to be included in the Location.
Returns
-------
loc : wqio.Location
A wqio.Location object for the provided analyte.
"""
if analyte not in self.analytes:
raise ValueError("{} is not in the dataset".format(analyte))
if location not in self.geolocations:
raise ValueError("{} is not in the dataset".format(location))
# get target analyte
querystring = "{} == @location and {} == @analyte".format(self.locationcol, self.analytecol)
data = self.cleandata.query(querystring)
if data[self.unitcol].unique().shape[0] > 1:
e = 'More than one unit detected for {}-{}. Please check the input file'
            raise ValueError(e.format(location, analyte))
loc = wqio.features.Location(data, bsIter=self.bsIter, ndval=self.final_ndval,
rescol=self.rescol, qualcol=self.qualcol,
useROS=self.useROS, include=True)
loc.definition = {
'unit': data[self.unitcol].iloc[0],
'thershold': self.thresholds[analyte]
}
return loc
def export_pdfs(self, output_path, basename=None, **statplot_options):
""" Export 1-pg summary PDF for each analyte in the data.
Parameters
----------
output_path : string
Folder path in which all PDFs will be saved
basename : string, optional
Prefix for the filename of each PDF. If omitted, the
            filename will simply be the geolocation and analyte.
statplot_options : optional keyword arguments
Options passed directly to wqio.Location.statplot
"""
if basename is None:
basename = ""
for (geolocation, analyte), loc in self.locations.items():
san_geolocation = wqio.utils.processFilename(geolocation)
san_analyte = wqio.utils.processFilename(analyte)
filename = os.path.join(output_path, '{}{}{}.pdf'.format(
basename, san_geolocation, san_analyte))
# need to make a copy so that the dict does not get changed in
# the low functions
spo = copy.copy(statplot_options)
make_report(loc, filename, analyte=analyte, geolocation=geolocation,
statplot_options=spo, useROS=self.useROS)
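# ---------------------------------------------------------------------------
# Illustrative usage sketch (comments only; the CSV path and output folder are
# hypothetical placeholders, not files shipped with this package):
#
#     report = PdfReport("data/arsenic.csv", ndvals=['U', 'UJ', '<'])
#     report.export_pdfs("reports/", basename="arsenic_")
#
# This writes one 1-page PDF per (location, analyte) pair found in the CSV.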
|
|
from .. import util
import numpy as np
class HeaderError(Exception):
# the exception raised if an STL file object doesn't match its header
pass
# define a numpy datatype for the data section of a binary STL file
# everything in STL is always Little Endian
# this works natively on Little Endian systems, but blows up on Big Endians
# so we always specify byteorder
_stl_dtype = np.dtype([('normals', '<f4', (3)),
('vertices', '<f4', (3, 3)),
('attributes', '<u2')])
# define a numpy datatype for the header of a binary STL file
_stl_dtype_header = np.dtype([('header', np.void, 80),
('face_count', '<i4')])
def load_stl(file_obj, **kwargs):
"""
Load an STL file from a file object.
Parameters
----------
file_obj : open file-like object
Containing STL data
Returns
----------
loaded : dict
kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
"""
# save start of file obj
file_pos = file_obj.tell()
try:
# check the file for a header which matches the file length
# if that is true, it is almost certainly a binary STL file
# if the header doesn't match the file length a HeaderError will be
# raised
return load_stl_binary(file_obj)
except HeaderError:
# move the file back to where it was initially
file_obj.seek(file_pos)
# try to load the file as an ASCII STL
# if the header doesn't match the file length a HeaderError will be
# raised
return load_stl_ascii(file_obj)
def load_stl_binary(file_obj):
"""
Load a binary STL file from a file object.
Parameters
----------
file_obj : open file-like object
Containing STL data
Returns
----------
loaded: kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
"""
# the header is always 84 bytes long, we just reference the dtype.itemsize
# to be explicit about where that magical number comes from
header_length = _stl_dtype_header.itemsize
header_data = file_obj.read(header_length)
if len(header_data) < header_length:
raise HeaderError('Binary STL shorter than a fixed header!')
try:
header = np.frombuffer(header_data,
dtype=_stl_dtype_header)
except BaseException:
raise HeaderError('Binary header incorrect type')
try:
# save the header block as a string
# there could be any garbage in there so wrap in try
metadata = {'header': util.decode_text(
bytes(header['header'][0])).strip()}
except BaseException:
metadata = {}
# now we check the length from the header versus the length of the file
# data_start should always be position 84, but hard coding that felt ugly
data_start = file_obj.tell()
# this seeks to the end of the file
# position 0, relative to the end of the file 'whence=2'
file_obj.seek(0, 2)
# we save the location of the end of the file and seek back to where we
# started from
data_end = file_obj.tell()
file_obj.seek(data_start)
# the binary format has a rigidly defined structure, and if the length
# of the file doesn't match the header, the loaded version is almost
# certainly going to be garbage.
len_data = data_end - data_start
len_expected = header['face_count'] * _stl_dtype.itemsize
# this check is to see if this really is a binary STL file.
# if we don't do this and try to load a file that isn't structured properly
# we will be producing garbage or crashing hard
# so it's much better to raise an exception here.
if len_data != len_expected:
raise HeaderError(
'Binary STL has incorrect length in header: {} vs {}'.format(
len_data, len_expected))
blob = np.frombuffer(file_obj.read(), dtype=_stl_dtype)
# return empty geometry if there are no vertices
if not len(blob['vertices']):
return {'geometry': {}}
# all of our vertices will be loaded in order
# so faces are just sequential indices reshaped.
faces = np.arange(header['face_count'] * 3).reshape((-1, 3))
# there are two bytes of attribute data saved per triangle,
# which are sometimes used to store face color
result = {'vertices': blob['vertices'].reshape((-1, 3)),
'face_normals': blob['normals'].reshape((-1, 3)),
'faces': faces,
'face_attributes': {'stl': blob['attributes']},
'metadata': metadata}
return result
def load_stl_ascii(file_obj):
"""
Load an ASCII STL file from a file object.
Parameters
----------
file_obj : open file-like object
Containing input data
Returns
----------
loaded : dict
kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
"""
# read all text into one string
raw = util.decode_text(file_obj.read()).strip().lower()
# split into solid body
solids = raw.split('endsolid')
kwargs = {}
for solid in solids:
stripped = solid.split('solid', 1)
if len(stripped) != 2:
continue
header, text = stripped[1].split('\n', 1)
name = header.strip()
if name in kwargs or len(name) == 0:
name = '{}_{}'.format(name, util.unique_id())
# create array of splits
blob = np.array(text.strip().split())
# there are 21 'words' in each face
face_len = 21
# length of blob should be multiple of face_len
if (len(blob) % face_len) != 0:
util.log.warning('skipping solid!')
continue
face_count = int(len(blob) / face_len)
# this offset is to be added to a fixed set of tiled indices
offset = face_len * np.arange(face_count).reshape((-1, 1))
normal_index = np.tile([2, 3, 4], (face_count, 1)) + offset
vertex_index = np.tile([8, 9, 10,
12, 13, 14,
16, 17, 18], (face_count, 1)) + offset
# faces are groups of three sequential vertices
faces = np.arange(face_count * 3).reshape((-1, 3))
face_normals = blob[normal_index].astype('<f8')
vertices = blob[vertex_index.reshape((-1, 3))].astype('<f8')
# only add vertices and faces if there is geometry
if len(vertices):
kwargs[name] = {'vertices': vertices,
'faces': faces,
'face_normals': face_normals}
if len(kwargs) == 1:
return next(iter(kwargs.values()))
return {'geometry': kwargs}
def export_stl(mesh):
"""
Convert a Trimesh object into a binary STL file.
Parameters
---------
mesh: Trimesh object
Returns
---------
export: bytes, representing mesh in binary STL form
"""
header = np.zeros(1, dtype=_stl_dtype_header)
if hasattr(mesh, 'faces'):
header['face_count'] = len(mesh.faces)
export = header.tobytes()
if hasattr(mesh, 'faces'):
packed = np.zeros(len(mesh.faces), dtype=_stl_dtype)
packed['normals'] = mesh.face_normals
packed['vertices'] = mesh.triangles
export += packed.tobytes()
return export
def export_stl_ascii(mesh):
"""
Convert a Trimesh object into an ASCII STL file.
Parameters
---------
mesh : trimesh.Trimesh
Returns
---------
export : str
Mesh represented as an ASCII STL file
"""
# move all the data that's going into the STL file into one array
blob = np.zeros((len(mesh.faces), 4, 3))
blob[:, 0, :] = mesh.face_normals
blob[:, 1:, :] = mesh.triangles
# create a lengthy format string for the data section of the file
format_string = 'facet normal {} {} {}\nouter loop\n'
format_string += 'vertex {} {} {}\n' * 3
format_string += 'endloop\nendfacet\n'
format_string *= len(mesh.faces)
# concatenate the header, data, and footer
export = 'solid \n'
export += format_string.format(*blob.reshape(-1))
export += 'endsolid'
return export
_stl_loaders = {'stl': load_stl,
'stl_ascii': load_stl}
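# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): round-trip a single
# triangle through export_stl() and load_stl() defined above. _DemoMesh is a
# hypothetical stand-in for trimesh.Trimesh that exposes only the attributes
# export_stl() actually reads (faces, face_normals, triangles).
if __name__ == '__main__':
    import io

    class _DemoMesh(object):
        faces = np.array([[0, 1, 2]])
        face_normals = np.array([[0.0, 0.0, 1.0]])
        triangles = np.array([[[0.0, 0.0, 0.0],
                               [1.0, 0.0, 0.0],
                               [0.0, 1.0, 0.0]]])

    # 84-byte header followed by one 50-byte face record
    binary_blob = export_stl(_DemoMesh())
    loaded = load_stl(io.BytesIO(binary_blob))
    assert loaded['faces'].shape == (1, 3)
    assert loaded['vertices'].shape == (3, 3)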
|
|
# ioc_et.py
#
# Copyright 2013 Mandiant Corporation.
# Licensed under the Apache 2.0 license. Developed for Mandiant by William
# Gibb.
#
# Mandiant licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Provides support for ioc_api.
#
#
# Modifications to file for Yahoo! by Sean Gillespie
#
import uuid
import datetime
from lxml import etree as et
##############################################
NSMAP = {'xsi' : 'http://www.w3.org/2001/XMLSchema-instance',
'xsd' : 'http://www.w3.org/2001/XMLSchema', }
def make_IOC_root(id=None, version="1.1"):
if version == "1.0":
NSMAP[None] = "http://schemas.mandiant.com/2010/ioc"
root = et.Element('ioc', nsmap = NSMAP)
elif version == "1.1":
NSMAP[None] = "http://openioc.org/schemas/OpenIOC_1.1"
root = et.Element('OpenIOC', nsmap = NSMAP)
else:
raise ValueError('Invalid Version')
if id:
root.attrib['id'] = id
else:
root.attrib['id'] = get_guid()
# default dates
root.attrib['last-modified'] = '0001-01-01T00:00:00'
root.attrib['published-date'] = '0001-01-01T00:00:00'
return root
def make_metadata_node(name = None,
description = 'Automatically generated IOC',
author = 'IOC_et',
links = None,):
metadata_node = et.Element('metadata')
metadata_node.append(make_short_description_node(name))
metadata_node.append(make_description_node(description))
#metadata_node.append(make_keywords_node())
metadata_node.append(make_authored_by_node(author))
metadata_node.append(make_authored_date_node())
metadata_node.append(make_links_node(links))
return metadata_node
def make_keywords_node(keywords = None):
keywords_node = et.Element('keywords')
if keywords:
keywords_node.text = keywords
return keywords_node
def make_short_description_node(name):
description_node = et.Element('short_description')
description_node.text=name
return description_node
def update_node_text(node, text):
node.text = text
return node
def make_description_node(text):
description_node = et.Element('description')
description_node.text=text
return description_node
def make_authored_by_node(author = 'ioc_et'):
authored_node = et.Element('authored_by')
authored_node.text = author
return authored_node
def make_links_node(links = None):
links_node = et.Element('links')
if links:
for rel, href, value in links:
links_node.append(make_link_node(rel,value, href))
return links_node
def set_root_lastmodified(root_node, date=None):
if date:
root_node.attrib['last-modified'] = date
else:
root_node.attrib['last-modified'] = get_current_date()
def set_root_published_date(root_node, date=None):
if date:
root_node.attrib['published-date'] = date
else:
root_node.attrib['published-date'] = get_current_date()
def set_root_created_date(root_node, date=None):
date_node = root_node.find('.//authored_date')
if date_node is None:
raise ValueError('authored_date node does not exist. IOC is not schema compliant.')
if date:
date_node.text = date
else:
date_node.text = get_current_date()
def make_criteria_node(indicator_node = None):
definition_node = et.Element('criteria')
if indicator_node is not None:
if indicator_node.tag != 'Indicator':
raise ValueError('IndicatorNode has the incorrect tag.')
definition_node.append(indicator_node)
return definition_node
def make_definition_node(indicator_node = None):
definition_node = et.Element('definition')
if indicator_node is not None:
if indicator_node.tag != 'Indicator':
raise ValueError('IndicatorNode has the incorrect tag.')
definition_node.append(indicator_node)
return definition_node
def make_parameters_node():
parameters_node = et.Element('parameters')
return parameters_node
def make_param_node(id, content, name='comment', type='string',):
param_node = et.Element('param')
param_node.attrib['id'] = get_guid()
param_node.attrib['ref-id'] = id
param_node.attrib['name'] = name
value_node = et.Element('value')
value_node.attrib['type'] = type
value_node.text = content
param_node.append(value_node)
return param_node
def make_Indicator_node(operator, id = None):
'''
This makes an Indicator node element. These allow the construction of a
logic tree within the IOC.
input
operator: 'AND' or 'OR'.
id: a string value. This is used to provide a GUID for the Indicator.
The ID should NOT be specified under normal circumstances.
return: elementTree element
'''
Indicator_node = et.Element('Indicator')
if id:
Indicator_node.attrib['id'] = id
else:
Indicator_node.attrib['id'] = get_guid()
if operator.upper() not in ['AND','OR']:
raise ValueError('Indicator operator must be "AND" or "OR".')
Indicator_node.attrib['operator'] = operator.upper()
return Indicator_node
def make_IndicatorItem_node(condition="is",
document="Undefined",
search="",
content_type="Undefined",
content="",
preserve_case = False,
negate = False,
context_type = 'mir',
id = None,
version = "1.1"):
'''
This makes an IndicatorItem element. It contains the actual threat
intelligence in the IOC.
input
condition: This is the condition of the item ('is', 'contains',
'matches', etc).
document: String value. Denotes the type of document to look for
the encoded artifact in.
search: String value. Specifies which attribute of the document type
the encoded value is.
content_type: This is the display type of the item, which is derived
from the iocterm for the search value.
content: a string value, containing the data to be identified.
preserve_case: Boolean value. Specify if the
IndicatorItem/content/text() is case sensitive.
negate: Boolean value. Specify if the IndicatorItem/@condition is
negated, ie:
@condition = 'is' & @negate = 'true' would be equal to the
@condition = 'isnot' in OpenIOC 1.0.
context_type: a string value, giving context to the document/search
information. This defaults to 'mir'.
id: a string value. This is used to provide a GUID for the IndicatorItem
The ID should NOT be specified under normal circumstances.
returns
an elementTree Element item
'''
IndicatorItem_node = et.Element('IndicatorItem')
if version != "1.0":
if preserve_case:
IndicatorItem_node.attrib['preserve-case'] = 'true'
else:
IndicatorItem_node.attrib['preserve-case'] = 'false'
if negate:
IndicatorItem_node.attrib['negate'] = 'true'
else:
IndicatorItem_node.attrib['negate'] = 'false'
if id:
IndicatorItem_node.attrib['id'] = id
else:
IndicatorItem_node.attrib['id'] = get_guid()
IndicatorItem_node.attrib['condition'] = condition
context_node = make_context_node(document, search, context_type)
content_node = make_content_node(content_type, content)
IndicatorItem_node.append(context_node)
IndicatorItem_node.append(content_node)
return IndicatorItem_node
##############################################
def make_authored_date_node():
authored_node = et.Element('authored_date')
authored_node.text = get_current_date()
return authored_node
def make_link_node(rel, value, href=None):
link_node = et.Element('link')
link_node.attrib['rel'] = rel
if href:
link_node.attrib['href'] = href
link_node.text = value
return link_node
def make_context_node(document,search,context_type='mir'):
context_node = et.Element('Context')
context_node.attrib['document'] = document
context_node.attrib['search'] = search
if context_type:
context_node.attrib['type'] = context_type
return context_node
def make_content_node(type, content):
content_node = et.Element('Content')
content_node.attrib['type'] = type
content_node.text = content
return content_node
##############################################
def get_guid():
return str(uuid.uuid4())
def get_current_date():
# xsdDate format. not TZ format.
time = datetime.datetime.utcnow()
timestring = time.strftime('%Y-%m-%dT%H:%M:%S')
return timestring
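##############################################
# Hedged usage sketch (not part of the original module): assembling a minimal
# OpenIOC 1.1 document from the helpers above. The MD5 hash and the
# document/search terms are illustrative only.
if __name__ == '__main__':
    root = make_IOC_root(version='1.1')
    root.append(make_metadata_node(name='Example IOC'))
    indicator = make_Indicator_node('OR')
    indicator.append(make_IndicatorItem_node(condition='is',
                                             document='FileItem',
                                             search='FileItem/Md5sum',
                                             content_type='md5',
                                             content='d41d8cd98f00b204e9800998ecf8427e'))
    root.append(make_criteria_node(indicator))
    set_root_lastmodified(root)
    print(et.tostring(root, pretty_print=True).decode())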
|
|
#!/usr/bin/env python
#
# A basic functional test of the total impact API
#
import urllib2
import urllib
import json
import time
import sys
import pickle
from pprint import pprint
from optparse import OptionParser
TEST_ITEMS = {
('doi', '10.1371/journal.pcbi.1000361') :
{
'aliases': ['doi', "title", "url"],
'biblio': [u'authors', u'journal', u'year', u'title'],
'metrics' : {
'wikipedia:mentions' : 1,
u'plosalm:crossref': 133,
'plosalm:html_views': 17455,
'plosalm:pdf_views': 2106,
u'plosalm:pmc_abstract': 19,
u'plosalm:pmc_figure': 71,
u'plosalm:pmc_full-text': 1092,
u'plosalm:pmc_pdf': 419,
u'plosalm:pmc_supp-data': 157,
u'plosalm:pmc_unique-ip': 963,
u'plosalm:pubmed_central': 102,
u'plosalm:scopus': 218
}
},
('url', 'http://total-impact.org/') : #note trailing slash
{
'aliases': ["url"],
'biblio': ['title'],
'metrics' : {
'delicious:bookmarks' : 65
}
},
('url', 'http://total-impact.org'): #no trailing slash
{
'aliases': ["url"],
'biblio': ['title'],
'metrics' : {
'topsy:tweets' : 282,
'topsy:influential_tweets' : 26
}
},
('doi', '10.5061/dryad.18') :
{
'aliases': ['doi', 'url', 'title'],
'biblio': [u'authors', u'year', u'repository', u'title'],
'metrics' : {
'dryad:most_downloaded_file' : 63,
'dryad:package_views' : 149,
'dryad:total_downloads' : 169
}
},
('github', 'egonw,cdk') :
{
'aliases': ['github', 'url', 'title'],
'biblio': [u'last_push_date', u'create_date', u'description', u'title', u'url', u'owner', 'h1'],
'metrics' : {
'github:forks' : 27,
'github:watchers' : 31
}
},
('url', 'http://nescent.org/'):
{
'aliases': ['url'],
'biblio': [u'title', "h1"],
'metrics' : {}
},
('url', 'http://www.slideshare.net/cavlec/manufacturing-serendipity-12176916') :
{
'aliases' : ['url', 'title'],
'biblio': [u'username', u'repository', u'created', u'h1', u'genre', u'title'],
'metrics' : {
'slideshare:downloads' : 4,
'slideshare:views' : 337,
'slideshare:favorites' : 2
}
}
}
class TotalImpactAPI:
base_url = 'http://localhost:5001/'
def request_item(self, alias):
""" Attempt to obtain an item from the server using the given
namespace and namespace id. For example,
namespace = 'pubmed', nid = '234234232'
Will request the item related to pubmed item 234234232
"""
(namespace, nid) = alias
url = self.base_url + urllib.quote('item/%s/%s' % (namespace, nid))
req = urllib2.Request(url)
data = "" # send an empty body so urllib2 issues a POST
response = urllib2.urlopen(req, data)
tiid = json.loads(urllib.unquote(response.read()))
print "tiid %s for %s" %(tiid, alias)
return tiid
def request_item_result(self, item_id):
url = self.base_url + urllib.quote('item/%s' % (item_id))
req = urllib2.Request(url)
response = urllib2.urlopen(req)
return json.loads(response.read())
def checkItem(item, data, alias, items_for_use, options):
if options.debug:
print "Checking %s result (%s)..." % (alias, item)
success = True
for section in ["biblio", "aliases", "metrics"]:
result = checkItemSection(alias,
item,
section,
data[section],
items_for_use[alias],
options)
if not result:
success = False
return success
def checkItemSection(alias, id, section, api_response, gold_item, options):
success = True
if options.debug:
print "Checking %s result (%s)..." % (alias, id)
# Check aliases are correct
if section=="aliases":
gold_aliases = gold_item['aliases']
alias_result = set(api_response.keys())
expected_result = set(gold_aliases + [u'last_modified', u'created'])
if (alias_result == expected_result):
if options.debug:
print "ALIASES CORRECT! %s" %(alias_result)
else:
if options.debug:
print "ALIASES **NOT** CORRECT, for %s, %s, have %s, want %s" %(alias, id, alias_result, expected_result)
success = False
# Check biblio are correct
elif section=="biblio":
gold_biblio = gold_item['biblio']
if api_response:
biblio_result = set(api_response.keys())
else:
biblio_result = set([])
expected_result = set(gold_biblio + ['genre'])
if (biblio_result == expected_result):
if options.debug:
print "BIBLIO CORRECT! %s" %(biblio_result)
else:
if options.debug:
print "BIBLIO **NOT** CORRECT, have %s, want %s" %(biblio_result, expected_result)
success = False
# Check we've got some metric values
elif section=="metrics":
gold_metrics = gold_item['metrics']
for metric in gold_metrics.keys():
try:
metric_data = api_response[metric].values()[0]
except KeyError:
# didn't return anything. problem!
if options.debug:
print "METRICS **NOT** CORRECT for %s: metric missing" % (metric)
success = False
# expect the returned value to be equal or larger than reference
if success:
if metric_data >= gold_metrics[metric]:
if options.debug:
print "METRICS CORRECT! %s" %(metric_data)
else:
if options.debug:
print "METRICS **NOT** CORRECT for %s - %s, expected at least %s" % (metric, metric_data, gold_metrics)
return False
if options.debug:
print #blank line
return success
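# Hedged illustration (not part of the original script): exercising the
# metrics branch of checkItemSection with made-up data and a stub options
# object. The gold value 282 matches the TEST_ITEMS entry above; the reported
# value 300 is invented to show the "equal or larger than reference" rule.
class _StubOptions(object):
    debug = False

assert checkItemSection(('url', 'http://total-impact.org'), 'demo-tiid',
                        'metrics',
                        {'topsy:tweets': {'provider': 300}},
                        {'metrics': {'topsy:tweets': 282}},
                        _StubOptions())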
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-n", "--numrepeats", dest="numrepeats",
default=1, help="Number of repeated requests to make")
parser.add_option("-i", "--items", dest="numdiverseitems",
default=999,
help="Number of diverse items to use (up to max defined)")
parser.add_option("-m", "--missing", dest="missing",
default=False, action="store_true",
help="Display any outstanding items")
parser.add_option("-p", "--printdata", dest="printdata",
default=False, action="store_true", help="Display item data")
parser.add_option("-v", "--verbose", dest="debug",
default=False, action="store_true", help="Display verbose debug data")
(options, args) = parser.parse_args()
item_count = int(options.numrepeats)
num_diverse_items = min(len(TEST_ITEMS), int(options.numdiverseitems))
aliases = TEST_ITEMS.keys()[0:num_diverse_items]
items_for_use = dict((alias, TEST_ITEMS[alias]) for alias in aliases)
ti = TotalImpactAPI()
complete = {}
itemid = {}
for alias in aliases:
complete[alias] = {}
itemid[alias] = {}
for idx in range(item_count):
# Request the items to be generated
itemid[alias][idx] = ti.request_item(alias)
complete[alias][idx] = False
while True:
for idx in range(item_count):
for alias in aliases:
if not complete[alias][idx]:
if options.missing:
print alias, idx, itemid[alias][idx]
itemdata = ti.request_item_result(itemid[alias][idx])
complete[alias][idx] = checkItem(
itemid[alias][idx],
itemdata,
alias,
items_for_use,
options
)
if complete[alias][idx] and options.printdata:
pprint(itemdata)
total = sum([sum(complete[alias].values()) for alias in aliases])
print "%i of %i responses are complete" %(total, item_count * len(aliases))
if total == item_count * len(aliases):
sys.exit(0)
time.sleep(0.5)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from flask import json
from sqlalchemy import sql
from sqlalchemy import exc as sa_exc
from dci.api.v1 import api
from dci.api.v1 import base
from dci import decorators
from dci.api.v1 import utils as v1_utils
from dci.common import exceptions as dci_exc
from dci.common import schemas
from dci.common import utils
from dci.db import models
from dci.trackers import github
from dci.trackers import bugzilla
_TABLE = models.ISSUES
_I_COLUMNS = v1_utils.get_columns_name_with_objects(_TABLE)
def _get_or_create_issue(data):
values = v1_utils.common_values_dict()
values.update(data)
if 'github.com' in values['url']:
type = 'github'
else:
type = 'bugzilla'
values['tracker'] = type
# First, insert the issue if it doesn't already exist
# in the issues table. If it already exists, ignore the
# exceptions, and keep proceeding.
query = _TABLE.insert().returning(*_TABLE.columns).values(**values)
try:
return flask.g.db_conn.execute(query).fetchone()
except sa_exc.IntegrityError:
# It is not a real failure if the issue has already been
# inserted. As long as it exists once, we are good to
# proceed.
query = (sql.select([_TABLE])
.where(sql.and_(_TABLE.c.url == values['url'],
_TABLE.c.topic_id == values['topic_id'])))
return flask.g.db_conn.execute(query).fetchone()
def get_issues_by_resource(resource_id, table):
"""Get all issues for a specific job."""
v1_utils.verify_existence_and_get(resource_id, table)
# When retrieving the issues for a job, we actually retrieve
# the issues attached to the job itself plus the issues attached
# to the components the job has been run with.
if table.name == 'jobs':
JJI = models.JOIN_JOBS_ISSUES
JJC = models.JOIN_JOBS_COMPONENTS
JCI = models.JOIN_COMPONENTS_ISSUES
# Get all the issues attached to all the components attached to a job
j1 = sql.join(
_TABLE,
sql.join(
JCI,
JJC,
sql.and_(
JCI.c.component_id == JJC.c.component_id,
JJC.c.job_id == resource_id,
),
),
_TABLE.c.id == JCI.c.issue_id,
)
query = sql.select([_TABLE]).select_from(j1)
rows = flask.g.db_conn.execute(query)
rows = [dict(row) for row in rows]
# Get all the issues attached to a job
j2 = sql.join(
_TABLE,
JJI,
sql.and_(
_TABLE.c.id == JJI.c.issue_id,
JJI.c.job_id == resource_id
)
)
query2 = sql.select([_TABLE]).select_from(j2)
rows2 = flask.g.db_conn.execute(query2)
rows += [dict(row) for row in rows2]
# When retrieving the issues for a component, we only retrieve the
# issues attached to the specified component.
else:
JCI = models.JOIN_COMPONENTS_ISSUES
query = (sql.select([_TABLE])
.select_from(JCI.join(_TABLE))
.where(JCI.c.component_id == resource_id))
rows = flask.g.db_conn.execute(query)
rows = [dict(row) for row in rows]
for row in rows:
if row['tracker'] == 'github':
l_tracker = github.Github(row['url'])
elif row['tracker'] == 'bugzilla':
l_tracker = bugzilla.Bugzilla(row['url'])
row.update(l_tracker.dump())
return flask.jsonify({'issues': rows,
'_meta': {'count': len(rows)}})
def unattach_issue(resource_id, issue_id, table):
"""Unattach an issue from a specific job."""
v1_utils.verify_existence_and_get(issue_id, _TABLE)
if table.name == 'jobs':
join_table = models.JOIN_JOBS_ISSUES
where_clause = sql.and_(join_table.c.job_id == resource_id,
join_table.c.issue_id == issue_id)
else:
join_table = models.JOIN_COMPONENTS_ISSUES
where_clause = sql.and_(join_table.c.component_id == resource_id,
join_table.c.issue_id == issue_id)
query = join_table.delete().where(where_clause)
result = flask.g.db_conn.execute(query)
if not result.rowcount:
raise dci_exc.DCIConflict('%s_issues' % table.name, issue_id)
return flask.Response(None, 204, content_type='application/json')
def attach_issue(resource_id, table, user_id):
"""Attach an issue to a specific job."""
data = schemas.issue.post(flask.request.json)
issue = _get_or_create_issue(data)
# Second, insert a join record in the JOIN_JOBS_ISSUES or
# JOIN_COMPONENTS_ISSUES join table.
if table.name == 'jobs':
join_table = models.JOIN_JOBS_ISSUES
else:
join_table = models.JOIN_COMPONENTS_ISSUES
key = '%s_id' % table.name[0:-1]
query = join_table.insert().values({
'user_id': user_id,
'issue_id': issue['id'],
key: resource_id
})
try:
flask.g.db_conn.execute(query)
except sa_exc.IntegrityError:
raise dci_exc.DCICreationConflict(join_table.name,
'%s, issue_id' % key)
result = json.dumps({'issue': dict(issue)})
return flask.Response(result, 201, content_type='application/json')
# CRD /issues
@api.route('/issues', methods=['POST'])
@decorators.login_required
@decorators.check_roles
def create_issue(user):
data = schemas.issue.post(flask.request.json)
issue = _get_or_create_issue(data)
result = json.dumps({'issue': dict(issue)})
return flask.Response(result, 201,
headers={'ETag': issue['etag']},
content_type='application/json')
@api.route('/issues', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_all_issues(user):
args = schemas.args(flask.request.args.to_dict())
query = v1_utils.QueryBuilder(_TABLE, args, _I_COLUMNS)
query.add_extra_condition(_TABLE.c.state != 'archived')
nb_rows = query.get_number_of_rows()
rows = query.execute(fetchall=True)
rows = v1_utils.format_result(rows, _TABLE.name)
return flask.jsonify({'issues': rows, '_meta': {'count': nb_rows}})
@api.route('/issues/<uuid:issue_id>', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_issue(user, issue_id):
issue = v1_utils.verify_existence_and_get(issue_id, _TABLE)
return flask.jsonify({'issue': issue})
@api.route('/issues/<uuid:issue_id>', methods=['DELETE'])
@decorators.login_required
@decorators.check_roles
def delete_issue_by_id(user, issue_id):
# get If-Match header
if_match_etag = utils.check_and_get_etag(flask.request.headers)
v1_utils.verify_existence_and_get(issue_id, _TABLE)
with flask.g.db_conn.begin():
values = {'state': 'archived'}
where_clause = sql.and_(
_TABLE.c.etag == if_match_etag,
_TABLE.c.id == issue_id
)
query = _TABLE.update().where(where_clause).values(**values)
result = flask.g.db_conn.execute(query)
if not result.rowcount:
raise dci_exc.DCIDeleteConflict('Issue', issue_id)
return flask.Response(None, 204, content_type='application/json')
# issues-tests
@api.route('/issues/<uuid:issue_id>/tests', methods=['POST'])
@decorators.login_required
@decorators.check_roles
def add_test_to_issue(user, issue_id):
values = schemas.issue_test.post(flask.request.json)
issue_id = v1_utils.verify_existence_and_get(issue_id, _TABLE, get_id=True)
values['issue_id'] = issue_id
v1_utils.verify_existence_and_get(values.get('test_id'),
models.TESTS,
get_id=True)
q_insert = models.JOIN_ISSUES_TESTS.insert().values(**values)
flask.g.db_conn.execute(q_insert)
return flask.Response(json.dumps(values),
201,
content_type='application/json')
@api.route('/issues/<uuid:issue_id>/tests/<uuid:test_id>', methods=['DELETE'])
@decorators.login_required
@decorators.check_roles
def remove_test_from_issue(users, issue_id, test_id):
v1_utils.verify_existence_and_get(issue_id, _TABLE)
v1_utils.verify_existence_and_get(test_id, models.TESTS)
_JIT = models.JOIN_ISSUES_TESTS
query = _JIT.delete().where(sql.and_(_JIT.c.issue_id == issue_id,
_JIT.c.test_id == test_id))
try:
flask.g.db_conn.execute(query)
except sa_exc.IntegrityError:
raise dci_exc.DCIDeleteConflict('tests', 'test_id')
return flask.Response(None, 204, content_type='application/json')
@api.route('/issues/<uuid:issue_id>/tests', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_tests_from_issue(user, issue_id):
JIT = models.JOIN_ISSUES_TESTS
query = (sql.select([models.TESTS])
.select_from(JIT.join(models.TESTS))
.where(JIT.c.issue_id == issue_id))
results = flask.g.db_conn.execute(query).fetchall()
return flask.Response(json.dumps({'tests': results}),
200,
content_type='application/json')
@api.route('/issues/purge', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_to_purge_archived_issues(user):
return base.get_to_purge_archived_resources(user, _TABLE)
@api.route('/issues/purge', methods=['POST'])
@decorators.login_required
@decorators.check_roles
def purge_archived_issues(user):
return base.purge_archived_resources(user, _TABLE)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
from Kamaelia.UI.Tk.TkWindow import TkWindow
from Kamaelia.Support.Tk.Scrolling import ScrollingMenu
from Axon.Ipc import producerFinished, shutdownMicroprocess
import Tkinter
class ArgumentsPanel(Tkinter.Frame):
def __init__(self, parent, theclass):
Tkinter.Frame.__init__(self, parent)
self.theclass = theclass
# build widgets
row=0
if self.theclass['classdoc']:
self.classdoclabel = Tkinter.Label(self, text = self.theclass['classdoc'], justify="left")
self.classdoclabel['font'] = " ".join(self.classdoclabel['font'].split(" ")[0:2])
self.classdoclabel.grid(row=row, column=0,columnspan=2,
sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S, padx=4, pady=4)
row+=1
if self.theclass['initdoc']:
self.initdoclabel = Tkinter.Label(self, text = self.theclass['initdoc'], justify="left")
self.initdoclabel['font'] = " ".join(self.initdoclabel['font'].split(" ")[0:2])
self.initdoclabel.grid(row=row, column=0, columnspan=2,
sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S, padx=4, pady=4)
row+=1
self.label = Tkinter.Label(self, text="ARGUMENTS:")
self.label.grid(row=row, column=0, columnspan=2,sticky=Tkinter.W+Tkinter.S, padx=4, pady=4)
row+=1
# enumerate std args
self.args = []
for arg in self.theclass['args']['std']:
arglabel = Tkinter.Label(self, text=arg[0])
arglabel.grid(row=row,column=0, sticky=Tkinter.E)
svar = Tkinter.StringVar()
argfield = Tkinter.Entry(self, bg="white", textvariable=svar, takefocus=1)
default=""
if len(arg)>=2:
default = arg[1]
svar.set(default)
argfield.grid(row=row,column=1, sticky=Tkinter.W)
self.args.append( (arg[0], svar, default) )
row+=1
# now do * and ** args
for argname in ["*","**"]:
if self.theclass['args'][argname]:
arglabel = Tkinter.Label(self, text=argname)
arglabel.grid(row=row,column=0, sticky=Tkinter.E)
arglabel = None
svar = Tkinter.StringVar()
argfield = Tkinter.Entry(self, bg="white", textvariable=svar, takefocus=1)
argfield.grid(row=row,column=1, sticky=Tkinter.W)
self.args.append( (argname, svar, "") )
row+=1
# self.rowconfigure(row, weight=1)
# self.grid()
def getDef(self):
return { "name" : self.theclass['class'],
"module" : self.theclass['module'],
"instantiation" : self.getInstantiation()
}
def getInstantiation(self):
"""Return the instantiation string"""
argstr = ""
prefix = ""
for (argname, svar, default) in self.args:
text = svar.get().strip()
default = default.strip()
if argname != "*" and argname != "**":
if default=="" or text != default:
if not text:
text = "<<unspecified>>"
argstr = argstr + prefix + argname + " = " + text
prefix=", "
else:
if text:
argstr = argstr + prefix + text
prefix=", "
return argstr
class BuilderControlsGUI(TkWindow):
def __init__(self, classes):
self.selectedComponent = None
self.uid = 1
self.classes = classes
super(BuilderControlsGUI, self).__init__()
def setupWindow(self):
items = []
lookup = {} # This is a bit of a nasty hack really ... :-)
# Why is this a hack ?
self.window.title("Pipeline Builder")
self.addframe = Tkinter.Frame(self.window, borderwidth=2, relief=Tkinter.GROOVE)
self.addframe.grid(row=0, column=0, sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S, padx=4, pady=4)
def menuCallback(index, text):
self.click_menuChoice(lookup[text])
print self.classes[0]
for theclass in self.classes:
lookup[ theclass['module']+"."+theclass['class'] ] = theclass
items.append(theclass['module']+"."+theclass['class'])
self.choosebutton = ScrollingMenu(self.addframe, items,
command = menuCallback)
self.choosebutton.grid(row=0, column=0, columnspan=2, sticky=Tkinter.N)
self.argPanel = None
self.argCanvas = Tkinter.Canvas(self.addframe, relief=Tkinter.SUNKEN, borderwidth=2)
self.argCanvas.grid(row=1, column=0, sticky=Tkinter.N+Tkinter.S+Tkinter.E+Tkinter.W)
self.argCanvasWID = self.argCanvas.create_window(0,0, anchor=Tkinter.NW)
self.argCanvasScroll = Tkinter.Scrollbar(self.addframe, orient=Tkinter.VERTICAL)
self.argCanvasScroll.grid(row=1, column=1, sticky=Tkinter.N+Tkinter.S+Tkinter.E)
self.argCanvasScroll['command'] = self.argCanvas.yview
self.argCanvas['yscrollcommand'] = self.argCanvasScroll.set
self.click_menuChoice(self.classes[1])
self.addbutton = Tkinter.Button(self.addframe, text="ADD Component", command=self.click_addComponent )
self.addbutton.grid(row=2, column=0, columnspan=2, sticky=Tkinter.S)
self.addframe.rowconfigure(1, weight=1)
self.addframe.columnconfigure(0, weight=1)
self.remframe = Tkinter.Frame(self.window, borderwidth=2, relief=Tkinter.GROOVE)
self.remframe.grid(row=1, column=0, columnspan=2, sticky=Tkinter.S+Tkinter.E+Tkinter.W, padx=4, pady=4)
self.selectedlabel = Tkinter.Label(self.remframe, text="<no component selected>")
self.selectedlabel.grid(row=0, column=0, sticky=Tkinter.S)
self.delbutton = Tkinter.Button(self.remframe, text="REMOVE Component", command=self.click_removeComponent )
self.delbutton.grid(row=1, column=0, sticky=Tkinter.S)
self.delbutton.config(state=Tkinter.DISABLED)
self.window.rowconfigure(0, weight=1)
self.window.columnconfigure(0, weight=1)
self.window.protocol("WM_DELETE_WINDOW", self.handleCloseWindowRequest )
def main(self):
while not self.isDestroyed():
if self.dataReady("inbox"):
data = self.recv("inbox")
if data[0].upper() == "SELECT":
if data[1].upper() == "NODE":
self.componentSelected(data[2])
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
self.window.destroy()
self.tkupdate()
yield 1
def handleCloseWindowRequest(self):
self.send( shutdownMicroprocess(self), "signal")
self.window.destroy()
def makeUID(self):
uid = self.uid
self.uid += 1
return uid
def componentSelected(self, component):
self.selectedComponent = component
if component == None:
self.delbutton.config(state=Tkinter.DISABLED)
self.selectedlabel["text"] = "<no component selected>"
else:
self.delbutton.config(state=Tkinter.NORMAL)
self.selectedlabel["text"] = repr(component[0])
def click_addComponent(self):
# add to the pipeline and wire it in
c = self.argPanel.getDef()
c["id"] = ( c['name'], repr(self.makeUID()) )
msg = ("ADD", c['id'], c['name'], c, self.selectedComponent)
self.send( msg, "outbox")
def click_removeComponent(self):
if self.selectedComponent:
self.send( ("DEL", self.selectedComponent), "outbox")
def click_chooseComponent(self):
pass
def click_menuChoice(self, theclass):
if self.argPanel != None:
self.argPanel.destroy()
self.argPanel = ArgumentsPanel(self.argCanvas, theclass)
self.argPanel.update_idletasks()
self.argCanvas.itemconfigure(self.argCanvasWID, window=self.argPanel)
self.argCanvas['scrollregion'] = self.argCanvas.bbox("all")
# -------------------------------------------------------------------
class TextOutputGUI(TkWindow):
def __init__(self, title):
self.title = title
self.allreceived = True
super(TextOutputGUI, self).__init__()
def setupWindow(self):
self.textbox = Tkinter.Text(self.window, cnf={"state":Tkinter.DISABLED} )
self.window.title(self.title)
self.textbox.grid(row=0, column=0, sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S)
self.window.rowconfigure(0, weight=1)
self.window.columnconfigure(0, weight=1)
self.window.protocol("WM_DELETE_WINDOW", self.handleCloseWindowRequest )
def main(self):
while not self.isDestroyed():
if self.dataReady("inbox"):
self.textbox.config(state=Tkinter.NORMAL) # enable editing
if self.allreceived:
self.allreceived = False
self.textbox.delete(1.0, Tkinter.END)
while self.dataReady("inbox"):
data = self.recv("inbox")
if data == None:
self.allreceived = True
else:
self.textbox.insert(Tkinter.END, data)
self.textbox.config(state=Tkinter.DISABLED) # disable editing
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, shutdownMicroprocess) or isinstance(msg, producerFinished):
self.send(msg, "signal")
self.window.destroy()
self.tkupdate()
yield 1
def handleCloseWindowRequest(self):
self.send( shutdownMicroprocess(self), "signal")
self.window.destroy()
|
|
"""
======================================================================
Repeated measures ANOVA on source data with spatio-temporal clustering
======================================================================
This example illustrates how to make use of the clustering functions
for arbitrary, self-defined contrasts beyond standard t-tests. In this
case we will test whether the differences in evoked responses between
stimulation modality (visual vs. auditory) depend on the stimulus
location (left vs. right) for a group of subjects (simulated here
using one subject's data). For this purpose we will compute an
interaction effect using a repeated measures ANOVA. The multiple
comparisons problem is addressed with a cluster-level permutation test
across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Denis Engemannn <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import mne
from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
f_mway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
src_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
# ------------------------------------------------
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
epochs.equalize_event_counts(event_id)
###############################################################################
# Transform to source space
# -------------------------
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE, sLORETA, or eLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(50, npad='auto')
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep
###############################################################################
# Transform to common cortical space
# ----------------------------------
#
# Normally you would read in estimates across several subjects and morph them
# to the same cortical space (e.g. fsaverage). For example purposes, we will
# simulate this by just having each "subject" have the same response (just
# noisy in source space) here.
#
# We'll only consider the left hemisphere in this tutorial.
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
###############################################################################
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 ICO source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately, but here since all estimates are on
# 'sample' we can use one morph matrix for all the heavy lifting.
# Read the source space we are morphing to (just left hemisphere)
src = mne.read_source_spaces(src_fname)
fsave_vertices = [src[0]['vertno'], []]
morph_mat = mne.compute_source_morph(
src=inverse_operator['src'], subject_to='fsaverage',
spacing=fsave_vertices, subjects_dir=subjects_dir, smooth=20).morph_mat
morph_mat = morph_mat[:, :n_vertices_sample] # just left hemi from src
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
###############################################################################
# Now we need to prepare the group matrix for the ANOVA statistic. To make the
# clustering function work correctly with the ANOVA function X needs to be a
# list of multi-dimensional arrays (one per condition) of shape: samples
# (subjects) x time x space.
#
# First we permute dimensions, then split the array into a list of conditions
# and discard the empty dimension resulting from the split using numpy squeeze.
X = np.transpose(X, [2, 1, 0, 3]) #
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
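# Quick sanity check (not part of the original example): after the transpose
# and split, each entry of X holds one condition with shape
# (n_subjects, n_times, n_vertices_fsave), i.e. samples x time x space.
for _x in X:
    assert _x.shape == (n_subjects, n_times, n_vertices_fsave)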
###############################################################################
# Prepare function for arbitrary contrast
# ---------------------------------------
# As our ANOVA function is a multi-purpose tool we need to apply a few
# modifications to integrate it with the clustering function. This
# includes reshaping data, setting default arguments and processing
# the return values. For this reason we'll write a tiny dummy function.
#
# We will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument which is a list
# of the number of factor levels for each factor.
factor_levels = [2, 2]
###############################################################################
# Finally we will pick the interaction effect by passing 'A:B'.
# (this notation is borrowed from the R formula language). Without this,
# the main effects would also be returned.
effects = 'A:B'
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
###############################################################################
# A ``stat_fun`` must deal with a variable number of input arguments.
#
# Inside the clustering function each condition will be passed as a flattened
# array, as required by the clustering procedure. The ANOVA, however, expects an
# input array of dimensions: subjects X conditions X observations (optional).
#
# The following function catches the list input and swaps the first and the
# second dimension, and finally calls ANOVA.
#
# .. note:: For further details on this ANOVA function consider the
# corresponding
# :ref:`time-frequency tutorial <tut-timefreq-twoway-anova>`.
def stat_fun(*args):
# get f-values only.
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=return_pvals)[0]
###############################################################################
# Compute clustering statistic
# ----------------------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal);
# as we only have one hemisphere, we only need half the connectivity.
print('Computing connectivity.')
connectivity = mne.spatial_src_connectivity(src[:1])
# Now let's actually do the clustering. Please relax, on a small
# notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)
# To speed things up a bit we will ...
n_permutations = 128 # ... run fewer permutations (reduces sensitivity)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
threshold=f_thresh, stat_fun=stat_fun,
n_permutations=n_permutations,
buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# The brighter the color, the stronger the interaction between
# stimulus modality and stimulus location
brain = stc_all_cluster_vis.plot(subjects_dir=subjects_dir, views='lat',
time_label='Duration significant (ms)',
clim=dict(kind='value', lims=[0, 1, 40]))
brain.save_image('cluster-lh.png')
brain.show_view('medial')
###############################################################################
# Finally, let's investigate interaction effect by reconstructing the time
# courses
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][0] # first cluster
times = np.arange(X[0].shape[1]) * tstep * 1e3
plt.figure()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']
for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
# extract time course at cluster vertices
condition = condition[:, :, inds_v]
# normally we would normalize values across subjects but
# here we use data from the same subject so we're good to just
# create average time series across subjects and vertices.
mean_tc = condition.mean(axis=2).mean(axis=0)
std_tc = condition.std(axis=2).std(axis=0)
plt.plot(times, mean_tc.T, color=color, label=eve_id)
plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
alpha=0.5, label='')
ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Subclass for httplib.HTTPSConnection with optional certificate name
verification, depending on libcloud.security settings.
"""
import os
import warnings
import requests
from requests.adapters import HTTPAdapter
try:
# requests no longer vendors urllib3 in newer versions
# https://github.com/python/typeshed/issues/6893#issuecomment-1012511758
from urllib3.poolmanager import PoolManager
except ImportError:
from requests.packages.urllib3.poolmanager import PoolManager # type: ignore
import libcloud.security
from libcloud.utils.py3 import urlparse, PY3
__all__ = ["LibcloudBaseConnection", "LibcloudConnection"]
ALLOW_REDIRECTS = 1
# Default timeout for HTTP requests in seconds
DEFAULT_REQUEST_TIMEOUT = 60
HTTP_PROXY_ENV_VARIABLE_NAME = "http_proxy"
HTTPS_PROXY_ENV_VARIABLE_NAME = "https_proxy"
class SignedHTTPSAdapter(HTTPAdapter):
def __init__(self, cert_file, key_file):
self.cert_file = cert_file
self.key_file = key_file
super(SignedHTTPSAdapter, self).__init__()
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(
num_pools=connections,
maxsize=maxsize,
block=block,
cert_file=self.cert_file,
key_file=self.key_file,
)
class LibcloudBaseConnection(object):
"""
Base connection class to inherit from.
Note: This class should not be instantiated directly.
"""
session = None
proxy_scheme = None
proxy_host = None
proxy_port = None
proxy_username = None
proxy_password = None
http_proxy_used = False
ca_cert = None
def __init__(self):
self.session = requests.Session()
def set_http_proxy(self, proxy_url):
"""
Set a HTTP proxy which will be used with this connection.
:param proxy_url: Proxy URL (e.g. http://<hostname>:<port> without
authentication, or
http://<username>:<password>@<hostname>:<port> for
basic auth authentication).
:type proxy_url: ``str``
"""
result = self._parse_proxy_url(proxy_url=proxy_url)
scheme = result[0]
host = result[1]
port = result[2]
username = result[3]
password = result[4]
self.proxy_scheme = scheme
self.proxy_host = host
self.proxy_port = port
self.proxy_username = username
self.proxy_password = password
self.http_proxy_used = True
self.session.proxies = {
"http": proxy_url,
"https": proxy_url,
}
def _parse_proxy_url(self, proxy_url):
"""
Parse and validate a proxy URL.
:param proxy_url: Proxy URL (e.g. http://hostname:3128)
:type proxy_url: ``str``
:rtype: ``tuple`` (``scheme``, ``hostname``, ``port``, ``username``, ``password``)
"""
parsed = urlparse.urlparse(proxy_url)
if parsed.scheme not in ("http", "https"):
raise ValueError("Only http and https proxies are supported")
if not parsed.hostname or not parsed.port:
raise ValueError(
"proxy_url must be in the following format: "
"<scheme>://<proxy host>:<proxy port>"
)
proxy_scheme = parsed.scheme
proxy_host, proxy_port = parsed.hostname, parsed.port
netloc = parsed.netloc
if "@" in netloc:
username_password = netloc.split("@", 1)[0]
split = username_password.split(":", 1)
if len(split) < 2:
raise ValueError("URL is in an invalid format")
proxy_username, proxy_password = split[0], split[1]
else:
proxy_username = None
proxy_password = None
return (proxy_scheme, proxy_host, proxy_port, proxy_username, proxy_password)
def _setup_verify(self):
self.verify = libcloud.security.VERIFY_SSL_CERT
def _setup_ca_cert(self, **kwargs):
# simulating keyword-only argument in Python 2
ca_certs_path = kwargs.get("ca_cert", libcloud.security.CA_CERTS_PATH)
if self.verify is False:
pass
else:
if isinstance(ca_certs_path, list):
msg = (
"Providing a list of CA trusts is no longer supported "
"since libcloud 2.0. Using the first element in the list. "
"See http://libcloud.readthedocs.io/en/latest/other/"
"changes_in_2_0.html#providing-a-list-of-ca-trusts-is-no-"
"longer-supported"
)
warnings.warn(msg, DeprecationWarning)
self.ca_cert = ca_certs_path[0]
else:
self.ca_cert = ca_certs_path
def _setup_signing(self, cert_file=None, key_file=None):
"""
Setup request signing by mounting a signing
adapter to the session
"""
self.session.mount("https://", SignedHTTPSAdapter(cert_file, key_file))
class LibcloudConnection(LibcloudBaseConnection):
timeout = None
host = None
response = None
def __init__(self, host, port, secure=None, **kwargs):
scheme = "https" if secure is not None and secure else "http"
self.host = "{0}://{1}{2}".format(
"https" if port == 443 else scheme,
host,
":{0}".format(port) if port not in (80, 443) else "",
)
# Support for HTTP(s) proxy
# NOTE: We always only use a single proxy (either HTTP or HTTPS)
https_proxy_url_env = os.environ.get(HTTPS_PROXY_ENV_VARIABLE_NAME, None)
http_proxy_url_env = os.environ.get(
HTTP_PROXY_ENV_VARIABLE_NAME, https_proxy_url_env
)
# Connection argument has precedence over environment variables
proxy_url = kwargs.pop("proxy_url", http_proxy_url_env)
self._setup_verify()
self._setup_ca_cert()
LibcloudBaseConnection.__init__(self)
self.session.timeout = kwargs.pop("timeout", DEFAULT_REQUEST_TIMEOUT)
if "cert_file" in kwargs or "key_file" in kwargs:
self._setup_signing(**kwargs)
if proxy_url:
self.set_http_proxy(proxy_url=proxy_url)
@property
def verification(self):
"""
The option for SSL verification given to underlying requests
"""
return self.ca_cert if self.ca_cert is not None else self.verify
def request(
self, method, url, body=None, headers=None, raw=False, stream=False, hooks=None
):
url = urlparse.urljoin(self.host, url)
headers = self._normalize_headers(headers=headers)
self.response = self.session.request(
method=method.lower(),
url=url,
data=body,
headers=headers,
allow_redirects=ALLOW_REDIRECTS,
stream=stream,
verify=self.verification,
timeout=self.session.timeout,
hooks=hooks,
)
def prepared_request(
self, method, url, body=None, headers=None, raw=False, stream=False
):
headers = self._normalize_headers(headers=headers)
req = requests.Request(
method, "".join([self.host, url]), data=body, headers=headers
)
prepped = self.session.prepare_request(req)
self.response = self.session.send(
prepped,
stream=stream,
verify=self.ca_cert if self.ca_cert is not None else self.verify,
)
def getresponse(self):
return self.response
def getheaders(self):
# urllib already decoded the response body, but libcloud has a bug
# and will not check whether the content is gzipped, so let's
# remove headers indicating compressed content.
if "content-encoding" in self.response.headers:
del self.response.headers["content-encoding"]
return self.response.headers
@property
def status(self):
return self.response.status_code
@property
def reason(self):
return None if self.response.status_code > 400 else self.response.text
def connect(self): # pragma: no cover
pass
def read(self):
return self.response.content
def close(self): # pragma: no cover
# return connection back to pool
self.response.close()
def _normalize_headers(self, headers):
headers = headers or {}
# all headers should be strings
for key, value in headers.items():
if isinstance(value, (int, float)):
headers[key] = str(value)
return headers
class HttpLibResponseProxy(object):
"""
Provides a proxy pattern around the :class:`requests.Response`
object so it can be used like a :class:`httplib.HTTPResponse` object.
"""
def __init__(self, response):
self._response = response
def read(self, amt=None):
return self._response.text
def getheader(self, name, default=None):
"""
Get the contents of the header name, or default
if there is no matching header.
"""
if name in self._response.headers.keys():
return self._response.headers[name]
else:
return default
def getheaders(self):
"""
Return a list of (header, value) tuples.
"""
if PY3:
return list(self._response.headers.items())
else:
return self._response.headers.items()
@property
def status(self):
return self._response.status_code
@property
def reason(self):
return self._response.reason
@property
def version(self):
# requests doesn't expose this
return "11"
@property
def body(self):
# NOTE: We use property to avoid saving whole response body into RAM
# See https://github.com/apache/libcloud/pull/1132 for details
return self._response.content
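# Minimal usage sketch (not part of libcloud itself): routing a
# LibcloudConnection through an HTTP proxy. The API host, path and proxy
# address below are made-up values, and error handling is omitted.
if __name__ == "__main__":
    conn = LibcloudConnection(host="api.example.com", port=443, secure=True)
    # All traffic for this connection now goes through the (hypothetical) proxy.
    conn.set_http_proxy(proxy_url="http://user:secret@proxy.example.com:3128")
    conn.request("GET", "/v1/status")
    print(conn.status, conn.getheaders())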
|
|
"""A medical image analysis pipeline.
The pipeline is used for brain tissue segmentation using a decision forest classifier.
"""
import argparse
import datetime
import os
import sys
import timeit
import SimpleITK as sitk
import numpy as np
from tensorflow.python.platform import app
from sklearn.mixture import GaussianMixture
from scipy import stats as scipy_stats
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) # append the MIALab root directory to Python path
# fixes the ModuleNotFoundError when executing main.py in the console after code changes (e.g. git pull)
# somehow pip install does not keep track of package changes
import mialab.classifier.decision_forest as df
import mialab.data.conversion as conversion
import mialab.data.structure as structure
import mialab.data.loading as load
import mialab.utilities.file_access_utilities as futil
import mialab.utilities.pipeline_utilities as putil
import mialab.utilities.statistic_utilities as statistics
FLAGS = None # the program flags
IMAGE_KEYS = [structure.BrainImageTypes.T1, structure.BrainImageTypes.T2, structure.BrainImageTypes.GroundTruth] # the list of images we will load
TEST_BATCH_SIZE = 2 # 1..30, the higher the faster but more memory usage
NORMALIZE_FEATURES = False # Normalize feature vectors to mean 0 and std 1
def main(_):
"""Brain tissue segmentation using decision forests.
The main routine executes the medical image analysis pipeline:
- Image loading
- Registration
- Pre-processing
- Feature extraction
- Decision forest classifier model building
- Segmentation using the decision forest classifier model on unseen images
- Post-processing of the segmentation
- Evaluation of the segmentation
"""
# load atlas images
putil.load_atlas_images(FLAGS.data_atlas_dir)
print('-' * 5, 'Training...')
# generate a model directory (use datetime to ensure that the directory is empty)
# we need an empty directory because TensorFlow will continue training an existing model if it is not empty
t = datetime.datetime.now().strftime('%Y-%m-%d%H%M%S')
model_dir = os.path.join(FLAGS.model_dir, t)
os.makedirs(model_dir, exist_ok=True)
# crawl the training image directories
crawler = load.FileSystemDataCrawler(FLAGS.data_train_dir,
IMAGE_KEYS,
futil.BrainImageFilePathGenerator(),
futil.DataDirectoryFilter())
data_items = list(crawler.data.items())
train_data_size = len(data_items)
pre_process_params = {'zscore_pre': True,
'coordinates_feature': True,
'intensity_feature': True,
'gradient_intensity_feature': True}
start_time_total_train = timeit.default_timer()
batch_data = dict(data_items)
# load images for training and pre-process
images = putil.pre_process_batch(batch_data, pre_process_params, multi_process=True)
print('pre-processing done')
# generate feature matrix and label vector
data_train = np.concatenate([img.feature_matrix[0] for img in images])
labels_train = np.concatenate([img.feature_matrix[1] for img in images])
if NORMALIZE_FEATURES:
# normalize data (mean 0, std 1)
data_train = scipy_stats.zscore(data_train)
start_time = timeit.default_timer()
# Gaussian mixture model
# ##############################################################################################################
thegmm = GaussianMixture(n_components=3, covariance_type='tied')
thegmm.fit(data_train, labels_train)
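# Note: sklearn's GaussianMixture is unsupervised; the label vector passed
# above is accepted as the optional ``y`` argument but ignored during fitting,
# and predict() later returns mixture-component indices rather than tissue
# labels.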
# ##############################################################################################################
print(' Time elapsed:', timeit.default_timer() - start_time, 's')
time_total_train = timeit.default_timer() - start_time_total_train
start_time_total_test = timeit.default_timer()
print('-' * 5, 'Testing...')
result_dir = os.path.join(FLAGS.result_dir, t)
os.makedirs(result_dir, exist_ok=True)
# initialize evaluator
evaluator = putil.init_evaluator(result_dir)
# crawl the test image directories
crawler = load.FileSystemDataCrawler(FLAGS.data_test_dir,
IMAGE_KEYS,
futil.BrainImageFilePathGenerator(),
futil.DataDirectoryFilter())
data_items = list(crawler.data.items())
all_probabilities = None
for batch_index in range(0, len(data_items), TEST_BATCH_SIZE):
# slicing manages out of range; no need to worry
batch_data = dict(data_items[batch_index: batch_index + TEST_BATCH_SIZE])
# load images for testing and pre-process
pre_process_params['training'] = False
images_test = putil.pre_process_batch(batch_data, pre_process_params, multi_process=True)
images_prediction = []
images_probabilities = []
for img in images_test:
print('-' * 10, 'Testing', img.id_)
start_time = timeit.default_timer()
# ##############################################################################################################
features = img.feature_matrix[0]
if NORMALIZE_FEATURES:
features = scipy_stats.zscore(features)
predictions = thegmm.predict(features)
probabilities = thegmm.predict_proba(features)
if all_probabilities is None:
all_probabilities = np.array([probabilities])
else:
all_probabilities = np.concatenate((all_probabilities, [probabilities]), axis=0)
# ##############################################################################################################
print(' Time elapsed:', timeit.default_timer() - start_time, 's')
# convert prediction and probabilities back to SimpleITK images
image_prediction = conversion.NumpySimpleITKImageBridge.convert(predictions.astype(np.uint8),
img.image_properties)
image_probabilities = conversion.NumpySimpleITKImageBridge.convert(probabilities, img.image_properties)
# evaluate segmentation without post-processing
evaluator.evaluate(image_prediction, img.images[structure.BrainImageTypes.GroundTruth], img.id_)
images_prediction.append(image_prediction)
images_probabilities.append(image_probabilities)
# post-process segmentation and evaluate with post-processing
post_process_params = {'crf_post': True}
images_post_processed = putil.post_process_batch(images_test, images_prediction, images_probabilities,
post_process_params, multi_process=True)
for i, img in enumerate(images_test):
evaluator.evaluate(images_post_processed[i], img.images[structure.BrainImageTypes.GroundTruth],
img.id_ + '-PP')
# save results
sitk.WriteImage(images_prediction[i], os.path.join(result_dir, images_test[i].id_ + '_SEG.mha'), True)
sitk.WriteImage(images_post_processed[i], os.path.join(result_dir, images_test[i].id_ + '_SEG-PP.mha'), True)
time_total_test = timeit.default_timer() - start_time_total_test
# write summary of parameters to results dir
with open(os.path.join(result_dir, 'summary.txt'), 'w') as summary_file:
print('Result dir: {}'.format(result_dir))
print('Result dir: {}'.format(result_dir), file=summary_file)
print('Training data size: {}'.format(train_data_size), file=summary_file)
print('Total training time: {:.1f}s'.format(time_total_train), file=summary_file)
print('Total testing time: {:.1f}s'.format(time_total_test), file=summary_file)
print('Voxel Filter Mask: {}'.format(putil.FeatureExtractor.VOXEL_MASK_FLT), file=summary_file)
print('Normalize Features: {}'.format(NORMALIZE_FEATURES), file=summary_file)
print('GMM', file=summary_file)
stats = statistics.gather_statistics(os.path.join(result_dir, 'results.csv'))
print('Result statistics:', file=summary_file)
print(stats, file=summary_file)
# all_probabilities.astype(np.float16).dump(os.path.join(result_dir, 'all_probabilities.npy'))
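# A minimal, self-contained sketch (not part of the original pipeline) of the
# GaussianMixture fit/predict pattern used inside main() above. The array
# shapes, random data and component count are illustrative assumptions only.
def _gmm_example():
    rng = np.random.RandomState(0)
    features = rng.rand(100, 7)  # e.g. 100 voxels with 7 features each
    if NORMALIZE_FEATURES:
        features = scipy_stats.zscore(features)
    gmm = GaussianMixture(n_components=3, covariance_type='tied')
    gmm.fit(features)  # unsupervised fit; no label vector is needed
    components = gmm.predict(features)  # (100,) mixture-component index per voxel
    probabilities = gmm.predict_proba(features)  # (100, 3) soft assignments
    return components, probabilities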
if __name__ == "__main__":
"""The program's entry point."""
script_dir = os.path.dirname(sys.argv[0])
parser = argparse.ArgumentParser(description='Medical image analysis pipeline for brain tissue segmentation')
parser.add_argument(
'--model_dir',
type=str,
default=os.path.normpath(os.path.join(script_dir, './mia-model')),
help='Base directory for output models.'
)
parser.add_argument(
'--result_dir',
type=str,
default=os.path.normpath(os.path.join(script_dir, './mia-result')),
help='Directory for results.'
)
parser.add_argument(
'--data_atlas_dir',
type=str,
default=os.path.normpath(os.path.join(script_dir, '../data/atlas')),
help='Directory with atlas data.'
)
parser.add_argument(
'--data_train_dir',
type=str,
default=os.path.normpath(os.path.join(script_dir, '../data/train/')),
help='Directory with training data.'
)
parser.add_argument(
'--data_test_dir',
type=str,
default=os.path.normpath(os.path.join(script_dir, '../data/test/')),
help='Directory with testing data.'
)
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Implementation of EmQL model on different tasks.
We implement the EmQL model, which combines dense retrieval with sparse
filtering. There are a few different training targets: membership,
intersection, union, follow, set_follow, metaqa2 and metaqa3. The first few
targets (all except the last two) are trained to represent a knowledge base in
the embedding space. The KB embeddings are then used in the downstream tasks
metaqa2 and metaqa3.
This file mainly consists of three groups of functions:
1. model definitions: model_xxx() and helper functions
2. evaluation functions: run_tf_evaluation()
3. prediction functions: get_tf_xxx_prediction()
which are called from build_model_fn(). Models and predictions are selected
and computed based on the names of targets.
"""
from typing import Any, Dict, Tuple

from absl import flags
from language.emql import module
from language.emql import util
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
FLAGS = flags.FLAGS
VERY_NEG = -1e6
ZERO_THRES = 1e-5
TensorTupleType = Tuple[tf.Tensor, Ellipsis]
ParamsType = Dict[str, Any]
ModelReturnType = Tuple[tf.Tensor, Dict[str, tf.Tensor]]
class EmQL(object):
"""EmQL Implementation."""
def __init__(self, name, params, data_loader):
with tf.variable_scope('embeddings_mat', reuse=tf.AUTO_REUSE):
self.entity_embeddings_mat = tf.get_variable(
name='entity_embeddings_mat',
shape=[data_loader.num_entities, params['entity_emb_size']],
initializer=tf.random_normal_initializer(),
trainable=params['train_entity_emb'])
self.relation_embeddings_mat = tf.get_variable(
name='relation_embeddings_mat',
shape=[data_loader.num_relations, params['relation_emb_size']],
initializer=tf.random_normal_initializer(),
trainable=params['train_relation_emb'])
if name in ['metaqa2', 'metaqa3']:
self.word_embeddings_mat = tf.get_variable(
name='word_embeddings_mat',
shape=[data_loader.num_vocab, params['vocab_emb_size']],
initializer=tf.random_normal_initializer(),
trainable=True)
### Convert fact info to tf tensors.
self.all_entity_ids = tf.constant(
data_loader.all_entity_ids, dtype=tf.int32)
self.all_entity_sketches = tf.constant(
data_loader.all_entity_sketches, dtype=tf.int32)
self.all_relation_sketches = tf.constant(
data_loader.all_relation_sketches, dtype=tf.int32)
self.all_fact_subjids = tf.constant(
data_loader.all_fact_subjids, dtype=tf.int32)
self.all_fact_relids = tf.constant(
data_loader.all_fact_relids, dtype=tf.int32)
self.all_fact_objids = tf.constant(
data_loader.all_fact_objids, dtype=tf.int32)
# Construct fact embeddings.
self.kb_embeddings_mat = self.load_kb_embeddings(name, params)
# Construct other utilities
if name == 'webqsp':
self.all_entity_is_cvt = tf.constant(
data_loader.all_entity_is_cvt, dtype=tf.float32)
self.bert_module = hub.Module(
params['bert_handle'],
tags={'train'} if params['train_bert'] else {},
trainable=params['train_bert'])
def load_kb_embeddings(self, name, params):
"""Construct or load KB embeddings.
Args:
name: task name
params: params
Returns:
a tensor for kb embeddings
"""
if (name in ['set_follow', 'metaqa2', 'metaqa3'] or
name.startswith('query2box')):
all_fact_subj_embs = tf.nn.embedding_lookup(self.entity_embeddings_mat,
self.all_fact_subjids)
# num_facts, hidden_size
all_fact_rel_embs = tf.nn.embedding_lookup(self.relation_embeddings_mat,
self.all_fact_relids)
# num_facts, hidden_size
all_fact_obj_embs = tf.nn.embedding_lookup(self.entity_embeddings_mat,
self.all_fact_objids)
# num_facts, hidden_size
kb_embs = tf.concat(
[all_fact_subj_embs, all_fact_rel_embs, all_fact_obj_embs], axis=1)
# num_fact, hidden_size * 3
return kb_embs
elif name in ['webqsp']:
return util.load_db_checkpoint(
var_name='embeddings_mat/kb_embeddings_mat',
checkpoint_dir=params['kb_index'],
cpu=False,
trainable=False)
##############################################################
########################### Models ###########################
##############################################################
def get_tf_model(self, name, features,
params):
"""Select model.
Args:
name: model name
features: features
candidate_subj_ids -- batch_size, num_candidate
params: hyper-parameters
Returns:
logits
"""
if name == 'membership':
return self.model_membership(features, params)
elif name == 'intersection' or name == 'union':
return self.model_intersection(features, params)
elif name == 'follow' or name == 'set_follow':
return self.model_follow(features, params)
elif name == 'mixture':
return self.model_mixture(features, params)
elif name == 'metaqa2' or name == 'metaqa3':
num_hops = int(name[-1])
return self.model_metaqa(
features, params, hop=num_hops, top_k=params['intermediate_top_k'])
elif name == 'webqsp':
return self.model_webqsp(
features, params, top_k=params['intermediate_top_k'])
elif name.startswith('query2box'):
return self.model_query2box(name, features, params)
else:
raise ValueError('name not recognized')
def model_membership(self, features,
unused_params):
"""Compute logits for set membership.
A representation of set is the average of embeddings of entities in
the set.
Args:
features:
-- entity_ids: ids of entities in the set, padded with -1
e.g. [1, 5, 3, -1, -1]
# batch_size, max_set
-- labels: if the i'th entity is in the set, 1 for True, 0 for False
e.g. [0, 1, 0, 1, 0, 1, 0, 0, ...]
# batch_size, num_entities
unused_params: hyper-parameters
Returns:
predictions and losses
"""
entity_ids, labels = features
entity_mask = tf.cast(tf.not_equal(entity_ids, -1), dtype=tf.int32)
# batch_size, max_set
entity_embs = tf.nn.embedding_lookup(
self.entity_embeddings_mat, entity_ids * entity_mask)
# batch_size, max_set, hidden_size
entity_mask_3d = tf.cast(
tf.expand_dims(entity_mask, axis=2), dtype=tf.float32)
# batch_size, max_set, 1
set_embs = tf.reduce_sum(entity_embs * entity_mask_3d, axis=1)
# batch_size, hidden_size
set_embs /= tf.reduce_sum(entity_mask_3d, axis=1)
# batch_size, hidden_size
logits = tf.matmul(set_embs, self.entity_embeddings_mat, transpose_b=True)
# batch_size, num_entities
loss = self.compute_tf_loss(logits, labels)
return loss, {'logits': logits, 'labels': labels}
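# Worked illustration of the masking above (ids taken from the docstring, not
# real data): entity_ids = [1, 5, 3, -1, -1] yields entity_mask = [1, 1, 1, 0, 0],
# so the padded -1 entries are looked up as id 0 but zeroed out by
# entity_mask_3d before the sum. set_embs is therefore the mean of the
# embeddings of entities 1, 5 and 3, and the logits are dot products between
# this set embedding and every row of entity_embeddings_mat.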
def model_intersection(self, features,
params):
"""Compute logits for intersection.
A representation of intersection is the sum of representations of sets
that are being intersected.
Args:
features: features
-- candidate_set1: a k-hot vector of length num_entities representing
the entities in the first set.
e.g. [1, 0, 1, 0, 1, 1, 0, 0]
-- candidate_set2: a k-hot vector of length num_entities representing
the entities in the first set.
e.g. [1, 1, 0, 0, 1, 0, 0, 0]
-- labels: a k-hot vector of length num_entities representing
the entities in the intersection of two sets.
e.g. [1, 0, 0, 0, 1, 0, 0, 0]
params: hyper-parameters
Returns:
predictions and losses
"""
candidate_set1, candidate_set2, labels = features
candidate_set1_mask = tf.expand_dims(candidate_set1, axis=2)
# batch_size, num_candidate, 1
candidate_set2_mask = tf.expand_dims(candidate_set2, axis=2)
# batch_size, num_candidate, 1
candidate_embs = self.entity_embeddings_mat
# batch_size, num_candidate, hidden_size
set1_embs_query = tf.reduce_sum(
candidate_embs * candidate_set1_mask, axis=1, keepdims=True) \
/ tf.reduce_sum(candidate_set1_mask, axis=1, keepdims=True)
# batch_size, 1, hidden_size
set2_embs_query = tf.reduce_sum(
candidate_embs * candidate_set2_mask, axis=1, keepdims=True) \
/ tf.reduce_sum(candidate_set2_mask, axis=1, keepdims=True)
# batch_size, 1, hidden_size
intersection_embs_query = set1_embs_query + set2_embs_query
# batch_size, 1, hidden_size
logits, _ = self.compute_logits(candidate_embs, intersection_embs_query,
params['entity_emb_size'])
loss = self.compute_tf_loss(logits, labels)
return loss, {'logits': logits, 'labels': labels}
def model_follow(self, features,
params):
"""Compute logits for follow operation.
A follow operation is considered as the intersection of a set of
facts with correct subjects and a set of facts with correct relation.
Args:
features: features
-- subject_set: all facts that have the correct subject
-- relation_set: all facts that have the correct relation
-- labels: facts that have both correct subject and relation
params: hyper-parameters
Returns:
predictions and losses
"""
subject_set, relation_set, labels = features
subject_set_mask = tf.expand_dims(subject_set, axis=2)
# batch_size, num_candidate, 1
relation_set_mask = tf.expand_dims(relation_set, axis=2)
# batch_size, num_candidate, 1
candidate_embs = self.load_kb_embeddings('set_follow', params)
# batch_size, num_candidate, hidden_size * 3
set1_embs_query = tf.reduce_sum(
candidate_embs * subject_set_mask, axis=1, keepdims=True) \
/ tf.reduce_sum(subject_set_mask, axis=1, keepdims=True)
# batch_size, 1, hidden_size * 3
set2_embs_query = tf.reduce_sum(
candidate_embs * relation_set_mask, axis=1, keepdims=True) \
/ tf.reduce_sum(relation_set_mask, axis=1, keepdims=True)
# batch_size, 1, hidden_size * 3
intersection_embs_query = set1_embs_query + set2_embs_query
# batch_size, 1, hidden_size * 3
logits, _ = self.compute_logits(
candidate_embs, intersection_embs_query,
params['entity_emb_size'] * 2 + params['relation_emb_size'])
loss = self.compute_tf_loss(logits, labels)
return loss, {'logits': logits, 'labels': labels}
def model_mixture(self, features,
params):
"""Jointly train four tasks.
m_xxx stands for feature and label to train the set membership task.
p_xxx stands for a pair of set to train intersection and union.
f_xxx stands for features to train the follow operation.
Args:
features: features
params: hyper-parameters
Returns:
predictions and losses
"""
m_padded_ent_ids, m_labels, \
p_candidate_set1, p_candidate_set2, p_union_labels, p_intersection_labels, \
f_subject_set, f_relation_set, f_labels = features
# Run model on the task of set membership.
m_features = (m_padded_ent_ids, m_labels)
m_loss, m_tensors = self.model_membership(m_features, params)
# Run model on the task of intersection and union.
p_features = (p_candidate_set1, p_candidate_set2, p_intersection_labels)
p_intersection_loss, p_tensors = self.model_intersection(
p_features, params)
p_union_loss = self.compute_tf_loss(
p_tensors['logits'], p_union_labels)
# Run model on the task of set follow.
f_features = (f_subject_set, f_relation_set, f_labels)
f_loss, f_tensors = self.model_follow(f_features, params)
loss = m_loss + p_intersection_loss + p_union_loss + f_loss
tensors = dict()
tensors['membership_logits'] = m_tensors['logits']
tensors['membership_labels'] = m_labels
tensors['intersection_logits'] = p_tensors['logits']
tensors['intersection_labels'] = p_intersection_labels
tensors['union_logits'] = p_tensors['logits']
tensors['union_labels'] = p_union_labels
tensors['set_follows_logits'] = f_tensors['logits']
tensors['set_follows_labels'] = f_labels
return loss, tensors
def model_metaqa(self, features, params,
hop, top_k):
"""Compute logits for MetaQA multi-hop reasoning task.
MetaQA model is made of 4 different parts:
1. Construct KB triple query to retrieve the top k triples from KB.
2. Check if retrieved triples are eligible by filtering them with sketch.
3. Check if any retrieved triples should be excluded, i.e. if their
object entities have been visited in the previous iterations.
4. Update sketch and query embeddings from the next iteration.
Args:
features:
-- question: a list of token ids for vocabs padded with -1
-- question_entity_id: question entity id
-- question_entity_sketch: a count-min sketch with only one element
(the question entity)
-- answer_labels: a k-hot vector marking if an entity is an answer
params: hyper-parameters
hop: number of hops
top_k: select top_k facts at each iteration
Returns:
predictions and losses
"""
tensors = {}
question, question_entity_id, question_entity_sketch, answer_labels = features
# Get text encodings of questions and question entities.
question_embs = self.compute_average_embs(question)
# batch_size, hidden_size
question_entity_embs = tf.nn.embedding_lookup(
self.entity_embeddings_mat, question_entity_id)
# batch_size, hidden_size
# Set initial query embeddings and sketches.
query_entity_embs = question_entity_embs
# batch_size, hidden_size
query_entity_sketch = question_entity_sketch
# batch_size, depth, width
excluded_entity_sketch = question_entity_sketch
# batch_size, depth, width
# Begin iteration.
for i in range(hop):
# Construct queries by mapping original question embeddings to another
# space. Queries are constructed as a concatenation of subject, relation,
# and placeholders for object embeddings.
with tf.variable_scope('question_emb_ffn_%s' % i):
question_embs = tf.keras.layers.Dense(
units=params['relation_emb_size'])(
question_embs)
# batch_size, hidden_size
query_embs = tf.concat(
[query_entity_embs, question_embs,
tf.zeros_like(query_entity_embs)],
axis=1)
# batch_size, hidden_size * 3
# Retrieve top k facts that match the queries, and check if the top
# k facts are eligible by checking the count-min sketch. Subject
# entities must have non-zero probability to be considered as
# eligible.
topk_fact_logits, topk_fact_ids = util.topk_search_fn(
query_embs, self.kb_embeddings_mat, top_k)
# batch_size, top_k batch_size, top_k
topk_subj_ids = tf.gather(self.all_fact_subjids, topk_fact_ids)
# batch_size, topk
is_topk_fact_eligible, _ = module.check_topk_fact_eligible(
topk_subj_ids, query_entity_sketch, self.all_entity_sketches, params)
# Entities visited before should also be excluded. Similarly, we first
# get the probability from sketch and exclude those with non-zero
# probability in previous iterations.
topk_obj_ids = tf.gather(self.all_fact_objids, topk_fact_ids)
# batch_size, topk
is_topk_fact_excluded, _ = module.check_topk_fact_eligible(
topk_obj_ids, excluded_entity_sketch,
self.all_entity_sketches, params)
# Apply the filtering results to logits of topk facts.
if params['use_cm_sketch']:
topk_fact_logits += (
1 - is_topk_fact_eligible + is_topk_fact_excluded) * VERY_NEG
# batch_size, top_k
# Construct query embeddings for next iteration.
topk_fact_obj_embs = tf.nn.embedding_lookup(
self.entity_embeddings_mat, topk_obj_ids)
# batch_size, top_k, hidden_size
topk_softmax = tf.nn.softmax(topk_fact_logits)
# batch_size, top_k
query_entity_embs = tf.reduce_sum(
topk_fact_obj_embs * tf.expand_dims(topk_softmax, axis=2), axis=1)
# batch_size, hidden_size
# Update sketches.
query_entity_sketch = module.create_cm_sketch(
topk_obj_ids, topk_softmax, self.all_entity_sketches,
cm_width=params['cm_width'])
# batch_size, depth, width
excluded_entity_sketch += query_entity_sketch
# batch_size, depth, width
# We only compute loss on the topk retrieval results at the last iteration.
# No intermediate training signal is required.
topk_fact_labels = tf.gather(
answer_labels, topk_fact_ids, batch_dims=1, axis=1)
topk_fact_loss = self.compute_tf_loss(topk_fact_logits, topk_fact_labels)
tensors = {
'logits': topk_fact_logits,
'labels': topk_fact_labels,
'candidates': topk_fact_ids
}
return topk_fact_loss, tensors
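# Summary of one reasoning hop above (descriptive comment only):
# (1) the question embedding is projected to a relation embedding and
#     concatenated with the current query-entity embedding (object slot
#     zeroed) to form a fact query;
# (2) util.topk_search_fn retrieves the top_k closest KB fact embeddings;
# (3) when use_cm_sketch is enabled, facts whose subject is not in the current
#     count-min sketch, or whose object was already visited, are pushed down
#     by VERY_NEG;
# (4) the softmax-weighted average of the retrieved object embeddings becomes
#     the next query-entity embedding, and the sketches are updated.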
def compute_average_embs(self, question):
"""Compute the text encoding of questions.
We take a bag of word approach. Question encoding is an average pooling
of word embeddings.
Args:
question: a list of token ids
Returns:
a tensor for question encoding
"""
question_mask = tf.cast(tf.not_equal(question, -1), tf.int32)
# batch_size, max_question_len
question_mask_3d = tf.cast(
tf.expand_dims(question_mask, axis=2), tf.float32)
question_embs = tf.nn.embedding_lookup(
self.word_embeddings_mat, question * question_mask)
# batch_size, max_question_len, hidden_size
question_embs = tf.reduce_sum(question_embs * question_mask_3d, axis=1)
# batch_size, hidden_size
question_len = tf.reduce_sum(question_mask, axis=1, keepdims=True)
# batch_size, 1
question_embs /= tf.cast(question_len, tf.float32)
# batch_size, hidden_size
return question_embs
def model_webqsp(self, features, params,
top_k):
"""Compute logits for more evaluation.
Args:
features: features
params: hyper-parameters
top_k: top k to retrieve at each hop
Returns:
predictions and losses
"""
tensors = dict()
tensors['intermediate_logits'] = list()
tensors['intermediate_labels'] = list()
tensors['intermediate_objs'] = list()
tensors['intermediate_answerable'] = list()
(question_token_ids, segment_ids, question_mask, question_entity_id,
question_entity_sketch, constraint_entity_id, constraint_entity_sketch,
answer_ids) = features
# Compute question embeddings and question entity embeddings.
question_embs = self.compute_bert_cls_embs(question_token_ids, segment_ids,
question_mask)
# batch_size, bert_hidden_size
question_entity_embs = tf.nn.embedding_lookup(self.entity_embeddings_mat,
question_entity_id)
# batch_size, hidden_size
# Initialize query embeddings before iteration.
query_entity_embs = question_entity_embs
# batch_size, hidden_size
query_entity_sketch = question_entity_sketch
# batch_size, depth, width
# Questions in WebQuestionsSP are either one-hop questions or
# two-hop questions where the intermediate entity is a CVT
# entity. In this experiment, we set the max hop to 2.
for hid in range(2):
# Compute query embeddings of relation by projecting the original
# question embeddings into another space.
with tf.variable_scope('question_emb_ffn_%d' % hid):
query_relation_embs = tf.keras.layers.Dense(
units=params['relation_emb_size'])(
question_embs)
# batch_size, hidden_size
# We concatenate the subject, relation, and object embeddings to form
# a query. Note that we set the relation embeddings to 0, because
# we observe that this could make the initial retrieval process
# more stable at training time. This retrieval will only return a fact id.
# We will recompute the similarity score with non-zero query relation
# embeddings, which will eventually be used to compute logits.
# Another possibility is to use a small coefficient, \alpha, that starts
# small and is slightly increased as training goes on.
query_embs = tf.concat([
query_entity_embs,
tf.zeros_like(query_relation_embs),
tf.zeros_like(query_entity_embs)
],
axis=1)
# batch_size, hidden_size * 3
# Retrieve the topk facts and gather their subjects and objects.
_, topk_fact_ids = util.topk_search_fn(
query_embs, self.kb_embeddings_mat, top_k)
# batch_size, topk
topk_subj_ids = tf.gather(self.all_fact_subjids, topk_fact_ids)
# batch_size, topk
topk_obj_ids = tf.gather(self.all_fact_objids, topk_fact_ids)
# batch_size, topk
# We check if the retrieved triple is eligible. To do so, we check
# if the subject of the triple passes the cm-sketch with non-zero
# probability. The probability of entities in the sketch is computed
# from the previous iterations (or directly initialized) as the
# softmax of logits. To recover the logits from softmax, we take the
# log values and further mask out those that are not eligible.
is_topk_fact_eligible, topk_fact_prev_probs = \
module.check_topk_fact_eligible(
topk_subj_ids, query_entity_sketch,
self.all_entity_sketches, params)
# batch_size, topk
topk_fact_prev_logits = tf.math.log(topk_fact_prev_probs + 1e-6)
# batch_size, topk
topk_fact_prev_logits += (1.0 - is_topk_fact_eligible) * VERY_NEG
# batch_size, topk
with tf.variable_scope('topk_fact_eligibility_%d' % hid):
# We learn another bias term here. This helps to adjust how
# significantly signals from previous iterations contribute to
# later predictions.
topk_fact_prev_logit_bias = tf.get_variable(
name='topk_fact_logit_bias',
dtype=tf.float32,
shape=[1, 1],
initializer=tf.random_normal_initializer(),
trainable=True)
topk_fact_prev_logits += topk_fact_prev_logit_bias
# Now, we take the full fact embedding and compute the similarity
# score between query and facts (with query relation embedding). We
# further added the logits from previous iterations (after passing
# the sketch) if we would like to use cm sketch.
query_embs = tf.concat(
[tf.zeros_like(query_entity_embs), query_relation_embs], axis=1)
# batch_size, hidden_size * 2
topk_fact_logit = self.compute_topk_fact_logits(topk_fact_ids, query_embs,
params)
# batch_size, topk
if params['use_cm_sketch']:
topk_fact_logit += topk_fact_prev_logits
# batch_size, topk
# We filter the logits of CVT and regular entities. In the WebQuestionsSP
# dataset, questions are either 1 hop questions or 2 hop questions
# that travel through a CVT node. Thus, logits of regular entities
# are considered to be answer candidates. Logits of CVT entities
# will be passed to the next hop.
# To distinguish CVT nodes, we could add another bit to entity embeddings
# which we refer to as "hard" type. For simplicity, we use another
# vector to store such information (just not appended to the end of
# the embedding table).
is_topk_obj_cvt = tf.gather(self.all_entity_is_cvt, topk_obj_ids)
# batch_size, topk
topk_ent_fact_logit = topk_fact_logit + is_topk_obj_cvt * VERY_NEG
# batch_size, topk
topk_cvt_fact_logit = topk_fact_logit + (1.0 - is_topk_obj_cvt) * VERY_NEG
# batch_size, topk
tensors['intermediate_logits'].append(topk_ent_fact_logit)
tensors['intermediate_objs'].append(topk_obj_ids)
# Then we compute the new query embedding and cm-sketch for the next
# iteration.
query_entity_embs, query_entity_sketch = \
self.compute_query_embs_and_sketch(
topk_obj_ids, topk_cvt_fact_logit, params)
# Finally, we check if any of the retrieved facts actually contain
# the correct answer. We treat them as labels and store them
# for future use. We also compute the percentage of questions whose
# answers are retrieved at this step, which is an upper bound on
# the performance of our model.
_, topk_fact_labels = util.compute_x_in_set(topk_obj_ids, answer_ids)
topk_fact_labels = tf.cast(topk_fact_labels, dtype=tf.float32)
# batch_size, topk
topk_fact_labels *= is_topk_fact_eligible
# batch_size, topk
_, topk_objs_in_answers = util.compute_x_in_set(topk_obj_ids, answer_ids)
# batch_size, topk
topk_objs_in_answers = tf.logical_and(topk_objs_in_answers,
is_topk_fact_eligible > 0.0)
# batch_size, topk
topk_objs_in_answers = tf.reduce_any(topk_objs_in_answers, axis=1)
# batch_size
tensors['intermediate_labels'].append(topk_fact_labels)
tensors['intermediate_answerable'].append(topk_objs_in_answers)
# After a few iterations, we concatenate the logits, labels and predictions
# to unify the retrieval and prediction results of all iterations. The
# concatenated results will be used for final prediction.
concat_topk_obj_ids = tf.concat(tensors['intermediate_objs'], axis=1)
# batch_size, topk * 2
concat_topk_fact_logit = tf.concat(tensors['intermediate_logits'], axis=1)
# batch_size, topk * 2
concat_topk_fact_labels = tf.concat(tensors['intermediate_labels'], axis=1)
# batch_size, topk * 2
# We observe that there are ties between top predicted facts. They share
# similar prediction scores but only a few of them satisfy the constraint.
# Thus, we implement a constraint module to discriminate those entities.
# We first compute an average of entity embeddings of tied facts.
# Constraint entity embeddings are directly loaded from embedding table.
concat_topk_fact_best_logit = tf.reduce_max(
concat_topk_fact_logit, axis=1, keepdims=True)
# batch_size, 1
filtered_concat_topk_fact_best_logit = tf.cast(
tf.equal(concat_topk_fact_logit, concat_topk_fact_best_logit),
dtype=tf.float32)
# batch_size, topk * 2
concat_topk_subj_query_embs, concat_topk_subj_sketches = \
self.compute_query_embs_and_sketch(
concat_topk_obj_ids, filtered_concat_topk_fact_best_logit, params)
# batch_size, topk * 2, hidden_size
constraint_entity_embs = util.embedding_lookup_with_padding(
self.entity_embeddings_mat, constraint_entity_id)
# batch_size, hidden_size
with tf.variable_scope('question_emb_ffn_constraint'):
# Project question embeddings to get query relation embeddings.
constraint_relation_embs = tf.keras.layers.Dense(
units=params['relation_emb_size'])(
question_embs)
# batch_size, hidden_size
constraint_query_embs = tf.concat([
concat_topk_subj_query_embs,
tf.zeros_like(constraint_relation_embs), constraint_entity_embs
],
axis=1)
# batch_size, hidden_size * 3
constraint_topk_fact_logits, constraint_topk_fact_ids = \
util.topk_search_fn(constraint_query_embs, self.kb_embeddings_mat,
top_k)
# batch_size, topk
# Similar as previous retrieval steps, we check if retrieved facts for
# constraints are eligible. We mask out logits of ineligible facts.
constraint_topk_subj_ids = tf.gather(self.all_fact_subjids,
constraint_topk_fact_ids)
# batch_size, topk
is_constraint_topk_subj_eligible, _ = module.check_topk_fact_eligible(
constraint_topk_subj_ids, concat_topk_subj_sketches,
self.all_entity_sketches, params)
# batch_size, topk
constraint_topk_obj_ids = tf.gather(self.all_fact_objids,
constraint_topk_fact_ids)
# batch_size, topk
is_constraint_topk_obj_eligible, _ = module.check_topk_fact_eligible(
constraint_topk_obj_ids, constraint_entity_sketch,
self.all_entity_sketches, params)
# batch_size, topk
is_constraint_topk_eligible = tf.minimum(is_constraint_topk_subj_eligible,
is_constraint_topk_obj_eligible)
# batch_size, topk
constraint_topk_fact_logits += (1.0 -
is_constraint_topk_eligible) * VERY_NEG
# batch_size, topk
constraint_topk_fact_logits = tf.nn.relu(constraint_topk_fact_logits)
# batch_size, topk
# We need to add the logits from constraints to the logits at the
# end of reasoning. However, the order of subject and object entities
# does not match. We first find the mapping, map the logits, and add
# it to the original logits.
constraint_topk_fact_logits_mapped_to_concat = self.map_constraint_logits(
concat_topk_obj_ids, constraint_topk_subj_ids,
constraint_topk_fact_logits)
concat_topk_fact_logit_with_constraint = concat_topk_fact_logit + \
constraint_topk_fact_logits_mapped_to_concat
# batch_size, topk * 2
# Finally compute loss, cache a few tensors.
answerable_at_topk = tf.metrics.mean(
tf.cast(
tf.logical_or(tensors['intermediate_answerable'][0],
tensors['intermediate_answerable'][1]),
dtype=tf.float32))
loss = self.compute_tf_loss(concat_topk_fact_logit, concat_topk_fact_labels)
tensors['logits'] = concat_topk_fact_logit_with_constraint
tensors['labels'] = concat_topk_fact_labels
tensors['answerable_at_topk'] = answerable_at_topk
return loss, tensors
def compute_bert_cls_embs(self, token_ids, segment_ids,
masks):
"""Return the embedding of CLS token in BERT.
Args:
token_ids: BERT token ids
segment_ids: BERT segment ids
masks: BERT mask
Returns:
BERT CLS embedding
"""
bert_outputs = self.bert_module(
inputs={
'input_ids': token_ids,
'segment_ids': segment_ids,
'input_mask': masks
},
signature='tokens',
as_dict=True)
cls_embs = bert_outputs['sequence_output']
# batch_size, max_seq_length, hidden_size
cls_embs = cls_embs[:, 0, :]
# batch_size, hidden_size
return cls_embs
def compute_query_embs_and_sketch(
self, entity_ids, logits,
params):
"""Compute embeddings and sketch with logits of entities.
Given entity_ids and logits in the same order, compute weighted average
of entity embeddings and a sketch that stores their weights.
Args:
entity_ids: entity ids
logits: logits before softmax
params: params
Returns:
A weighted average of embeddings and a cm sketch for the entities
"""
topk_softmax = tf.nn.softmax(logits)
# batch_size, topk
topk_ent_embs = tf.nn.embedding_lookup(self.entity_embeddings_mat,
entity_ids)
# batch_size, topk, hidden_size
query_entity_embs = tf.reduce_sum(
topk_ent_embs * tf.expand_dims(topk_softmax, axis=2), axis=1)
# batch_size, hidden_size
query_entity_sketch = module.create_cm_sketch(
entity_ids, topk_softmax, self.all_entity_sketches,
cm_width=params['cm_width'])
# batch_size, depth, width
return query_entity_embs, query_entity_sketch
def compute_topk_fact_logits(self, topk_fact_idx,
query_embs,
params):
"""Get the logits between query and facts with topk_fact_idx.
Args:
topk_fact_idx: topk ids from scam -- batch_size, topk
query_embs: embeddings of query -- batch_size, hidden_size * 2
params: flags
Returns:
topk_fact_embs, topk_mask, topk_labels
"""
topk_subj_ids = tf.gather(self.all_fact_subjids, topk_fact_idx)
# batch_size, topk
topk_rel_ids = tf.gather(self.all_fact_relids, topk_fact_idx)
# batch_size, topk
topk_mask = tf.cast(topk_subj_ids >= 0, dtype=tf.int32)
# batch_size, topk
topk_subj_embs = tf.nn.embedding_lookup(self.entity_embeddings_mat,
topk_subj_ids * topk_mask)
# batch_size, topk, hidden_size
topk_rel_embs = tf.nn.embedding_lookup(self.relation_embeddings_mat,
topk_rel_ids * topk_mask)
# batch_size, topk, hidden_size
# compute logits for top k facts
topk_fact_embs = tf.concat((topk_subj_embs, topk_rel_embs), axis=2)
# batch_size, topk, hidden_size * 2
topk_fact_embs *= tf.cast(
tf.expand_dims(topk_mask, axis=2), dtype=tf.float32)
# batch_size, topk, hidden_size * 2
query_embs = tf.expand_dims(query_embs, axis=2)
# batch_size, hidden_size * 2, 1
topk_fact_logit = tf.matmul(topk_fact_embs, query_embs) / tf.sqrt(
float(params['entity_emb_size'] + params['relation_emb_size']))
topk_fact_logit = tf.squeeze(topk_fact_logit, axis=2)
# batch_size, topk
return topk_fact_logit
def map_constraint_logits(self, original_ids,
constraint_ids,
constraint_logits):
"""Map constraint logits to original if ids match.
constraint_logits is logits for constraint_ids. If an id in constraint_ids
also appear in original_ids, we will map its logit to the corresponding
position as in original_ids.
Args:
original_ids: order to map to
constraint_ids: order to map from
constraint_logits: logits in the same order as constraint_ids
Returns:
logits in the order of original_ids if exist, otherwise 0.
"""
constraint_logits_mapping, _ = util.compute_x_in_set(
constraint_ids, original_ids)
# batch_size, topk * 2, topk
constraint_logits_mapping = tf.cast(
constraint_logits_mapping, dtype=tf.float32)
# batch_size, topk * 2, topk
mapped_constraint_logits = tf.matmul(
constraint_logits_mapping, tf.expand_dims(constraint_logits, axis=2))
# batch_size, topk * 2, 1
mapped_constraint_logits = tf.squeeze(mapped_constraint_logits, axis=2)
# batch_size, topk * 2
return mapped_constraint_logits
def compute_logits(self, candidates, query,
hidden_size):
"""Compute logits between query embedding and candidate embeddings.
Args:
candidates: candidate embeddings
query: query embeddings
hidden_size: hidden size
Returns:
L2 logits and MIPS logits
"""
l2_logits = - tf.reduce_sum(
tf.square(candidates - query), axis=2, keepdims=True)
# batch_size, num_candidate, 1
l2_logits = l2_logits / tf.sqrt(float(hidden_size))
# batch_size, num_candidate, 1
l2_logits = tf.squeeze(l2_logits, axis=2)
# batch_size, num_candidate
mips_logits = tf.matmul(
candidates, query, transpose_b=True) / tf.sqrt(float(hidden_size))
# batch_size, num_candidate, 1
mips_logits = tf.squeeze(mips_logits, axis=2)
# batch_size, num_candidate
return mips_logits, l2_logits
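# Note on the scaling above: both scores are divided by sqrt(hidden_size),
# mirroring scaled dot-product attention. mips_logits ranks candidates by
# inner product (maximum inner product search), while l2_logits ranks them by
# negative squared Euclidean distance to the query.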
def compute_tf_loss(self, logits, labels):
"""Compute loss between logits and labels.
Args:
logits: batch_size, num_candidate
labels: batch_size, num_candidate
Returns:
loss and train_op
"""
labels = tf.cast(labels, tf.float32)
# If labels contains all zeros, replace them with all ones
is_label_all_zeros = tf.cast(tf.reduce_all(
tf.equal(labels, 0.0), axis=1, keepdims=True), dtype=tf.float32)
# batch_size, 1
padded_labels = tf.tile(is_label_all_zeros, [1, labels.shape[1]])
# batch_size, num_candidate
labels += padded_labels
# batch_size, num_candidate
# Also zero out logits if their labels are all zeros.
logits *= (1 - is_label_all_zeros)
# batch_size, num_candidate
labels_sum = tf.reduce_sum(labels, axis=1, keepdims=True)
labels = labels / labels_sum
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
loss = tf.reduce_mean(loss)
return loss
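# Worked illustration of the all-zero-label handling above (made-up numbers):
# for a row with labels [0, 0, 0], is_label_all_zeros is 1, so the labels
# become [1, 1, 1] while the row's logits are multiplied by 0. After
# normalization the targets are uniform ([1/3, 1/3, 1/3]), and the cross
# entropy against constant logits contributes only a constant (log 3) for that
# unanswerable row.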
def create_one_hot_sketch(self, element, all_sketches, cm_width):
"""Wrapper to create the initial sketch for an one-hot vector."""
element = tf.expand_dims(element, axis=1)
softmax = tf.ones_like(element, dtype=tf.float32)
return module.create_cm_sketch(element, softmax, all_sketches, cm_width)
def simple_follow_op(self, params, ent_embs, rel_embs,
ent_sketch, rel_sketch):
"""Wrapper of follow operation inside EmQL."""
if not params['use_cm_sketch']:
ent_sketch, rel_sketch = None, None
object_embs, object_sketch, unused_topk_fact_logits = module.follow_op(
params, ent_embs, ent_sketch, rel_embs, rel_sketch,
self.all_fact_subjids, self.all_fact_relids, self.all_fact_objids,
self.entity_embeddings_mat, self.kb_embeddings_mat,
self.all_entity_sketches, self.all_relation_sketches)
return object_embs, object_sketch
def simple_entity_encode_op(self, params, ent):
"""Wrapper to encode a single entity into its EmQL representation."""
set_ids = tf.expand_dims(ent, axis=1)
set_mask = tf.ones_like(set_ids, dtype=tf.float32)
return module.encode_op(params, set_ids, set_mask,
self.entity_embeddings_mat,
self.all_entity_sketches)
def simple_relation_encode_op(self, params, rel):
"""Wrapper to encode a single relation into its EmQL representation."""
set_ids = tf.expand_dims(rel, axis=1)
set_mask = tf.ones_like(set_ids, dtype=tf.float32)
return module.encode_op(params, set_ids, set_mask,
self.relation_embeddings_mat,
self.all_relation_sketches)
def model_query2box(self, name, features,
params):
"""Model query2box for 9 different tasks.
This function simulate 9 compositional queries as defined in the
Query2Box paper:
1c: X.follow(R)
2c: X.follow(R1).follow(R2)
3c: X.follow(R1).follow(R2).follow(R3)
2i: X1.follow(R1) & X2.follow(R2)
3i: X1.follow(R1) & X2.follow(R2) & X3.follow(R3)
ic: (X1.follow(R1) & X2.follow(R2)).follow(R3)
ci: X1.follow(R1) & X2.follow(R2).follow(R3)
2u: X1.follow(R1) | X2.follow(R2)
uc: (X1.follow(R1) | X2.follow(R2)).follow(R3)
Modules (follow, intersection, union, decode) are defined in module.py.
Args:
name: name of the task, e.g. "query2box_2i".
features: a tuple of tf.Tensors. The order is decided by each task.
params: a dictionary of hyper-parameters
Returns:
loss and predictions
"""
task = name.split('_')[-1]
if task == '1c': # X.follow(R)
ent, rel1 = features
ent_embs, ent_sketch = self.simple_entity_encode_op(params, ent)
rel1_embs, rel1_sketch = self.simple_relation_encode_op(params, rel1)
answer_embs, answer_sketch = self.simple_follow_op(
params, ent_embs, rel1_embs, ent_sketch, rel1_sketch)
elif task == '2c': # X.follow(R1).follow(R2)
ent, rel1, rel2 = features
ent_embs, ent_sketch = self.simple_entity_encode_op(params, ent)
rel1_embs, rel1_sketch = self.simple_relation_encode_op(params, rel1)
rel2_embs, rel2_sketch = self.simple_relation_encode_op(params, rel2)
obj1_embs, obj1_sketch = self.simple_follow_op(
params, ent_embs, rel1_embs, ent_sketch, rel1_sketch)
answer_embs, answer_sketch = self.simple_follow_op(
params, obj1_embs, rel2_embs, obj1_sketch, rel2_sketch)
elif task == '3c': # X.follow(R1).follow(R2).follow(R3)
ent, rel1, rel2, rel3 = features
ent_embs, ent_sketch = self.simple_entity_encode_op(params, ent)
rel1_embs, rel1_sketch = self.simple_relation_encode_op(params, rel1)
rel2_embs, rel2_sketch = self.simple_relation_encode_op(params, rel2)
rel3_embs, rel3_sketch = self.simple_relation_encode_op(params, rel3)
obj1_embs, obj1_sketch = self.simple_follow_op(
params, ent_embs, rel1_embs, ent_sketch, rel1_sketch)
obj2_embs, obj2_sketch = self.simple_follow_op(
params, obj1_embs, rel2_embs, obj1_sketch, rel2_sketch)
answer_embs, answer_sketch = self.simple_follow_op(
params, obj2_embs, rel3_embs, obj2_sketch, rel3_sketch)
elif task == '2i' or task == '2u': # X1.follow(R1) & / | X2.follow(R2)
ent1, rel1, ent2, rel2 = features
ent1_embs, ent1_sketch = self.simple_entity_encode_op(params, ent1)
rel1_embs, rel1_sketch = self.simple_relation_encode_op(params, rel1)
ent2_embs, ent2_sketch = self.simple_entity_encode_op(params, ent2)
rel2_embs, rel2_sketch = self.simple_relation_encode_op(params, rel2)
obj1_embs, obj1_sketch = self.simple_follow_op(
params, ent1_embs, rel1_embs, ent1_sketch, rel1_sketch)
obj2_embs, obj2_sketch = self.simple_follow_op(
params, ent2_embs, rel2_embs, ent2_sketch, rel2_sketch)
if task == '2i':
answer_embs, answer_sketch = module.intersection_op(
obj1_embs, obj1_sketch, obj2_embs, obj2_sketch)
elif task == '2u':
answer_embs, answer_sketch = module.union_op(
obj1_embs, obj1_sketch, obj2_embs, obj2_sketch)
elif task == '3i': # X1.follow(R1) & X2.follow(R2) & X3.follow(R3)
ent1, rel1, ent2, rel2, ent3, rel3 = features
ent1_embs, ent1_sketch = self.simple_entity_encode_op(params, ent1)
rel1_embs, rel1_sketch = self.simple_relation_encode_op(params, rel1)
ent2_embs, ent2_sketch = self.simple_entity_encode_op(params, ent2)
rel2_embs, rel2_sketch = self.simple_relation_encode_op(params, rel2)
ent3_embs, ent3_sketch = self.simple_entity_encode_op(params, ent3)
rel3_embs, rel3_sketch = self.simple_relation_encode_op(params, rel3)
obj1_embs, obj1_sketch = self.simple_follow_op(
params, ent1_embs, rel1_embs, ent1_sketch, rel1_sketch)
obj2_embs, obj2_sketch = self.simple_follow_op(
params, ent2_embs, rel2_embs, ent2_sketch, rel2_sketch)
obj3_embs, obj3_sketch = self.simple_follow_op(
params, ent3_embs, rel3_embs, ent3_sketch, rel3_sketch)
answer_embs, answer_sketch = module.intersection_op(
obj1_embs, obj1_sketch, obj2_embs, obj2_sketch)
answer_embs, answer_sketch = module.intersection_op(
answer_embs, answer_sketch, obj3_embs, obj3_sketch)
elif task == 'ic' or task == 'uc':
# (X1.follow(R1) & / | X2.follow(R2)).follow(R3)
ent1, rel1, ent2, rel2, rel3 = features
ent1_embs, ent1_sketch = self.simple_entity_encode_op(params, ent1)
rel1_embs, rel1_sketch = self.simple_relation_encode_op(params, rel1)
ent2_embs, ent2_sketch = self.simple_entity_encode_op(params, ent2)
rel2_embs, rel2_sketch = self.simple_relation_encode_op(params, rel2)
rel3_embs, rel3_sketch = self.simple_relation_encode_op(params, rel3)
obj1_embs, obj1_sketch = self.simple_follow_op(
params, ent1_embs, rel1_embs, ent1_sketch, rel1_sketch)
obj2_embs, obj2_sketch = self.simple_follow_op(
params, ent2_embs, rel2_embs, ent2_sketch, rel2_sketch)
if task == 'ic':
answer_embs, answer_sketch = module.intersection_op(
obj1_embs, obj1_sketch, obj2_embs, obj2_sketch)
elif task == 'uc':
answer_embs, answer_sketch = module.union_op(
obj1_embs, obj1_sketch, obj2_embs, obj2_sketch)
answer_embs, answer_sketch = self.simple_follow_op(
params, answer_embs, rel3_embs, answer_sketch, rel3_sketch)
elif task == 'ci': # X1.follow(R1) & X2.follow(R2).follow(R3)
ent1, rel1, rel2, ent2, rel3 = features
ent1_embs, ent1_sketch = self.simple_entity_encode_op(params, ent1)
rel1_embs, rel1_sketch = self.simple_relation_encode_op(params, rel1)
rel2_embs, rel2_sketch = self.simple_relation_encode_op(params, rel2)
ent2_embs, ent2_sketch = self.simple_entity_encode_op(params, ent2)
rel3_embs, rel3_sketch = self.simple_relation_encode_op(params, rel3)
obj1_embs, obj1_sketch = self.simple_follow_op(
params, ent1_embs, rel1_embs, ent1_sketch, rel1_sketch)
obj2_embs, obj2_sketch = self.simple_follow_op(
params, obj1_embs, rel2_embs, obj1_sketch, rel2_sketch)
obj3_embs, obj3_sketch = self.simple_follow_op(
params, ent2_embs, rel3_embs, ent2_sketch, rel3_sketch)
answer_embs, answer_sketch = module.intersection_op(
obj2_embs, obj2_sketch, obj3_embs, obj3_sketch)
else:
raise ValueError('task: %s not recognized' % task)
# Decode from set representation to a list of entities. We will apply a
# null sketch for decoding at query time.
answer_sketch = None
answer_ids, unused_answer_logits = module.decode_op(
params, answer_embs, answer_sketch,
self.entity_embeddings_mat, self.all_entity_sketches)
return tf.constant(0.0), {'answer_ids': answer_ids}
##############################################################
######################### Evaluation #########################
##############################################################
def run_tf_evaluation(self, logits, labels,
prefix = ''):
"""Compute evaluation metrics.
Args:
logits: batch_size, num_candidate
labels: batch_size, num_candidate
prefix: prefix for evaluation key name
Returns:
evaluation metrics
"""
hits_at_one = util.compute_hits_at_k(logits, labels, k=1)
hits_at_five = util.compute_hits_at_k(logits, labels, k=5)
recall_at_one = util.compute_recall_at_k(logits, labels, k=1)
recall_at_five = util.compute_recall_at_k(logits, labels, k=5)
average_precision_at_5 = \
util.compute_average_precision_at_k(logits, labels, k=5)
evaluations = {
prefix + 'hits@1': tf.metrics.mean(hits_at_one),
prefix + 'hits@5': tf.metrics.mean(hits_at_five),
prefix + 'recall@1': tf.metrics.mean(recall_at_one),
prefix + 'recall@5': tf.metrics.mean(recall_at_five),
prefix + 'map@5': tf.metrics.mean(average_precision_at_5),
}
return evaluations
def get_tf_prediction(self, name, features, tensors):
# raise NotImplementedError
if name.startswith('query2box'):
return {
'query': tf.concat(
[tf.expand_dims(f, axis=-1) for f in features], axis=-1),
'answer_ids': tensors['answer_ids']
}
else:
raise ValueError
def build_model_fn(
name, data_loader, eval_name,
eval_metric_at_k):
"""Build model function.
Args:
name: name of the model -- 'membership' or 'intersection' or 'union'
data_loader: data loader
eval_name: if model contains several sub-models
eval_metric_at_k: top k for evaluation metrics
Returns:
model function
"""
del eval_name, eval_metric_at_k
def model_fn(features, labels, mode, # pylint: disable=unused-argument
params):
"""Wrapper function to select model mode.
This function is called by tf.estimator.train_and_evaluate function in the
background.
Args:
features: features
labels: unused labels
mode: tf.estimator.ModeKeys.PREDICT or TRAIN or EVAL
params: extra params
Returns:
A tf.estimator spec
"""
emql = EmQL(name, params, data_loader)
# Define behaviors for different operations: train / eval / pred
if mode == tf.estimator.ModeKeys.TRAIN:
loss, tensors = emql.get_tf_model(name, features, params)
optimizer = tf.train.AdamOptimizer(params['learning_rate'])
gvs = optimizer.compute_gradients(loss)
capped_gvs = [(tf.clip_by_norm(grad, params['gradient_clip']), var)
for grad, var in gvs if grad is not None]
train_op = optimizer.apply_gradients(
capped_gvs, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(
mode=mode, train_op=train_op, loss=loss)
elif mode == tf.estimator.ModeKeys.EVAL:
loss, tensors = emql.get_tf_model(name, features, params)
if name == 'mixture':
evaluations = dict()
evaluations.update(emql.run_tf_evaluation(
tensors['membership_logits'], tensors['membership_labels'],
prefix='membership_'))
evaluations.update(emql.run_tf_evaluation(
tensors['intersection_logits'], tensors['intersection_labels'],
prefix='intersection_'))
evaluations.update(emql.run_tf_evaluation(
tensors['union_logits'], tensors['union_labels'],
prefix='union_'))
evaluations.update(emql.run_tf_evaluation(
tensors['set_follows_logits'], tensors['set_follows_labels'],
prefix='set_follows_'))
else:
evaluations = emql.run_tf_evaluation(
tensors['logits'], tensors['labels'])
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=evaluations)
elif mode == tf.estimator.ModeKeys.PREDICT:
loss, tensors = emql.get_tf_model(name, features, params)
predictions = emql.get_tf_prediction(name, features, tensors)
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
else:
raise ValueError('illegal mode %r' % mode)
return model_fn
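# Minimal wiring sketch (not part of this module): how the model_fn returned by
# build_model_fn() could be handed to a TF1 Estimator. The data_loader,
# input_fn, model_dir and hyper-parameter values are placeholders/assumptions;
# the real training entry point lives elsewhere in the EmQL codebase.
#
#   model_fn = build_model_fn('membership', data_loader, eval_name=None,
#                             eval_metric_at_k=None)
#   estimator = tf.estimator.Estimator(
#       model_fn=model_fn,
#       model_dir='/tmp/emql_membership',
#       params={'learning_rate': 1e-3, 'gradient_clip': 5.0,
#               'entity_emb_size': 64, 'relation_emb_size': 64,
#               'vocab_emb_size': 64, 'train_entity_emb': True,
#               'train_relation_emb': True, 'use_cm_sketch': True,
#               'cm_width': 200, 'intermediate_top_k': 100})
#   estimator.train(input_fn=train_input_fn, max_steps=10000)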
|
|
#!/usr/bin/env python3
# Copyright 2017 Jetperch LLC. All rights reserved.
"""
A python EMBC framer for PCs.
This framer executable is used to test and qualify microcontroller
implementations.
"""
import argparse
import sys
import os
import time
import signal
import serial
import logging
log = logging.getLogger()
MYPATH = os.path.abspath(os.path.dirname(__file__))
LIBPATH = os.path.dirname(MYPATH)
sys.path.insert(0, LIBPATH)
import embc
RX_PING_DEPTH = 3 # default setting
TX_PING_DEPTH = 3 # default setting
PING_TIMEOUT = 5.0 # seconds
PING_DEPTH = 3 # default setting
DELAY_TIME = 1.0
def get_parser():
parser = argparse.ArgumentParser(description='Test EMBC framer implementations.')
parser.add_argument('--port',
help='The serial comm port / device to open')
parser.add_argument('--baudrate',
default=115200,
help='The baud rate (defaults to 115200)')
parser.add_argument('--ping',
default=0,
type=int,
help='Send ping frames')
return parser
class PingFrame:
def __init__(self, message_id, payload):
self.message_id_raw = message_id
self.message_id = message_id & 0xff
self.payload = payload
# in case of lost ACK frames, can receive response before tx_done
self.response_received = False
self.tx_done_received = False
self.time = None
@property
def is_done(self):
return self.tx_done_received and self.response_received
class PingFrameGenerator:
def __init__(self, count):
self._count_total = count
self._count_generated = 0
self.payload_size = 240
assert(0 == (self.payload_size % 4))
self._pattern = embc.PatternTx()
def is_done(self):
if self._count_total < 0:
return False
if self._count_total == 0:
return True
return self._count_generated >= self._count_total
def __iter__(self):
return self
def __next__(self):
if self.is_done():
raise StopIteration
return self.next()
def next(self):
payload = []
for k in range(0, self.payload_size, 4):
word = self._pattern.next_u32()
word_bytes = [word & 0xff,
(word >> 8) & 0xff,
(word >> 16) & 0xff,
(word >> 24) & 0xff]
payload.extend(word_bytes)
payload = bytes(payload)
frame = PingFrame(self._count_generated, payload)
self._count_generated += 1
return frame
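# Note: the manual byte split above is equivalent to struct.pack('<I', word):
# each 32-bit pattern word is serialized least-significant byte first, so a
# 240-byte payload carries exactly 60 consecutive pattern words.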
class PingQueue:
def __init__(self, count, send):
self.count = count
self._generator = PingFrameGenerator(count)
self._queue = []
self._time_start = None
self._time_last = None
self._send = send
self._frames_completed = 0
self._frames_completed_last = 0
self._frames_tx_error = 0
self._frames_missing = 0
self._frames_timeout = 0
self._delay = time.time()
def log_running_stats(self):
t = time.time()
dt = t - self._time_last
if dt > 1.0:
frames = self._frames_completed - self._frames_completed_last
length = frames * self._generator.payload_size
throughput = length / dt
log.info('%d Bps in %d frames: total errors tx=%d, rx_missing=%d, timeout=%d',
throughput, frames,
self._frames_tx_error,
self._frames_missing,
self._frames_timeout)
self._frames_completed_last = self._frames_completed
self._time_last = t
def log_total_stats(self):
t = time.time()
dt = t - self._time_start
frames = self._frames_completed
length = frames * self._generator.payload_size
throughput = length / dt
log.info('%d Bps in %d frames: total errors: tx=%d, rx_missing=%d, timeout=%d',
throughput, frames,
self._frames_tx_error,
self._frames_missing,
self._frames_timeout)
def process(self):
queue = []
t = time.time()
if self._time_start is None:
self._time_start = t
self._time_last = t
for msg in self._queue:
if msg.tx_done_received and msg.response_received:
self._frames_completed += 1
elif t - msg.time >= PING_TIMEOUT:
log.info('remove frame due to timeout')
self._frames_timeout += 1
continue
else:
queue.append(msg)
self._queue = queue
if time.time() >= self._delay:
rx_available = RX_PING_DEPTH - self._rx_pending_count()
tx_available = TX_PING_DEPTH - self._tx_done_pending_count()
available = min(rx_available, tx_available)
for i in range(available):
if self._generator.is_done():
break
else:
msg = self._generator.next()
msg.time = t
self._queue.append(msg)
self._send(msg)
self.log_running_stats()
def _tx_done_pending_count(self):
return len([msg for msg in self._queue if not msg.tx_done_received])
def _rx_pending_count(self):
return len([msg for msg in self._queue if not msg.response_received])
def _ping_rx_resync(self, message_id, data):
for idx in range(len(self._queue)):
msg = self._queue[idx]
if msg.message_id == message_id and msg.payload == data:
log.info('resync message_id=%d to index %d', message_id, idx)
self._frames_missing += idx
del self._queue[:idx]
return msg
log.warning('rx resync failed message_id=%d (frame from previous ping session?)',
message_id)
return None
def rx(self, message_id, payload):
if 0 == self._rx_pending_count(): # error
log.warning("rx ping response %d, but not expected" % message_id)
return
msg = self._queue[0]
if msg.message_id != message_id:
log.debug('rx ping response message_id mismatch: expected %d != received %d',
msg.message_id, message_id)
msg = self._ping_rx_resync(message_id, payload)
elif msg.payload != payload:
log.warning('rx ping response %d payload mismatch', message_id)
msg = self._ping_rx_resync(message_id, payload)
else:
log.debug('rx ping response %d', message_id)
if msg is not None:
msg.response_received = True
self.process()
def _get_tx_done_message(self, message_id):
for idx, msg in enumerate(self._queue):
if not msg.tx_done_received and msg.message_id == message_id:
return idx, msg
return 0, None
def tx_done(self, message_id, status):
idx, msg = self._get_tx_done_message(message_id)
if msg is None:
log.warning('tx_done ping %d with no matching tx message', message_id)
return
msg.tx_done_received = True
if 0 == status:
log.debug('tx_done ping %d', message_id)
else: # error
self._frames_tx_error += 1
self._queue.pop(idx)
log.info('tx_done ping error %s: message_id=%d, tx_done_pending=%s, rx_pending=%d',
embc.ec.num_to_name.get(status, 'unknown'), message_id,
self._tx_done_pending_count(),
self._rx_pending_count())
# delay to prevent far-end overflow during loss of 1 direction
self._delay = time.time() + DELAY_TIME
self.process()
class MasterFramer:
def __init__(self, port, baudrate):
# port=None defers open until explicit open().
self._serial = serial.Serial(port=None, baudrate=baudrate, timeout=0.002)
self._serial.port = port
self._last_time = None
self._framer = embc.stream.framer.Framer()
self._framer.hal_tx = self._serial.write
self.message_id = 0
self._ping = PingQueue(0, self._send_ping)
self._framer.register_port(0, self.rx_port0, self.tx_done_port0)
for i in range(1, 16):
self._framer.register_port(i, self.rx, self.tx_done)
def _send_ping(self, msg):
self._framer.send(0, msg.message_id, embc.stream.framer.Port0.PING_REQ, msg.payload)
def start_ping(self, count):
self._ping = PingQueue(count, self._send_ping)
self._ping.process()
def rx_port0(self, port, message_id, port_def, data):
if embc.stream.framer.Port0.PING_RSP == port_def:
self._ping.rx(message_id, data)
elif embc.stream.framer.Port0.STATUS_RSP == port_def:
pass
else:
log.info('rx_port0 message_id=%d port_def=0x%04x', message_id, port_def)
def tx_done_port0(self, port, message_id, port_def, status):
if embc.stream.framer.Port0.PING_REQ == port_def:
self._ping.tx_done(message_id, status)
elif embc.stream.framer.Port0.RESYNC == port_def:
pass
elif embc.stream.framer.Port0.STATUS_REQ == port_def:
pass
else:
log.info('tx_done_port0 message_id=%d port_def=0x%04x status=%d',
message_id, port_def, status)
def rx(self, port, message_id, port_def, data):
log.info('%s %s %s %s', port, message_id, port_def, data)
def tx_done(self, port, message_id, port_def, status):
if status:
log.info('tx_done error %s: port=%d, message_id=%d, port_def=0x%04x',
embc.ec.num_to_name.get(status, 'unknown'), port, message_id, port_def)
def open(self):
self._serial.open()
self._last_time = time.time()
self._framer.resync()
def close(self):
self._serial.close()
def process(self):
b = self._serial.read()
if len(b):
self._framer.hal_rx(b)
self._framer.process()
if self._ping.count != 0:
self._ping.process()
else:
t = time.time()
if t - self._last_time > 1.0:
self._last_time = t
print(self._framer.status)
def run():
logging.basicConfig(level=logging.INFO)
args = get_parser().parse_args()
quit = False
framer = MasterFramer(port=args.port, baudrate=args.baudrate)
framer.open()
def do_quit(*args, **kwargs):
nonlocal quit
print('quit')
quit = True
signal.signal(signal.SIGINT, do_quit)
print('start')
if args.ping != 0:
framer.start_ping(args.ping)
while not quit:
framer.process()
framer.close()
print('stop')
return 0
if __name__ == "__main__":
sys.exit(run())
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import urlparse
from datetime import datetime
from dateutil import tz
from dateutil import parser
from django.utils.formats import localize_input
from django.utils.translation import ugettext as _
from desktop.lib.parameterization import find_variables
from liboozie.oozie_api import get_oozie, DEFAULT_USER
LOG = logging.getLogger(__name__)
JSON_FIELDS = ('parameters', 'job_properties', 'files', 'archives', 'prepares', 'params',
'deletes', 'mkdirs', 'moves', 'chmods', 'touchzs')
BOOLEAN_FIELDS = ('propagate_configuration','capture_output')
NUMBER_FIELDS_OR_NULL = ('sub_workflow',)
GMT_TIME_FORMAT = "%Y-%m-%dT%H:%MGMT%z"
UTC_TIME_FORMAT = "%Y-%m-%dT%H:%MZ"
FREQUENCY_REGEX = r'^\$\{coord:(?P<frequency_unit>\w+)\((?P<frequency_number>\d+)\)\}$'
def format_field_value(field, value):
if field in JSON_FIELDS:
if not isinstance(value, basestring):
return json.dumps(value)
if field in NUMBER_FIELDS_OR_NULL:
if not isinstance(value, int) and value is not None:
return int(value)
if field in BOOLEAN_FIELDS:
return str(value).lower() == 'true'
return value
def format_dict_field_values(dictionary):
for key in dictionary:
dictionary[key] = format_field_value(key, dictionary[key])
return dictionary
def model_to_dict(model):
from django.db import models
dictionary = {}
for field in model._meta.fields:
try:
attr = getattr(model, field.name, None)
if isinstance(attr, models.Model):
dictionary[field.name] = attr.id
elif isinstance(attr, datetime):
dictionary[field.name] = str(attr)
else:
dictionary[field.name] = attr
except Exception, e:
LOG.debug(_("Could not set field %(field)s: %(exception)s") % {'field': field.name, 'exception': str(e)})
return dictionary
def sanitize_node_dict(node_dict):
for field in ['node_ptr', 'workflow']:
if field in node_dict:
del node_dict[field]
return node_dict
def workflow_to_dict(workflow):
workflow_dict = model_to_dict(workflow)
node_list = [node.get_full_node() for node in workflow.node_list]
nodes = [model_to_dict(node) for node in node_list]
for index, node in enumerate(node_list):
nodes[index]['child_links'] = [model_to_dict(link) for link in node.get_all_children_links()]
workflow_dict['nodes'] = nodes
return workflow_dict
def smart_path(path, mapping=None, is_coordinator=False):
# Try to prepend home_dir and the FS scheme if needed.
# If the path starts with a parameter, try to get its value from the list of parameters submitted by the user or the coordinator.
# This dynamic checking enables the use of <prepares> statements in a workflow scheduled manually or by a coordinator.
# The logic is a bit convoluted because Oozie is not consistent about data paths, prepare statements, coordinator paths and the Fs action.
if mapping is None:
mapping = {}
if not path.startswith('$') and not path.startswith('/') and not urlparse.urlsplit(path).scheme:
path = '/user/%(username)s/%(path)s' % {
'username': '${coord:user()}' if is_coordinator else '${wf:user()}',
'path': path
}
if path.startswith('$'):
variables = find_variables(path)
for var in variables:
prefix = '${%s}' % var
if path.startswith(prefix):
if var in mapping:
if not urlparse.urlsplit(mapping[var]).scheme and not mapping[var].startswith('$'):
path = '%(nameNode)s%(path)s' % {'nameNode': '${nameNode}', 'path': path}
else:
if not urlparse.urlsplit(path).scheme:
path = '%(nameNode)s%(path)s' % {'nameNode': '${nameNode}', 'path': path}
return path
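# Hedged illustration (not part of the original module, untested here;
# find_variables is assumed to return the ${...} names):
#   smart_path('data/out')                   -> '${nameNode}/user/${wf:user()}/data/out'
#   smart_path('hdfs://localhost:8020/data') -> 'hdfs://localhost:8020/data'  (scheme present, untouched)
#   smart_path('${myVar}/out', {'myVar': '/user/joe'}) -> '${nameNode}${myVar}/out'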
def contains_symlink(path, mapping):
vars = find_variables(path)
return any([var in mapping and '#' in mapping[var] for var in vars]) or '#' in path
def utc_datetime_format(utc_time):
return utc_time.strftime(UTC_TIME_FORMAT)
def oozie_to_django_datetime(dt_string):
try:
return localize_input(datetime.strptime(dt_string, UTC_TIME_FORMAT))
except ValueError:
pass
try:
return localize_input(datetime.strptime(dt_string, GMT_TIME_FORMAT))
except ValueError:
pass
return None
class InvalidFrequency(Exception):
pass
def oozie_to_hue_frequency(frequency_string):
"""
Get frequency number and units from frequency, which must be of the format
"${coord:$unit($number)}".
The frequency unit and number are just different parts of the EL function.
Returns:
A tuple of the frequency unit and number
Raises:
InvalidFrequency: If the `frequency_string` does not match the frequency pattern.
"""
matches = re.match(FREQUENCY_REGEX, frequency_string)
if matches:
return matches.group('frequency_unit'), matches.group('frequency_number')
else:
raise InvalidFrequency(_('invalid frequency: %s') % frequency_string)
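def _example_frequency_usage():
  # Minimal usage sketch (assumption, not part of the original module): splits
  # an Oozie EL frequency such as ${coord:days(7)} into its unit and number.
  unit, number = oozie_to_hue_frequency('${coord:days(7)}')
  assert (unit, number) == ('days', '7')
  return unit, number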
def convert_to_server_timezone(date, local_tz='UTC', server_tz=None, user=DEFAULT_USER):
api = get_oozie(user)
if server_tz is None:
oozie_conf = api.get_configuration()
server_tz = oozie_conf.get('oozie.processing.timezone') or 'UTC'
if date and date.startswith('$'):
return date
# To support previously created jobs
if date.endswith('Z'):
date = date[:-1]
local_tz = 'UTC'
try:
date_local_tz = parser.parse(date)
date_local_tz = date_local_tz.replace(tzinfo=tz.gettz(local_tz))
date_server_tz = date_local_tz.astimezone(tz.gettz(server_tz))
date_server_tz = date_server_tz.strftime('%Y-%m-%dT%H:%M')
# Oozie timezone is either UTC or GMT(+/-)####
if 'UTC' == server_tz:
return date_server_tz + u'Z'
else:
return date_server_tz + u'+' + re.split('[+-]', server_tz)[1]
except (TypeError, ValueError):
LOG.error("Failed to convert Oozie timestamp: %s" % date)
return None
|
|
# Copyright (C) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for understanding the various ports
import os
import platform
import sys
from webkitpy.common.system.executive import Executive
class DeprecatedPort(object):
results_directory = "/tmp/layout-test-results"
# Subclasses must override
port_flag_name = None
# FIXME: This is only used by BotInfo.
def name(self):
return self.__class__
def flag(self):
if self.port_flag_name:
return "--port=%s" % self.port_flag_name
return None
# We might need to pass scm into this function for scm.checkout_root
def script_path(self, script_name):
return os.path.join("Tools", "Scripts", script_name)
def script_shell_command(self, script_name):
script_path = self.script_path(script_name)
return Executive.shell_command_for_script(script_path)
@staticmethod
def port(port_name):
ports = {
"chromium": ChromiumPort,
"chromium-android": ChromiumAndroidPort,
"chromium-xvfb": ChromiumXVFBPort,
"gtk": GtkPort,
"mac": MacPort,
"win": WinPort,
"qt": QtPort,
"efl": EflPort,
}
default_port = {
"Windows": WinPort,
"Darwin": MacPort,
}
# Do we really need MacPort as the ultimate default?
return ports.get(port_name, default_port.get(platform.system(), MacPort))()
def makeArgs(self):
# FIXME: This shouldn't use a static Executive().
args = '--makeargs="-j%s"' % Executive().cpu_count()
if os.environ.has_key('MAKEFLAGS'):
args = '--makeargs="%s"' % os.environ['MAKEFLAGS']
return args
def update_webkit_command(self, non_interactive=False):
return self.script_shell_command("update-webkit")
def check_webkit_style_command(self):
return self.script_shell_command("check-webkit-style")
def prepare_changelog_command(self):
return self.script_shell_command("prepare-ChangeLog")
def build_webkit_command(self, build_style=None):
command = self.script_shell_command("build-webkit")
if build_style == "debug":
command.append("--debug")
if build_style == "release":
command.append("--release")
return command
def run_javascriptcore_tests_command(self):
return self.script_shell_command("run-javascriptcore-tests")
def run_webkit_unit_tests_command(self):
return None
def run_webkit_tests_command(self):
return self.script_shell_command("run-webkit-tests")
def run_python_unittests_command(self):
return self.script_shell_command("test-webkitpy")
def run_perl_unittests_command(self):
return self.script_shell_command("test-webkitperl")
def layout_tests_results_path(self):
return os.path.join(self.results_directory, "full_results.json")
def unit_tests_results_path(self):
return os.path.join(self.results_directory, "webkit_unit_tests_output.xml")
class MacPort(DeprecatedPort):
port_flag_name = "mac"
class WinPort(DeprecatedPort):
port_flag_name = "win"
class GtkPort(DeprecatedPort):
port_flag_name = "gtk"
def build_webkit_command(self, build_style=None):
command = super(GtkPort, self).build_webkit_command(build_style=build_style)
command.append("--gtk")
command.append("--update-gtk")
command.append(super(GtkPort, self).makeArgs())
return command
def run_webkit_tests_command(self):
command = super(GtkPort, self).run_webkit_tests_command()
command.append("--gtk")
return command
class QtPort(DeprecatedPort):
port_flag_name = "qt"
def build_webkit_command(self, build_style=None):
command = super(QtPort, self).build_webkit_command(build_style=build_style)
command.append("--qt")
command.append(super(QtPort, self).makeArgs())
return command
class EflPort(DeprecatedPort):
port_flag_name = "efl"
def build_webkit_command(self, build_style=None):
command = super(EflPort, self).build_webkit_command(build_style=build_style)
command.append("--efl")
command.append("--update-efl")
command.append(super(EflPort, self).makeArgs())
return command
class ChromiumPort(DeprecatedPort):
port_flag_name = "chromium"
def update_webkit_command(self, non_interactive=False):
command = super(ChromiumPort, self).update_webkit_command(non_interactive=non_interactive)
command.append("--chromium")
if non_interactive:
command.append("--force-update")
return command
def build_webkit_command(self, build_style=None):
command = super(ChromiumPort, self).build_webkit_command(build_style=build_style)
command.append("--chromium")
command.append("--update-chromium")
return command
def run_webkit_tests_command(self):
# Note: This could be run-webkit-tests now.
command = self.script_shell_command("new-run-webkit-tests")
command.append("--chromium")
command.append("--skip-failing-tests")
return command
def run_webkit_unit_tests_command(self):
return self.script_shell_command("run-chromium-webkit-unit-tests")
def run_javascriptcore_tests_command(self):
return None
class ChromiumAndroidPort(ChromiumPort):
port_flag_name = "chromium-android"
def update_webkit_command(self, non_interactive=False):
command = super(ChromiumAndroidPort, self).update_webkit_command(non_interactive=non_interactive)
command.append("--chromium-android")
return command
def build_webkit_command(self, build_style=None):
command = super(ChromiumAndroidPort, self).build_webkit_command(build_style=build_style)
command.append("--chromium-android")
return command
class ChromiumXVFBPort(ChromiumPort):
port_flag_name = "chromium-xvfb"
def run_webkit_tests_command(self):
return ["xvfb-run"] + super(ChromiumXVFBPort, self).run_webkit_tests_command()
|
|
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else
  (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with
``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
sparseseries = (y.__class__.__name__ == 'SparseSeries')
if sparseseries:
raise ValueError("y cannot be class 'SparseSeries'.")
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
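def _example_partial_fit_first_call():
    # Minimal sketch (assumption, not part of the original module): the helper
    # sets ``classes_`` on the first call and returns False on later calls.
    class _Stub(object):
        pass
    clf = _Stub()
    assert _check_partial_fit_first_call(clf, classes=[0, 1, 2])   # first call
    assert not _check_partial_fit_first_call(clf)                  # subsequent call
    return clf.classes_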
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = np.bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = np.bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
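def _example_class_distribution():
    # Minimal sketch (assumption, not part of the original module): priors for
    # a single dense output column with classes 0, 1 and 2.
    y = np.array([[0], [0], [1], [2]])
    classes, n_classes, class_prior = class_distribution(y)
    # classes == [array([0, 1, 2])], n_classes == [3],
    # class_prior == [array([0.5, 0.25, 0.25])]
    return classes, n_classes, class_prior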
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking ovr decision function.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if max_confidences == min_confidences:
return votes
# Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = (0.5 - eps) / max_abs_confidence
return votes + sum_of_confidences * scale
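def _example_ovr_decision_function():
    # Minimal sketch (assumption, not part of the original module): 3 classes,
    # pairwise classifiers ordered (0, 1), (0, 2), (1, 2). Class 0 wins both of
    # its duels, so it keeps the highest (confidence tie-broken) score.
    predictions = np.array([[0, 0, 1]])
    confidences = np.array([[-1.0, -2.0, 0.5]])
    scores = _ovr_decision_function(predictions, confidences, n_classes=3)
    assert np.argmax(scores[0]) == 0
    return scores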
|
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''pplan_helper.py'''
import socket
from heron.api.src.python.custom_grouping import ICustomGrouping
from heron.api.src.python.serializer import default_serializer
from heron.proto import topology_pb2
from heron.common.src.python.utils.log import Log
from heron.common.src.python.utils.topology import TopologyContextImpl
import heron.common.src.python.pex_loader as pex_loader
from .custom_grouping_helper import CustomGroupingHelper
# pylint: disable=too-many-instance-attributes
class PhysicalPlanHelper(object):
"""Helper class for accessing Physical Plan
:ivar pplan: Physical Plan protobuf message
:ivar topology_pex_abs_path: Topology pex file's absolute path
:ivar my_instance_id: instance id for this instance
:ivar my_instance: Instance protobuf message for this instance
:ivar my_component_name: component name for this instance
:ivar my_task_id: global task id for this instance
:ivar is_spout: ``True`` if it's spout, ``False`` if it's bolt
:ivar hostname: hostname of this instance
:ivar my_component: Component protobuf message for this instance
:ivar context: Topology context if set, otherwise ``None``
"""
def __init__(self, pplan, instance_id, topology_pex_abs_path):
self.pplan = pplan
self.my_instance_id = instance_id
self.my_instance = None
self.topology_pex_abs_path = topology_pex_abs_path
# get my instance
for instance in pplan.instances:
if instance.instance_id == self.my_instance_id:
self.my_instance = instance
break
if self.my_instance is None:
raise RuntimeError("There was no instance that matched my id: %s" % self.my_instance_id)
self.my_component_name = self.my_instance.info.component_name
self.my_task_id = self.my_instance.info.task_id
# get spout or bolt
self._my_spbl, self.is_spout = self._get_my_spout_or_bolt(pplan.topology)
# Map <stream id -> number of fields in that stream's schema>
self._output_schema = dict()
outputs = self._my_spbl.outputs
# setup output schema
for out_stream in outputs:
self._output_schema[out_stream.stream.id] = len(out_stream.schema.keys)
self.hostname = socket.gethostname()
self.my_component = self._my_spbl.comp
self.context = None
# setup for custom grouping
self.custom_grouper = CustomGroupingHelper()
self._setup_custom_grouping(pplan.topology)
def _get_my_spout_or_bolt(self, topology):
my_spbl = None
for spbl in list(topology.spouts) + list(topology.bolts):
if spbl.comp.name == self.my_component_name:
if my_spbl is not None:
raise RuntimeError("Duplicate my component found")
my_spbl = spbl
if isinstance(my_spbl, topology_pb2.Spout):
is_spout = True
elif isinstance(my_spbl, topology_pb2.Bolt):
is_spout = False
else:
raise RuntimeError("My component neither spout nor bolt")
return my_spbl, is_spout
def check_output_schema(self, stream_id, tup):
"""Checks if a given stream_id and tuple matches with the output schema
:type stream_id: str
:param stream_id: stream id into which tuple is sent
:type tup: list
:param tup: tuple that is going to be sent
"""
# do some checking to make sure that the number of fields match what's expected
size = self._output_schema.get(stream_id, None)
if size is None:
raise RuntimeError("%s emitting to stream %s but was not declared in output fields"
% (self.my_component_name, stream_id))
elif size != len(tup):
raise RuntimeError("Number of fields emitted in stream %s does not match what's expected. "
"Expected: %s, Observed: %s" % (stream_id, size, len(tup)))
def get_my_spout(self):
"""Returns spout instance, or ``None`` if bolt is assigned"""
if self.is_spout:
return self._my_spbl
else:
return None
def get_my_bolt(self):
"""Returns bolt instance, or ``None`` if spout is assigned"""
if self.is_spout:
return None
else:
return self._my_spbl
def get_topology_state(self):
"""Returns the current topology state"""
return self.pplan.topology.state
def is_topology_running(self):
"""Checks whether topology is currently running"""
return self.pplan.topology.state == topology_pb2.TopologyState.Value("RUNNING")
def is_topology_paused(self):
"""Checks whether topology is currently paused"""
return self.pplan.topology.state == topology_pb2.TopologyState.Value("PAUSED")
def is_topology_killed(self):
"""Checks whether topology is already killed"""
return self.pplan.topology.state == topology_pb2.TopologyState.Value("KILLED")
def get_topology_config(self):
"""Returns the topology config"""
if self.pplan.topology.HasField("topology_config"):
return self._get_dict_from_config(self.pplan.topology.topology_config)
else:
return {}
def set_topology_context(self, metrics_collector):
"""Sets a new topology context"""
Log.debug("Setting topology context")
cluster_config = self.get_topology_config()
cluster_config.update(self._get_dict_from_config(self.my_component.config))
task_to_component_map = self._get_task_to_comp_map()
self.context = TopologyContextImpl(cluster_config, self.pplan.topology, task_to_component_map,
self.my_task_id, metrics_collector,
self.topology_pex_abs_path)
@staticmethod
def _get_dict_from_config(topology_config):
"""Converts Config protobuf message to python dictionary
Values are converted according to the rules below:
- Number string (e.g. "12" or "1.2") is appropriately converted to ``int`` or ``float``
- Boolean string ("true", "True", "false" or "False") is converted to built-in boolean type
(i.e. ``True`` or ``False``)
- Normal string is inserted to dict as is
- Serialized value is deserialized and inserted as a corresponding Python object
"""
config = {}
for kv in topology_config.kvs:
if kv.HasField("value"):
assert kv.type == topology_pb2.ConfigValueType.Value("STRING_VALUE")
# value is string
if PhysicalPlanHelper._is_number(kv.value):
config[kv.key] = PhysicalPlanHelper._get_number(kv.value)
elif kv.value.lower() in ("true", "false"):
config[kv.key] = True if kv.value.lower() == "true" else False
else:
config[kv.key] = kv.value
elif kv.HasField("serialized_value") and \
kv.type == topology_pb2.ConfigValueType.Value("PYTHON_SERIALIZED_VALUE"):
# deserialize that
config[kv.key] = default_serializer.deserialize(kv.serialized_value)
else:
assert kv.HasField("type")
Log.error("Unsupported config <key:value> found: %s, with type: %s"
% (str(kv), str(kv.type)))
continue
return config
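# Hedged illustration (not part of the original source): given kvs carrying the
# string values "80", "0.5", "true" and "some-name", the resulting dict would be
# {"port": 80, "ratio": 0.5, "ack": True, "name": "some-name"}
# (keys are whatever kv.key holds; these names are made up).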
@staticmethod
def _is_number(string):
try:
float(string)
return True
except ValueError:
return False
@staticmethod
def _get_number(string):
try:
return int(string)
except ValueError:
return float(string)
def _get_task_to_comp_map(self):
ret = {}
for instance in self.pplan.instances:
ret[instance.info.task_id] = instance.info.component_name
return ret
##### custom grouping related #####
def _setup_custom_grouping(self, topology):
"""Checks whether there are any bolts that consume any of my streams using custom grouping"""
for i in range(len(topology.bolts)):
for in_stream in topology.bolts[i].inputs:
if in_stream.stream.component_name == self.my_component_name and \
in_stream.gtype == topology_pb2.Grouping.Value("CUSTOM"):
# this bolt takes my output in custom grouping manner
if in_stream.type == topology_pb2.CustomGroupingObjectType.Value("PYTHON_OBJECT"):
custom_grouping_obj = default_serializer.deserialize(in_stream.custom_grouping_object)
if isinstance(custom_grouping_obj, str):
pex_loader.load_pex(self.topology_pex_abs_path)
grouping_cls = \
pex_loader.import_and_get_class(self.topology_pex_abs_path, custom_grouping_obj)
custom_grouping_obj = grouping_cls()
assert isinstance(custom_grouping_obj, ICustomGrouping)
self.custom_grouper.add(in_stream.stream.id,
self._get_taskids_for_component(topology.bolts[i].comp.name),
custom_grouping_obj,
self.my_component_name)
elif in_stream.type == topology_pb2.CustomGroupingObjectType.Value("JAVA_OBJECT"):
raise NotImplementedError("Java-serialized custom grouping is not yet supported "
"for python topology")
else:
raise ValueError("Unrecognized custom grouping type found: %s" % str(in_stream.type))
def _get_taskids_for_component(self, component_name):
return [instance.info.task_id for instance in self.pplan.instances
if instance.info.component_name == component_name]
def prepare_custom_grouping(self, context):
"""Prepares for custom grouping for this component
:param context: Topology context
"""
self.custom_grouper.prepare(context)
def choose_tasks_for_custom_grouping(self, stream_id, values):
"""Choose target task ids for custom grouping
:return: task ids
"""
return self.custom_grouper.choose_tasks(stream_id, values)
|
|
"""distutils.command.register
Implements the Distutils 'register' command (register with the repository).
"""
# created 2002/10/21, Richard Jones
__revision__ = "$Id: register.py,v 1.1.1.1 2006/05/30 06:04:38 hhzhou Exp $"
import sys, os, string, urllib2, getpass, urlparse
import StringIO, ConfigParser
from distutils.core import Command
from distutils.errors import *
class register(Command):
description = ("register the distribution with the Python package index")
DEFAULT_REPOSITORY = 'http://www.python.org/pypi'
user_options = [
('repository=', 'r',
"url of repository [default: %s]"%DEFAULT_REPOSITORY),
('list-classifiers', None,
'list the valid Trove classifiers'),
('show-response', None,
'display full response text from server'),
]
boolean_options = ['verify', 'show-response', 'list-classifiers']
def initialize_options(self):
self.repository = None
self.show_response = 0
self.list_classifiers = 0
def finalize_options(self):
if self.repository is None:
self.repository = self.DEFAULT_REPOSITORY
def run(self):
self.check_metadata()
if self.dry_run:
self.verify_metadata()
elif self.list_classifiers:
self.classifiers()
else:
self.send_metadata()
def check_metadata(self):
"""Ensure that all required elements of meta-data (name, version,
URL, (author and author_email) or (maintainer and
maintainer_email)) are supplied by the Distribution object; warn if
any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in ('name', 'version', 'url'):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
if missing:
self.warn("missing required meta-data: " +
string.join(missing, ", "))
if metadata.author:
if not metadata.author_email:
self.warn("missing meta-data: if 'author' supplied, " +
"'author_email' must be supplied too")
elif metadata.maintainer:
if not metadata.maintainer_email:
self.warn("missing meta-data: if 'maintainer' supplied, " +
"'maintainer_email' must be supplied too")
else:
self.warn("missing meta-data: either (author and author_email) " +
"or (maintainer and maintainer_email) " +
"must be supplied")
def classifiers(self):
''' Fetch the list of classifiers from the server.
'''
response = urllib2.urlopen(self.repository+'?:action=list_classifiers')
print response.read()
def verify_metadata(self):
''' Send the metadata to the package index server to be checked.
'''
# send the info to the server and report the result
(code, result) = self.post_to_server(self.build_post_data('verify'))
print 'Server response (%s): %s'%(code, result)
def send_metadata(self):
''' Send the metadata to the package index server.
Well, do the following:
1. figure who the user is, and then
2. send the data as a Basic auth'ed POST.
First we try to read the username/password from $HOME/.pypirc,
which is a ConfigParser-formatted file with a section
[server-login] containing username and password entries (both
in clear text). Eg:
[server-login]
username: fred
password: sekrit
Otherwise, to figure who the user is, we offer the user four
choices:
1. use your existing login,
2. register as a new user,
3. have the server generate a new password for you (and email it to you), or
4. quit.
'''
choice = 'x'
username = password = ''
# see if we can short-cut and get the username/password from the
# config
config = None
if os.environ.has_key('HOME'):
rc = os.path.join(os.environ['HOME'], '.pypirc')
if os.path.exists(rc):
print 'Using PyPI login from %s'%rc
config = ConfigParser.ConfigParser()
config.read(rc)
username = config.get('server-login', 'username')
password = config.get('server-login', 'password')
choice = '1'
# get the user's login info
choices = '1 2 3 4'.split()
while choice not in choices:
print '''We need to know who you are, so please choose either:
1. use your existing login,
2. register as a new user,
3. have the server generate a new password for you (and email it to you), or
4. quit
Your selection [default 1]: ''',
choice = raw_input()
if not choice:
choice = '1'
elif choice not in choices:
print 'Please choose one of the four options!'
if choice == '1':
# get the username and password
while not username:
username = raw_input('Username: ')
while not password:
password = getpass.getpass('Password: ')
# set up the authentication
auth = urllib2.HTTPPasswordMgr()
host = urlparse.urlparse(self.repository)[1]
auth.add_password('pypi', host, username, password)
# send the info to the server and report the result
code, result = self.post_to_server(self.build_post_data('submit'),
auth)
print 'Server response (%s): %s'%(code, result)
# possibly save the login
if os.environ.has_key('HOME') and config is None and code == 200:
rc = os.path.join(os.environ['HOME'], '.pypirc')
print 'I can store your PyPI login so future submissions will be faster.'
print '(the login will be stored in %s)'%rc
choice = 'X'
while choice.lower() not in 'yn':
choice = raw_input('Save your login (y/N)?')
if not choice:
choice = 'n'
if choice.lower() == 'y':
f = open(rc, 'w')
f.write('[server-login]\nusername:%s\npassword:%s\n'%(
username, password))
f.close()
try:
os.chmod(rc, 0600)
except:
pass
elif choice == '2':
data = {':action': 'user'}
data['name'] = data['password'] = data['email'] = ''
data['confirm'] = None
while not data['name']:
data['name'] = raw_input('Username: ')
while data['password'] != data['confirm']:
while not data['password']:
data['password'] = getpass.getpass('Password: ')
while not data['confirm']:
data['confirm'] = getpass.getpass(' Confirm: ')
if data['password'] != data['confirm']:
data['password'] = ''
data['confirm'] = None
print "Password and confirm don't match!"
while not data['email']:
data['email'] = raw_input(' EMail: ')
code, result = self.post_to_server(data)
if code != 200:
print 'Server response (%s): %s'%(code, result)
else:
print 'You will receive an email shortly.'
print 'Follow the instructions in it to complete registration.'
elif choice == '3':
data = {':action': 'password_reset'}
data['email'] = ''
while not data['email']:
data['email'] = raw_input('Your email address: ')
code, result = self.post_to_server(data)
print 'Server response (%s): %s'%(code, result)
def build_post_data(self, action):
# figure the data to send - the metadata plus some additional
# information used by the package server
meta = self.distribution.metadata
data = {
':action': action,
'metadata_version' : '1.0',
'name': meta.get_name(),
'version': meta.get_version(),
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
}
return data
def post_to_server(self, data, auth=None):
''' Post a query to the server, and return a string response.
'''
# Build up the MIME payload for the urllib2 POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if type(value) != type([]):
value = [value]
for value in value:
value = unicode(value).encode("utf-8")
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
# build the Request
headers = {
'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
'Content-length': str(len(body))
}
req = urllib2.Request(self.repository, body, headers)
# handle HTTP and include the Basic Auth handler
opener = urllib2.build_opener(
urllib2.HTTPBasicAuthHandler(password_mgr=auth)
)
data = ''
try:
result = opener.open(req)
except urllib2.HTTPError, e:
if self.show_response:
data = e.fp.read()
result = e.code, e.msg
except urllib2.URLError, e:
result = 500, str(e)
else:
if self.show_response:
data = result.read()
result = 200, 'OK'
if self.show_response:
print '-'*75, data, '-'*75
return result
|
|
#!/usr/bin/env python
#
# Copyright 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Line based socket using gevent
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import errno
import gevent
from gevent import socket
from gevent import queue, select
from OpenSSL import SSL
from .util.decorator import utf8Encode, utf8Decode, raise_exceptions
class LineSocketBuffers(object):
def __init__(self):
self.readbuffer = bytearray()
self.writebuffer = bytearray()
def clear(self):
del self.readbuffer[0:]
del self.writebuffer[0:]
def readbuffer_mv(self):
return memoryview(self.readbuffer)
def writebuffer_mv(self):
return memoryview(self.writebuffer)
#We use this to end lines we send to the server; it's in the RFC
#Buffers don't support unicode just yet so 'encode'
LINEENDING = b'\r\n'
class LineSocket(object):
"""Line based socket impl takes a host and port"""
def __init__(self, host, port, SSL):
self.host, self.port, self.SSL = (host, port, SSL)
self._socket = None
self._buffer = LineSocketBuffers()
#Thread-safe queues for inbound (_IN) and outbound (_OUT) lines
self._IN = queue.Queue()
self._OUT = queue.Queue()
#Exceptions for LineSockets
class SocketError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# Connect to remote host
def connect(self):
host, port = (self.host, self.port)
#Clean out the buffers
self._buffer.clear()
#If the existing socket is not None close it
if self._socket is not None:
self.close()
# Resolve the hostname and connect (ipv6 ready)
sock = None
try:
for info in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
family, socktype, proto, canonname, sockaddr = info
#Validate that we can actually create this socket
try:
sock = socket.socket(family, socktype, proto)
#Set Keepalives
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
except socket.error, msg:
print('Socket Error: %s' % msg)
sock = None
continue
#Wrap in ssl if asked
if self.SSL:
print('Starting SSL')
try:
ctx = SSL.Context(SSL.SSLv23_METHOD)
sock = SSL.Connection(ctx, sock)
except SSL.Error, err:
print('Could not Initiate SSL: %s' % err)
sock = None
continue
#Try to establish the connection
try:
print('Trying Connect(%s)' % repr(sockaddr))
sock.settimeout(10)
sock.connect(sockaddr)
except socket.error, msg:
print('Socket Error: %s' % msg)
if self.SSL:
sock.shutdown()
sock.close()
sock = None
continue
break
except Exception as e:
print('Some unknown exception: %s' % e)
#If sock is still None after all the connection attempts, give up
if sock is None:
print('Could not open connection')
return False
#Set the socket to non_blocking
sock.setblocking(0)
print("Connection Open.")
self._socket = sock
return True
#Start up the read and write threads
def run(self):
#Fire off some greenlets to handle reading and writing
try:
print("Starting Read/Write Loops")
tasks = [gevent.spawn(raise_exceptions(self._read)),
gevent.spawn(raise_exceptions(self._write))]
#Wait for a socket exception and raise the flag
select.select([], [], [self._socket]) # Yield
raise self.SocketError('Socket Exception')
finally: # Make sure we kill the tasks
print("Killing read and write loops")
gevent.killall(tasks)
def close(self):
if self.SSL:
try:
self._socket.shutdown()
except:
pass
self._socket.close()
self._socket = None
#Read from the socket, split out lines into a queue for readline
def _read(self):
eof = False
while True:
try:
#Wait for when the socket is ready for read
select.select([self._socket], [], []) # Yield
data = self._socket.recv(4096)
if not data: # Disconnected Remote
eof = True
self._buffer.readbuffer.extend(data)
except SSL.WantReadError:
pass # Nonblocking ssl yo
except (SSL.ZeroReturnError, SSL.SysCallError):
eof = True
except socket.error as e:
if e.errno == errno.EAGAIN:
pass # Don't Care
else:
raise
#If there are lines to process, do so
while LINEENDING in self._buffer.readbuffer:
#Find the buffer offset
size = self._buffer.readbuffer.find(LINEENDING)
#Get the string from the buffer
line = self._buffer.readbuffer_mv()[0:size].tobytes()
#Place the string in the queue for safe handling
#Also convert it to unicode
self._IN.put(line)
#Delete the line from the buffer + 2 for line endings
del self._buffer.readbuffer[0:size + 2]
# Make sure we parse our readbuffer before we return
if eof: # You would think reading from a disconnected socket would
# raise an exception
raise self.SocketError('EOF')
#Read Operation (Block)
@utf8Decode.returnValue
def readline(self):
return self._IN.get()
#Write Operation
def _write(self):
while True:
line = self._OUT.get() # Yield Operation
self._buffer.writebuffer.extend(line + LINEENDING)
#If we have buffered data to write, let's write it all
while self._buffer.writebuffer:
try:
gevent.sleep(0) # This gets tight sometimes
#Try to dump 4096 bytes to the socket
count = self._socket.send(
self._buffer.writebuffer_mv()[0:4096])
#Remove sent len from buffer
del self._buffer.writebuffer[0:count]
except SSL.WantReadError:
gevent.sleep(0) # Yield so this is not tight
except socket.error as e:
if e.errno == errno.EPIPE:
raise self.SocketError('Broken Pipe')
else:
raise self.SocketError('Err Socket Code: ' + e.errno)
except SSL.SysCallError as (errnum, errstr):
if errnum == errno.EPIPE:
raise self.SocketError(errstr)
else:
raise self.SocketError('SSL Syscall (%d) Error: %s'
% (errnum, errstr))
#writeline Operation [Blocking]
@utf8Encode
def writeline(self, data):
self._OUT.put(data)
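#Hedged usage sketch (not part of the original module; the hostname is
#hypothetical): call connect(), then run() in its own greenlet while other
#greenlets block on readline()/writeline(), e.g.
#
#  sock = LineSocket('irc.example.org', 6667, SSL=False)
#  if sock.connect():
#      gevent.spawn(sock.run)
#      sock.writeline('PING example')
#      line = sock.readline()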
|
|
import re
import sys
def find_tag(info_string, tag):
tag_start = info_string.find(tag)
while tag_start != -1 and (tag_start > 0 and info_string[tag_start-1] != ';'):
tag_start = info_string.find(tag, tag_start+1)
return tag_start
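def _example_find_tag():
    # Minimal sketch (assumption, not part of the original module): find_tag
    # only matches a tag at the start of the string or directly after a ';'.
    info = 'SVTYPE=DEL;AF=0.25;IMPRECISE'
    assert find_tag(info, 'SVTYPE=') == 0
    assert find_tag(info, 'AF=') == 11
    assert find_tag(info, 'MISSING=') == -1
    return info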
class Bedpe(object):
def __init__(self, bed_list):
self.c1 = bed_list[0]
self.s1 = int(bed_list[1])
self.e1 = int(bed_list[2])
self.c2 = bed_list[3]
self.s2 = int(bed_list[4])
self.e2 = int(bed_list[5])
self.name = bed_list[6]
self.score = self.parse_score(bed_list[7])
self.o1 = bed_list[8]
self.o2 = bed_list[9]
self.svtype = bed_list[10]
self.filter = bed_list[11]
self.orig_name1 = bed_list[12]
self.orig_ref1 = bed_list[13]
self.orig_alt1 = bed_list[14]
self.orig_name2 = bed_list[15]
self.orig_ref2 = bed_list[16]
self.orig_alt2 = bed_list[17]
self.malformedFlag = 0
self.info1 = bed_list[18]
self.info2 = bed_list[19]
self.misc = bed_list[20:]
self.check_malformed()
# FIXME This is only really needed for varlookup. Something more general would be helpful
self.cohort_vars = dict()
try:
self.svtype = self.retrieve_svtype()
except ValueError:
sys.stderr.write('No SVTYPE parseable for {0}'.format('\t'.join(bed_list)))
sys.exit(1)
self.af = self.retrieve_af()
if self.svtype != bed_list[10]:
sys.stderr.write("SVTYPE at Column 11({0})) and SVTYPE in INFO Column({1}) don't match at variant ID {3}\n".format(str(bed_list[10]), str(self.svtype), self.name))
@staticmethod
def parse_score(score):
if score.isdigit():
return float(score)
else:
return score
@staticmethod
def parse_info_tag(info_string, tag):
'''
Accessory method to parse out the value of a tag in an info string.
Make sure to include the equals sign if you are looking for a
non-boolean tag
'''
tag_start = find_tag(info_string, tag)
if tag_start == -1:
# If you were looking for a flag then this is the right value.
# Otherwise your tag doesn't exist. Client code must know how to
# interpret.
return False
tag_end = info_string.find(';', tag_start)
value_start = tag_start + len(tag)
if (value_start >= len(info_string)) or (tag_end != -1 and value_start >= tag_end):
return True
if tag_end == -1:
tag_end = None # We didn't find our end index
return info_string[value_start:tag_end]
@staticmethod
def update_info_tag(info_string, tag, new_value):
'''
Accessory method to update a tag's value. Like parse_info_tag, make sure to include the equals sign.
'''
tag_start = find_tag(info_string, tag)
if tag_start == -1:
            new_tag = ';' + str(tag)
new_tag += str(new_value)
new_info_string = info_string + new_tag
return new_info_string
#raise ValueError("Tag {0} doesn't exist".format(tag))
tag_end = info_string.find(';', tag_start)
value_start = tag_start + len(tag)
if (value_start >= tag_end and tag_end != -1) or value_start >= len(info_string):
raise ValueError("Tag {0} doesn't have a value".format(tag))
if tag_end == -1:
tag_end = None # We didn't find our end index
new_info_string = info_string[:value_start] + new_value
if tag_end:
new_info_string += info_string[tag_end:]
return new_info_string
@property
def info(self):
'''
Return the appropriate info field if only one is required. Info from the primary variant is preferred if available.
'''
if self.info1 == 'MISSING':
return self.info2
else:
return self.info1
def set_info(self, field, value):
'''
Add the info field to the BEDPE line info fields. As BEDPE lines don't know about their headers this is not a safe operation.
Doesn't add to info field if it is the null character. Probably this is wrong.
'''
        new_tag = ';' + str(field)
if value is not None:
new_tag += '=' + str(value)
if self.malformedFlag != 1:
self.info1 = self.info1 + new_tag
if self.malformedFlag != 2 and self.info2 != '.':
self.info2 = self.info2 + new_tag
def check_malformed(self):
if self.info1 == 'MISSING':
self.malformedFlag = 1
if self.info2 == 'MISSING':
self.malformedFlag = 2
def retrieve_svtype(self):
try:
svtype = re.split('=', ''.join(filter(lambda x: x.startswith('SVTYPE='), self.info.split(';'))))[1]
except IndexError:
raise ValueError('SVTYPE field not present in INFO field')
return svtype
def retrieve_af(self):
try:
af = re.split('=', ''.join(filter(lambda x: x.startswith('AF='), self.info.split(';'))))[1]
except IndexError:
af = None
return af
def __str__(self):
'''
A string representation of the line represented by this object
'''
return '\t'.join([
self.c1,
str(self.s1),
str(self.e1),
self.c2,
str(self.s2),
str(self.e2),
self.name,
str(self.score),
self.o1,
self.o2,
self.svtype,
self.filter,
self.orig_name1,
self.orig_ref1,
self.orig_alt1,
self.orig_name2,
self.orig_ref2,
self.orig_alt2,
self.info1,
self.info2] +
self.misc
)
@staticmethod
def sname_value(info_string):
'''
Retrieves the SNAME value from an info_string. Static method so we can
easily do it for either info1 or info2 on demand
'''
value = Bedpe.parse_info_tag(info_string, 'SNAME=')
if value in (False, True):
return None
else:
return value
@staticmethod
def _combine_sname_values(first, second):
'''
Combine the sname values from two comma-separated strings
'''
combined = None
if first is not None and second is not None:
sname_set = set(first.split(',') + second.split(','))
combined = ','.join(sname_set)
else:
combined = first or second # set to whichever is non-None
return combined
@staticmethod
def _update_sname_field(original_info1, original_info2):
'''
Update the sname field in the original info string by adding
values from the another info string
'''
new_sname = Bedpe._combine_sname_values(
Bedpe.sname_value(original_info1),
Bedpe.sname_value(original_info2))
if new_sname:
return Bedpe.update_info_tag(original_info1, 'SNAME=', new_sname)
else:
return original_info1
def combine_snames(self, other):
'''
This method adds the sname values from the info fields of another bedpe
entry into the sname tag of itself
'''
self.info1 = self._update_sname_field(self.info1, other.info1)
self.info2 = self._update_sname_field(self.info2, other.info2)
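# Illustrative sketch (not part of the original module): exercising the INFO-string
# helpers defined above; guarded so that importing this module stays side-effect free.
if __name__ == '__main__':
    _info = 'SVTYPE=BND;AF=0.25;IMPRECISE'
    assert Bedpe.parse_info_tag(_info, 'AF=') == '0.25'
    assert Bedpe.parse_info_tag(_info, 'IMPRECISE') is True   # flag-style tag
    assert Bedpe.parse_info_tag(_info, 'SNAME=') is False     # absent tag
    assert (Bedpe.update_info_tag(_info, 'AF=', '0.50')
            == 'SVTYPE=BND;AF=0.50;IMPRECISE')
    assert (sorted(Bedpe._combine_sname_values('a:1,b:2', 'b:2,c:3').split(','))
            == ['a:1', 'b:2', 'c:3'])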
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc.
"""
Neutron PLUMgrid Plug-in for PLUMgrid Virtual Technology
This plugin will forward authenticated REST API calls
to the Network Operating System by PLUMgrid called NOS
"""
from oslo.config import cfg
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.openstack.common import log as logging
from neutron.plugins.plumgrid.common import exceptions as plum_excep
from neutron.plugins.plumgrid.plumgrid_nos_plugin.plugin_ver import VERSION
from neutron.plugins.plumgrid.plumgrid_nos_plugin import plumgrid_nos_snippets
from neutron.plugins.plumgrid.plumgrid_nos_plugin import rest_connection
LOG = logging.getLogger(__name__)
nos_server_opts = [
cfg.StrOpt('nos_server', default='localhost',
help=_("PLUMgrid NOS server to connect to")),
cfg.StrOpt('nos_server_port', default='8080',
help=_("PLUMgrid NOS server port to connect to")),
cfg.StrOpt('username', default='username',
help=_("PLUMgrid NOS admin username")),
cfg.StrOpt('password', default='password', secret=True,
help=_("PLUMgrid NOS admin password")),
cfg.IntOpt('servertimeout', default=5,
help=_("PLUMgrid NOS server timeout")),
    cfg.StrOpt('topologyname', default='t1',
help=_("PLUMgrid NOS topology name")), ]
cfg.CONF.register_opts(nos_server_opts, "PLUMgridNOS")
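# A minimal sketch of the matching configuration section that oslo.config would read
# for the options registered above (values shown are examples only):
#
#   [PLUMgridNOS]
#   nos_server = 192.168.0.10
#   nos_server_port = 8080
#   username = admin
#   password = secret
#   servertimeout = 5
#   topologyname = t1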
class NeutronPluginPLUMgridV2(db_base_plugin_v2.NeutronDbPluginV2):
def __init__(self):
LOG.info(_('NeutronPluginPLUMgrid Status: Starting Plugin'))
# PLUMgrid NOS configuration
nos_plumgrid = cfg.CONF.PLUMgridNOS.nos_server
nos_port = cfg.CONF.PLUMgridNOS.nos_server_port
timeout = cfg.CONF.PLUMgridNOS.servertimeout
self.topology_name = cfg.CONF.PLUMgridNOS.topologyname
self.snippets = plumgrid_nos_snippets.DataNOSPLUMgrid()
# TODO(Edgar) These are placeholders for next PLUMgrid release
cfg.CONF.PLUMgridNOS.username
cfg.CONF.PLUMgridNOS.password
self.rest_conn = rest_connection.RestConnection(nos_plumgrid,
nos_port, timeout)
if self.rest_conn is None:
raise SystemExit(_('NeutronPluginPLUMgrid Status: '
'Aborting Plugin'))
else:
# Plugin DB initialization
db.configure_db()
# PLUMgrid NOS info validation
LOG.info(_('NeutronPluginPLUMgrid NOS: %s'), nos_plumgrid)
if not nos_plumgrid:
raise SystemExit(_('NeutronPluginPLUMgrid Status: '
'NOS value is missing in config file'))
LOG.debug(_('NeutronPluginPLUMgrid Status: Neutron server with '
'PLUMgrid Plugin has started'))
def create_network(self, context, network):
"""Create network core Neutron API."""
LOG.debug(_('NeutronPluginPLUMgrid Status: create_network() called'))
# Plugin DB - Network Create and validation
tenant_id = self._get_tenant_id_for_create(context,
network["network"])
self._network_admin_state(network)
with context.session.begin(subtransactions=True):
net = super(NeutronPluginPLUMgridV2, self).create_network(context,
network)
try:
LOG.debug(_('NeutronPluginPLUMgrid Status: %(tenant_id)s, '
'%(network)s, %(network_id)s'),
dict(
tenant_id=tenant_id,
network=network["network"],
network_id=net["id"],
))
nos_url = self.snippets.BASE_NOS_URL + net["id"]
headers = {}
body_data = self.snippets.create_domain_body_data(tenant_id)
self.rest_conn.nos_rest_conn(nos_url,
'PUT', body_data, headers)
except Exception:
err_message = _("PLUMgrid NOS communication failed")
                LOG.exception(err_message)
raise plum_excep.PLUMgridException(err_message)
# return created network
return net
def update_network(self, context, net_id, network):
"""Update network core Neutron API."""
LOG.debug(_("NeutronPluginPLUMgridV2.update_network() called"))
self._network_admin_state(network)
tenant_id = self._get_tenant_id_for_create(context, network["network"])
with context.session.begin(subtransactions=True):
# Plugin DB - Network Update
new_network = super(
NeutronPluginPLUMgridV2, self).update_network(context,
net_id, network)
try:
# PLUMgrid Server does not support updating resources yet
nos_url = self.snippets.BASE_NOS_URL + net_id
headers = {}
body_data = {}
self.rest_conn.nos_rest_conn(nos_url,
'DELETE', body_data, headers)
nos_url = self.snippets.BASE_NOS_URL + new_network["id"]
body_data = self.snippets.create_domain_body_data(tenant_id)
self.rest_conn.nos_rest_conn(nos_url,
'PUT', body_data, headers)
except Exception:
err_message = _("PLUMgrid NOS communication failed")
                LOG.exception(err_message)
raise plum_excep.PLUMgridException(err_message)
# return updated network
return new_network
def delete_network(self, context, net_id):
"""Delete network core Neutron API."""
LOG.debug(_("NeutronPluginPLUMgrid Status: delete_network() called"))
super(NeutronPluginPLUMgridV2, self).get_network(context, net_id)
with context.session.begin(subtransactions=True):
# Plugin DB - Network Delete
super(NeutronPluginPLUMgridV2, self).delete_network(context,
net_id)
try:
nos_url = self.snippets.BASE_NOS_URL + net_id
headers = {}
body_data = {}
self.rest_conn.nos_rest_conn(nos_url,
'DELETE', body_data, headers)
except Exception:
err_message = _("PLUMgrid NOS communication failed")
                LOG.exception(err_message)
raise plum_excep.PLUMgridException(err_message)
def create_port(self, context, port):
"""Create port core Neutron API."""
LOG.debug(_("NeutronPluginPLUMgrid Status: create_port() called"))
        # Port operations on PLUMgrid NOS happen automatically via the
        # VIF driver operations in Nova. They require admin_state_up to be True.
port["port"]["admin_state_up"] = True
# Plugin DB - Port Create and Return port
return super(NeutronPluginPLUMgridV2, self).create_port(context,
port)
def update_port(self, context, port_id, port):
"""Update port core Neutron API."""
LOG.debug(_("NeutronPluginPLUMgrid Status: update_port() called"))
        # Port operations on PLUMgrid NOS happen automatically via the
        # VIF driver operations in Nova.
# Plugin DB - Port Update
return super(NeutronPluginPLUMgridV2, self).update_port(
context, port_id, port)
def delete_port(self, context, port_id):
"""Delete port core Neutron API."""
LOG.debug(_("NeutronPluginPLUMgrid Status: delete_port() called"))
        # Port operations on PLUMgrid NOS happen automatically via the
        # VIF driver operations in Nova.
# Plugin DB - Port Delete
super(NeutronPluginPLUMgridV2, self).delete_port(context, port_id)
def create_subnet(self, context, subnet):
"""Create subnet core Neutron API."""
LOG.debug(_("NeutronPluginPLUMgrid Status: create_subnet() called"))
with context.session.begin(subtransactions=True):
# Plugin DB - Subnet Create
subnet = super(NeutronPluginPLUMgridV2, self).create_subnet(
context, subnet)
subnet_details = self._get_subnet(context, subnet["id"])
net_id = subnet_details["network_id"]
tenant_id = subnet_details["tenant_id"]
try:
nos_url = self.snippets.BASE_NOS_URL + net_id
headers = {}
body_data = self.snippets.create_network_body_data(
tenant_id, self.topology_name)
self.rest_conn.nos_rest_conn(nos_url,
'PUT', body_data, headers)
except Exception:
err_message = _("PLUMgrid NOS communication failed: ")
                LOG.exception(err_message)
raise plum_excep.PLUMgridException(err_message)
return subnet
def delete_subnet(self, context, subnet_id):
"""Delete subnet core Neutron API."""
LOG.debug(_("NeutronPluginPLUMgrid Status: delete_subnet() called"))
#Collecting subnet info
subnet_details = self._get_subnet(context, subnet_id)
with context.session.begin(subtransactions=True):
# Plugin DB - Subnet Delete
del_subnet = super(NeutronPluginPLUMgridV2, self).delete_subnet(
context, subnet_id)
try:
headers = {}
body_data = {}
net_id = subnet_details["network_id"]
self._cleaning_nos_subnet_structure(body_data, headers, net_id)
except Exception:
err_message = _("PLUMgrid NOS communication failed: ")
                LOG.exception(err_message)
raise plum_excep.PLUMgridException(err_message)
return del_subnet
def update_subnet(self, context, subnet_id, subnet):
"""Update subnet core Neutron API."""
LOG.debug(_("update_subnet() called"))
#Collecting subnet info
initial_subnet = self._get_subnet(context, subnet_id)
net_id = initial_subnet["network_id"]
tenant_id = initial_subnet["tenant_id"]
with context.session.begin(subtransactions=True):
# Plugin DB - Subnet Update
new_subnet = super(NeutronPluginPLUMgridV2, self).update_subnet(
context, subnet_id, subnet)
try:
# PLUMgrid Server does not support updating resources yet
headers = {}
body_data = {}
self._cleaning_nos_subnet_structure(body_data, headers, net_id)
nos_url = self.snippets.BASE_NOS_URL + net_id
body_data = self.snippets.create_network_body_data(
tenant_id, self.topology_name)
self.rest_conn.nos_rest_conn(nos_url,
'PUT', body_data, headers)
except Exception:
err_message = _("PLUMgrid NOS communication failed: ")
                LOG.exception(err_message)
raise plum_excep.PLUMgridException(err_message)
return new_subnet
"""
Extension API implementation
"""
# TODO(Edgar) Complete extensions for PLUMgrid
"""
    Internal PLUMgrid functions
"""
def _get_plugin_version(self):
return VERSION
def _cleaning_nos_subnet_structure(self, body_data, headers, net_id):
domain_structure = ['/properties', '/link', '/ne']
for structure in domain_structure:
nos_url = self.snippets.BASE_NOS_URL + net_id + structure
self.rest_conn.nos_rest_conn(nos_url, 'DELETE', body_data, headers)
def _network_admin_state(self, network):
try:
if network["network"].get("admin_state_up"):
network_name = network["network"]["name"]
if network["network"]["admin_state_up"] is False:
LOG.warning(_("Network with admin_state_up=False are not "
"supported yet by this plugin. Ignoring "
"setting for network %s"), network_name)
except Exception:
err_message = _("Network Admin State Validation Falied: ")
LOG.Exception(err_message)
raise plum_excep.PLUMgridException(err_message)
return network
|
|
import codecs
from contextlib import contextmanager
from cStringIO import StringIO
import json
import os
import shutil
import socket
import tempfile
import urllib2
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
import mock
from nose.tools import eq_
from PIL import Image
import amo
import amo.tests
from addons.models import Preview
from amo.tests.test_helpers import get_image_path
from amo.urlresolvers import reverse
from amo.utils import ImageCheck
from files.models import FileUpload
import mkt
from mkt.developers import tasks
from mkt.site.fixtures import fixture
from mkt.submit.tests.test_views import BaseWebAppTest
from mkt.webapps.models import AddonExcludedRegion as AER, Webapp
def test_resize_icon_shrink():
""" Image should be shrunk so that the longest side is 32px. """
resize_size = 32
final_size = (32, 12)
_uploader(resize_size, final_size)
def test_resize_icon_enlarge():
""" Image stays the same, since the new size is bigger than both sides. """
resize_size = 1000
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_same():
""" Image stays the same, since the new size is the same. """
resize_size = 339
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_list():
""" Resize multiple images at once. """
resize_size = [32, 82, 100]
final_size = [(32, 12), (82, 30), (100, 37)]
_uploader(resize_size, final_size)
def _uploader(resize_size, final_size):
img = get_image_path('mozilla.png')
original_size = (339, 128)
src = tempfile.NamedTemporaryFile(mode='r+w+b', suffix=".png",
delete=False)
# resize_icon removes the original
shutil.copyfile(img, src.name)
with storage.open(src.name) as fp:
src_image = Image.open(fp)
src_image.load()
eq_(src_image.size, original_size)
if isinstance(final_size, list):
for rsize, fsize in zip(resize_size, final_size):
dest_name = os.path.join(settings.ADDON_ICONS_PATH, '1234')
tasks.resize_icon(src.name, dest_name, resize_size, locally=True)
with storage.open("%s-%s.png" % (dest_name, rsize)) as fp:
dest_image = Image.open(fp)
dest_image.load()
# Assert that the width is always identical.
eq_(dest_image.size[0], fsize[0])
# Assert that the height can be a wee bit fuzzy.
assert -1 <= dest_image.size[1] - fsize[1] <= 1, (
"Got width %d, expected %d" %
(fsize[1], dest_image.size[1]))
if os.path.exists(dest_image.filename):
os.remove(dest_image.filename)
assert not os.path.exists(dest_image.filename)
else:
dest = tempfile.NamedTemporaryFile(mode='r+w+b', suffix=".png")
tasks.resize_icon(src.name, dest.name, resize_size, locally=True)
with storage.open(dest.name) as fp:
dest_image = Image.open(fp)
dest_image.load()
# Assert that the width is always identical.
eq_(dest_image.size[0], final_size[0])
# Assert that the height can be a wee bit fuzzy.
assert -1 <= dest_image.size[1] - final_size[1] <= 1, (
"Got width %d, expected %d" % (final_size[1], dest_image.size[1]))
assert not os.path.exists(src.name)
class TestValidator(amo.tests.TestCase):
def setUp(self):
self.upload = FileUpload.objects.create()
assert not self.upload.valid
def get_upload(self):
return FileUpload.objects.get(pk=self.upload.pk)
@mock.patch('mkt.developers.tasks.run_validator')
def test_pass_validation(self, _mock):
_mock.return_value = '{"errors": 0}'
tasks.validator(self.upload.pk)
assert self.get_upload().valid
@mock.patch('mkt.developers.tasks.run_validator')
def test_fail_validation(self, _mock):
_mock.return_value = '{"errors": 2}'
tasks.validator(self.upload.pk)
assert not self.get_upload().valid
@mock.patch('mkt.developers.tasks.run_validator')
def test_validation_error(self, _mock):
_mock.side_effect = Exception
eq_(self.upload.task_error, None)
with self.assertRaises(Exception):
tasks.validator(self.upload.pk)
error = self.get_upload().task_error
assert error is not None
assert error.startswith('Traceback (most recent call last)'), error
@mock.patch('mkt.developers.tasks.validate_app')
@mock.patch('mkt.developers.tasks.storage.open')
def test_validate_manifest(self, _open, _mock):
self.get_upload().update(is_webapp=True)
_open.return_value = StringIO('')
_mock.return_value = '{"errors": 0}'
tasks.validator(self.upload.pk)
assert _mock.called
@mock.patch('mkt.developers.tasks.validate_packaged_app')
@mock.patch('zipfile.is_zipfile')
def test_validate_packaged_app(self, _zipfile, _mock):
self.get_upload().update(is_webapp=True)
_zipfile.return_value = True
_mock.return_value = '{"errors": 0}'
tasks.validator(self.upload.pk)
assert _mock.called
storage_open = storage.open
def _mock_hide_64px_icon(path, *args, **kwargs):
"""
A function that mocks `storage.open` and throws an IOError if you try to
open a 128x128px icon.
"""
if '128' in path:
raise IOError('No 128px icon for you!')
return storage_open(path, *args, **kwargs)
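# Illustrative usage (hypothetical test, not part of the original suite): a test that
# needs the 128px icon read to fail can swap this helper in for the real backend, e.g.
#   @mock.patch('mkt.developers.tasks.storage.open', _mock_hide_64px_icon)
#   def test_icon_fetch_without_128px(self):
#       ...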
class TestResizePreview(amo.tests.TestCase):
fixtures = fixture('webapp_337141')
def test_preview(self):
addon = Webapp.objects.get(pk=337141)
preview = Preview.objects.create(addon=addon)
src = get_image_path('preview.jpg')
tasks.resize_preview(src, preview)
preview = preview.reload()
eq_(preview.image_size, [400, 533])
eq_(preview.thumbnail_size, [180, 240])
eq_(preview.is_landscape, False)
with storage.open(preview.thumbnail_path) as fp:
im = Image.open(fp)
eq_(list(im.size), [180, 240])
def test_preview_rotated(self):
addon = Webapp.objects.get(pk=337141)
preview = Preview.objects.create(addon=addon)
src = get_image_path('preview_landscape.jpg')
tasks.resize_preview(src, preview)
preview = preview.reload()
eq_(preview.image_size, [533, 400])
eq_(preview.thumbnail_size, [240, 180])
eq_(preview.is_landscape, True)
with storage.open(preview.thumbnail_path) as fp:
im = Image.open(fp)
eq_(list(im.size), [240, 180])
class TestFetchManifest(amo.tests.TestCase):
def setUp(self):
self.upload = FileUpload.objects.create()
self.content_type = 'application/x-web-app-manifest+json'
patcher = mock.patch('mkt.developers.tasks.urllib2.urlopen')
self.urlopen_mock = patcher.start()
self.addCleanup(patcher.stop)
def get_upload(self):
return FileUpload.objects.get(pk=self.upload.pk)
def file(self, name):
return os.path.join(os.path.dirname(__file__), 'addons', name)
@contextmanager
def patch_urlopen(self):
response_mock = mock.Mock()
response_mock.getcode.return_value = 200
response_mock.read.return_value = '<default>'
response_mock.headers = {'Content-Type': self.content_type}
yield response_mock
self.urlopen_mock.return_value = response_mock
@mock.patch('mkt.developers.tasks.validator')
def test_success_add_file(self, validator_mock):
with self.patch_urlopen() as ur:
ur.read.return_value = 'woo'
ur.headers = {'Content-Type': self.content_type}
tasks.fetch_manifest('http://xx.com/manifest.json', self.upload.pk)
upload = FileUpload.objects.get(pk=self.upload.pk)
eq_(upload.name, 'http://xx.com/manifest.json')
eq_(upload.is_webapp, True)
eq_(storage.open(upload.path).read(), 'woo')
@mock.patch('mkt.developers.tasks.validator')
def test_success_call_validator(self, validator_mock):
with self.patch_urlopen() as ur:
ct = self.content_type + '; charset=utf-8'
ur.headers = {'Content-Type': ct}
tasks.fetch_manifest('http://xx.com/manifest.json', self.upload.pk)
assert validator_mock.called
def check_validation(self, msg=''):
upload = self.get_upload()
if msg:
validation = json.loads(upload.validation)
eq_([m['message'] for m in validation['messages']], [msg])
eq_(validation['errors'], 1)
eq_(validation['success'], False)
eq_(len(validation['messages']), 1)
else:
validation_output = upload.validation
if not validation_output:
return
validation = json.loads(validation_output)
assert not validation['messages']
eq_(validation['errors'], 0)
eq_(validation['success'], True)
def test_connection_error(self):
reason = socket.gaierror(8, 'nodename nor servname provided')
self.urlopen_mock.side_effect = urllib2.URLError(reason)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'No manifest was found at that URL. Check the address and try '
'again.')
def test_url_timeout(self):
reason = socket.timeout('too slow')
self.urlopen_mock.side_effect = urllib2.URLError(reason)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'No manifest was found at that URL. Check the address and try '
'again.')
def test_other_url_error(self):
reason = Exception('Some other failure.')
self.urlopen_mock.side_effect = urllib2.URLError(reason)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'No manifest was found at that URL. Check the address and try '
'again.')
@mock.patch('mkt.developers.tasks.validator', lambda uid, **kw: None)
def test_no_content_type(self):
with self.patch_urlopen() as ur:
ur.headers = {}
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'No manifest was found at that URL. Check the address and try '
'again.')
@mock.patch('mkt.developers.tasks.validator', lambda uid, **kw: None)
def test_bad_content_type(self):
with self.patch_urlopen() as ur:
ur.headers = {'Content-Type': 'x'}
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'Manifests must be served with the HTTP header "Content-Type: '
'application/x-web-app-manifest+json". See %s for more '
'information.' % tasks.CT_URL)
@mock.patch('mkt.developers.tasks.validator', lambda uid, **kw: None)
def test_good_charset(self):
with self.patch_urlopen() as ur:
ur.headers = {
'Content-Type': 'application/x-web-app-manifest+json;'
'charset=utf-8'}
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation()
@mock.patch('mkt.developers.tasks.validator', lambda uid, **kw: None)
def test_bad_charset(self):
with self.patch_urlopen() as ur:
ur.headers = {
'Content-Type': 'application/x-web-app-manifest+json;'
'charset=ISO-1234567890-LOL'}
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation("The manifest's encoding does not match the "
'charset provided in the HTTP Content-Type.')
def test_response_too_large(self):
with self.patch_urlopen() as ur:
content = 'x' * (settings.MAX_WEBAPP_UPLOAD_SIZE + 1)
ur.read.return_value = content
tasks.fetch_manifest('url', self.upload.pk)
max_webapp_size = settings.MAX_WEBAPP_UPLOAD_SIZE
self.check_validation('Your manifest must be less than %s bytes.' %
max_webapp_size)
def test_http_error(self):
self.urlopen_mock.side_effect = urllib2.HTTPError(
'url', 404, 'Not Found', [], None)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'No manifest was found at that URL. Check the address and try '
'again.')
def test_strip_utf8_bom(self):
with self.patch_urlopen() as ur:
with open(self.file('utf8bom.webapp')) as fp:
ur.read.return_value = fp.read()
tasks.fetch_manifest('url', self.upload.pk)
upload = self.get_upload()
with storage.open(upload.path, 'rb') as fp:
manifest = fp.read()
json.loads(manifest) # no parse error
assert not manifest.startswith(codecs.BOM_UTF8)
def test_non_utf8_encoding(self):
with self.patch_urlopen() as ur:
with open(self.file('utf8bom.webapp')) as fp:
# Set encoding to utf16 which will be invalid
ur.read.return_value = fp.read().decode('utf8').encode('utf16')
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'Your manifest file was not encoded as valid UTF-8.')
class TestFetchIcon(BaseWebAppTest):
def setUp(self):
super(TestFetchIcon, self).setUp()
self.content_type = 'image/png'
self.apps_path = os.path.join(settings.ROOT, 'apps', 'devhub', 'tests',
'addons')
patcher = mock.patch('mkt.developers.tasks.urllib2.urlopen')
self.urlopen_mock = patcher.start()
self.urlopen_mock.return_value = StringIO('mozballin')
self.addCleanup(patcher.stop)
def webapp_from_path(self, path):
self.upload = self.get_upload(abspath=path)
self.url = reverse('submit.app')
assert self.client.login(username='regular@mozilla.com',
password='password')
return self.post_addon()
def test_no_version(self):
app = Webapp()
eq_(tasks.fetch_icon(app), None)
def test_no_icons(self):
path = os.path.join(self.apps_path, 'noicon.webapp')
iconless_app = self.webapp_from_path(path)
tasks.fetch_icon(iconless_app)
assert not self.urlopen_mock.called
def test_bad_icons(self):
path = os.path.join(self.apps_path, 'badicon.webapp')
iconless_app = self.webapp_from_path(path)
tasks.fetch_icon(iconless_app)
assert not self.urlopen_mock.called
def check_icons(self, webapp):
manifest = webapp.get_manifest_json()
biggest = max([int(size) for size in manifest['icons']])
icon_dir = webapp.get_icon_dir()
for size in amo.ADDON_ICON_SIZES:
if not size <= biggest:
continue
icon_path = os.path.join(icon_dir, '%s-%s.png'
% (str(webapp.id), size))
with open(icon_path, 'r') as img:
checker = ImageCheck(img)
assert checker.is_image()
eq_(checker.img.size, (size, size))
def test_data_uri(self):
app_path = os.path.join(self.apps_path, 'dataicon.webapp')
webapp = self.webapp_from_path(app_path)
tasks.fetch_icon(webapp)
eq_(webapp.icon_type, self.content_type)
self.check_icons(webapp)
def test_hosted_icon(self):
app_path = os.path.join(self.apps_path, 'mozball.webapp')
webapp = self.webapp_from_path(app_path)
img_path = os.path.join(self.apps_path, 'mozball-128.png')
with open(img_path, 'r') as content:
tasks.save_icon(webapp, content.read())
eq_(webapp.icon_type, self.content_type)
self.check_icons(webapp)
@mock.patch('mkt.developers.tasks._fetch_content')
@mock.patch('mkt.developers.tasks.save_icon')
def test_cdn_icon(self, save, fetch):
response = mock.Mock()
response.read.return_value = ''
webapp = mock.Mock()
webapp.is_packaged = False
url = 'http://foo.com/bar'
webapp.get_manifest_json.return_value = {'icons': {'128': url}}
tasks.fetch_icon(webapp)
assert url in fetch.call_args[0][0]
@mock.patch('mkt.developers.tasks.SafeUnzip')
@mock.patch('mkt.developers.tasks.save_icon')
def test_packaged_icon(self, save, zip):
response = mock.Mock()
response.read.return_value = ''
zf = mock.Mock()
zip.return_value = zf
webapp = mock.Mock()
webapp.is_packaged = True
url = '/path/to/icon.png'
webapp.get_manifest_json.return_value = {'icons': {'128': url}}
tasks.fetch_icon(webapp)
assert url[1:] in zf.extract_path.call_args[0][0]
class TestRegionEmail(amo.tests.WebappTestCase):
@mock.patch.object(settings, 'SITE_URL', 'http://omg.org/')
def test_email_for_one_new_region(self):
tasks.region_email([self.app.id], [mkt.regions.BR])
msg = mail.outbox[0]
eq_(msg.subject, '%s: Brazil region added to the Firefox Marketplace'
% self.app.name)
eq_(msg.to, ['steamcube@mozilla.com'])
dev_url = ('http://omg.org/developers/app/something-something/'
'edit#details')
assert unicode(self.app.name) in msg.body
assert dev_url in msg.body
assert ' added a new ' in msg.body
assert ' for Brazil.' in msg.body
# TODO: Re-enable this when we bring back Unsubscribe (bug 802379).
#assert 'Unsubscribe' in msg.body
@mock.patch.object(settings, 'SITE_URL', 'http://omg.org/')
def test_email_for_two_new_regions(self):
tasks.region_email([self.app.id],
[mkt.regions.UK, mkt.regions.BR])
msg = mail.outbox[0]
eq_(msg.subject, '%s: New regions added to the Firefox Marketplace'
% self.app.name)
eq_(msg.to, ['steamcube@mozilla.com'])
dev_url = ('http://omg.org/developers/app/something-something/'
'edit#details')
assert unicode(self.app.name) in msg.body
assert dev_url in msg.body
assert ' added two new ' in msg.body
assert ': Brazil and United Kingdom.' in msg.body
# TODO: Re-enable this when we bring back Unsubscribe (bug 802379).
#assert 'Unsubscribe' in msg.body
@mock.patch.object(settings, 'SITE_URL', 'http://omg.org/')
def test_email_for_several_new_regions(self):
tasks.region_email([self.app.id],
[mkt.regions.UK, mkt.regions.US, mkt.regions.BR])
msg = mail.outbox[0]
eq_(msg.subject, '%s: New regions added to the Firefox Marketplace'
% self.app.name)
assert ' added a few new ' in msg.body
assert ': Brazil, United Kingdom, and United States.' in msg.body
class TestRegionExclude(amo.tests.WebappTestCase):
def test_exclude_no_apps(self):
tasks.region_exclude([], [])
eq_(AER.objects.count(), 0)
tasks.region_exclude([], [mkt.regions.UK])
eq_(AER.objects.count(), 0)
def test_exclude_no_regions(self):
tasks.region_exclude([self.app.id], [])
eq_(AER.objects.count(), 0)
def test_exclude_one_new_region(self):
tasks.region_exclude([self.app.id], [mkt.regions.UK])
excluded = list(AER.objects.filter(addon=self.app)
.values_list('region', flat=True))
eq_(excluded, [mkt.regions.UK.id])
def test_exclude_several_new_regions(self):
tasks.region_exclude([self.app.id], [mkt.regions.US, mkt.regions.UK])
excluded = sorted(AER.objects.filter(addon=self.app)
.values_list('region', flat=True))
eq_(excluded, sorted([mkt.regions.US.id, mkt.regions.UK.id]))
|
|
import re
import inspect
import textwrap
import pydoc
import sphinx
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
    def _str_signature(self):
        # Signature output is intentionally left blank; the conditional that used
        # to follow this return was unreachable dead code.
        return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
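# Illustrative sketch (assumes the relative .docscrape imports above resolve, i.e.
# this module lives inside the numpydoc package): rendering a NumPy-style docstring
# to the reST that Sphinx consumes.
if __name__ == '__main__':
    def _example(x):
        """Add one to ``x``.

        Parameters
        ----------
        x : int
            Value to increment.
        """
        return x + 1

    print(get_doc_object(_example))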
|
|
import socket
import threading
import ssl
import OpenSSL
import pytest
from unittest import mock
from mitmproxy import connections
from mitmproxy import exceptions
from mitmproxy.net import tcp
from mitmproxy.net.http import http1
from mitmproxy.test import tflow
from mitmproxy.test import tutils
from .net import tservers
from pathod import test
class TestClientConnection:
def test_send(self):
c = tflow.tclient_conn()
c.send(b'foobar')
c.send([b'foo', b'bar'])
with pytest.raises(TypeError):
c.send('string')
with pytest.raises(TypeError):
c.send(['string', 'not'])
assert c.wfile.getvalue() == b'foobarfoobar'
def test_repr(self):
c = tflow.tclient_conn()
assert '127.0.0.1:22' in repr(c)
assert 'ALPN' in repr(c)
assert 'TLS' not in repr(c)
c.alpn_proto_negotiated = None
c.tls_established = True
assert 'ALPN' not in repr(c)
assert 'TLS' in repr(c)
def test_tls_established_property(self):
c = tflow.tclient_conn()
c.tls_established = True
assert c.ssl_established
assert c.tls_established
c.tls_established = False
assert not c.ssl_established
assert not c.tls_established
def test_make_dummy(self):
c = connections.ClientConnection.make_dummy(('foobar', 1234))
assert c.address == ('foobar', 1234)
def test_state(self):
c = tflow.tclient_conn()
assert connections.ClientConnection.from_state(c.get_state()).get_state() == \
c.get_state()
c2 = tflow.tclient_conn()
c2.address = (c2.address[0], 4242)
assert not c == c2
c2.timestamp_start = 42
c.set_state(c2.get_state())
assert c.timestamp_start == 42
c3 = c.copy()
assert c3.get_state() != c.get_state()
c.id = c3.id = "foo"
assert c3.get_state() == c.get_state()
def test_eq(self):
c = tflow.tclient_conn()
c2 = c.copy()
assert c == c
assert c != c2
assert c != 42
assert hash(c) != hash(c2)
class TestServerConnection:
def test_send(self):
c = tflow.tserver_conn()
c.send(b'foobar')
c.send([b'foo', b'bar'])
with pytest.raises(TypeError):
c.send('string')
with pytest.raises(TypeError):
c.send(['string', 'not'])
assert c.wfile.getvalue() == b'foobarfoobar'
def test_repr(self):
c = tflow.tserver_conn()
c.sni = 'foobar'
c.tls_established = True
c.alpn_proto_negotiated = b'h2'
assert 'address:22' in repr(c)
assert 'ALPN' in repr(c)
assert 'TLSv1.2: foobar' in repr(c)
c.sni = None
c.tls_established = True
c.alpn_proto_negotiated = None
assert 'ALPN' not in repr(c)
assert 'TLS' in repr(c)
c.sni = None
c.tls_established = False
assert 'TLS' not in repr(c)
def test_tls_established_property(self):
c = tflow.tserver_conn()
c.tls_established = True
assert c.ssl_established
assert c.tls_established
c.tls_established = False
assert not c.ssl_established
assert not c.tls_established
def test_make_dummy(self):
c = connections.ServerConnection.make_dummy(('foobar', 1234))
assert c.address == ('foobar', 1234)
def test_simple(self):
d = test.Daemon()
c = connections.ServerConnection((d.IFACE, d.port))
c.connect()
f = tflow.tflow()
f.server_conn = c
f.request.path = "/p/200:da"
# use this protocol just to assemble - not for actual sending
c.wfile.write(http1.assemble_request(f.request))
c.wfile.flush()
assert http1.read_response(c.rfile, f.request, 1000)
assert d.last_log()
c.finish()
c.close()
d.shutdown()
def test_terminate_error(self):
d = test.Daemon()
c = connections.ServerConnection((d.IFACE, d.port))
c.connect()
c.close()
c.connection = mock.Mock()
c.connection.recv = mock.Mock(return_value=False)
c.connection.flush = mock.Mock(side_effect=exceptions.TcpDisconnect)
d.shutdown()
def test_sni(self):
c = connections.ServerConnection(('', 1234))
        with pytest.raises(ValueError, match='sni must be str, not '):
c.establish_ssl(None, b'foobar')
def test_state(self):
c = tflow.tserver_conn()
c2 = c.copy()
assert c2.get_state() != c.get_state()
c.id = c2.id = "foo"
assert c2.get_state() == c.get_state()
def test_eq(self):
c = tflow.tserver_conn()
c2 = c.copy()
assert c == c
assert c != c2
assert c != 42
assert hash(c) != hash(c2)
class TestClientConnectionTLS:
@pytest.mark.parametrize("sni", [
None,
"example.com"
])
def test_tls_with_sni(self, sni):
address = ('127.0.0.1', 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(address)
sock.listen()
address = sock.getsockname()
def client_run():
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
s = socket.create_connection(address)
s = ctx.wrap_socket(s, server_hostname=sni)
s.send(b'foobar')
s.close()
threading.Thread(target=client_run).start()
connection, client_address = sock.accept()
c = connections.ClientConnection(connection, client_address, None)
cert = tutils.test_data.path("mitmproxy/net/data/server.crt")
with open(tutils.test_data.path("mitmproxy/net/data/server.key")) as f:
raw_key = f.read()
key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
raw_key)
c.convert_to_ssl(cert, key)
assert c.connected()
assert c.sni == sni
assert c.tls_established
assert c.rfile.read(6) == b'foobar'
c.finish()
sock.close()
class TestServerConnectionTLS(tservers.ServerTestBase):
ssl = True
class handler(tcp.BaseHandler):
def handle(self):
self.finish()
@pytest.mark.parametrize("clientcert", [
None,
tutils.test_data.path("mitmproxy/data/clientcert"),
tutils.test_data.path("mitmproxy/data/clientcert/client.pem"),
])
def test_tls(self, clientcert):
c = connections.ServerConnection(("127.0.0.1", self.port))
c.connect()
c.establish_ssl(clientcert, "foo.com")
assert c.connected()
assert c.sni == "foo.com"
assert c.tls_established
c.close()
c.finish()
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import sys
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.project_tree_factory import get_project_tree
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.bin.engine_initializer import EngineInitializer
from pants.bin.repro import Reproducer
from pants.bin.target_roots import TargetRoots
from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper
from pants.build_graph.build_file_parser import BuildFileParser
from pants.build_graph.mutable_build_graph import MutableBuildGraph
from pants.engine.round_engine import RoundEngine
from pants.engine.subsystem.native import Native
from pants.goal.context import Context
from pants.goal.goal import Goal
from pants.goal.run_tracker import RunTracker
from pants.help.help_printer import HelpPrinter
from pants.java.nailgun_executor import NailgunProcessGroup
from pants.pantsd.subsystem.pants_daemon_launcher import PantsDaemonLauncher
from pants.reporting.reporting import Reporting
from pants.scm.subsystems.changed import Changed
from pants.source.source_root import SourceRootConfig
from pants.task.task import QuietTaskMixin
from pants.util.filtering import create_filters, wrap_filters
logger = logging.getLogger(__name__)
class GoalRunnerFactory(object):
def __init__(self, root_dir, options, build_config, run_tracker, reporting,
daemon_graph_helper=None, exiter=sys.exit):
"""
:param str root_dir: The root directory of the pants workspace (aka the "build root").
:param Options options: The global, pre-initialized Options instance.
:param BuildConfiguration build_config: A pre-initialized BuildConfiguration instance.
    :param RunTracker run_tracker: The global, pre-initialized/running RunTracker instance.
:param Reporting reporting: The global, pre-initialized Reporting instance.
:param LegacyGraphHelper daemon_graph_helper: A LegacyGraphHelper instance for graph reuse. (Optional)
:param func exiter: A function that accepts an exit code value and exits. (for tests, Optional)
"""
self._root_dir = root_dir
self._options = options
self._build_config = build_config
self._run_tracker = run_tracker
self._reporting = reporting
self._daemon_graph_helper = daemon_graph_helper
self._exiter = exiter
self._requested_goals = self._options.goals
self._help_request = self._options.help_request
self._build_file_parser = BuildFileParser(self._build_config, self._root_dir)
self._build_graph = None
self._address_mapper = None
self._global_options = options.for_global_scope()
self._tag = self._global_options.tag
self._fail_fast = self._global_options.fail_fast
self._explain = self._global_options.explain
self._kill_nailguns = self._global_options.kill_nailguns
def _handle_help(self, help_request):
"""Handle requests for `help` information."""
if help_request:
help_printer = HelpPrinter(self._options)
result = help_printer.print_help()
self._exiter(result)
def _init_graph(self, use_engine, pants_ignore_patterns, build_ignore_patterns,
exclude_target_regexps, target_specs, graph_helper=None):
"""Determine the BuildGraph, AddressMapper and spec_roots for a given run.
:param bool use_engine: Whether or not to use the v2 engine to construct the BuildGraph.
:param list pants_ignore_patterns: The pants ignore patterns from '--pants-ignore'.
:param list build_ignore_patterns: The build ignore patterns from '--build-ignore',
applied during BUILD file searching.
:param list exclude_target_regexps: Regular expressions for targets to be excluded.
:param list target_specs: The original target specs.
:param LegacyGraphHelper graph_helper: A LegacyGraphHelper to use for graph construction,
if available. This would usually come from the daemon.
:returns: A tuple of (BuildGraph, AddressMapper, spec_roots).
"""
# N.B. Use of the daemon implies use of the v2 engine.
if graph_helper or use_engine:
# The daemon may provide a `graph_helper`. If that's present, use it for graph construction.
graph_helper = graph_helper or EngineInitializer.setup_legacy_graph(
pants_ignore_patterns,
build_ignore_patterns=build_ignore_patterns,
exclude_target_regexps=exclude_target_regexps)
target_roots = TargetRoots.create(options=self._options,
build_root=self._root_dir,
change_calculator=graph_helper.change_calculator)
graph, address_mapper = graph_helper.create_build_graph(target_roots, self._root_dir)
return graph, address_mapper, target_roots.as_specs()
else:
spec_roots = TargetRoots.parse_specs(target_specs, self._root_dir)
address_mapper = BuildFileAddressMapper(self._build_file_parser,
get_project_tree(self._global_options),
build_ignore_patterns,
exclude_target_regexps)
return MutableBuildGraph(address_mapper), address_mapper, spec_roots
def _determine_goals(self, requested_goals):
"""Check and populate the requested goals for a given run."""
def is_quiet(goals):
return any(goal.has_task_of_type(QuietTaskMixin) for goal in goals) or self._explain
spec_parser = CmdLineSpecParser(self._root_dir)
for goal in requested_goals:
if self._address_mapper.is_valid_single_address(spec_parser.parse_spec(goal)):
logger.warning("Command-line argument '{0}' is ambiguous and was assumed to be "
"a goal. If this is incorrect, disambiguate it with ./{0}.".format(goal))
goals = [Goal.by_name(goal) for goal in requested_goals]
return goals, is_quiet(goals)
def _specs_to_targets(self, specs):
"""Populate the BuildGraph and target list from a set of input specs."""
with self._run_tracker.new_workunit(name='parse', labels=[WorkUnitLabel.SETUP]):
def filter_for_tag(tag):
return lambda target: tag in map(str, target.tags)
tag_filter = wrap_filters(create_filters(self._tag, filter_for_tag))
def generate_targets(specs):
for address in self._build_graph.inject_specs_closure(specs, self._fail_fast):
target = self._build_graph.get_target(address)
if tag_filter(target):
yield target
return list(generate_targets(specs))
def _maybe_launch_pantsd(self):
"""Launches pantsd if configured to do so."""
if self._global_options.enable_pantsd:
      # Avoid runtracker output if pantsd is disabled. Otherwise, show up to inform the user it's on.
with self._run_tracker.new_workunit(name='pantsd', labels=[WorkUnitLabel.SETUP]):
pantsd_launcher = PantsDaemonLauncher.Factory.global_instance().create(EngineInitializer)
pantsd_launcher.maybe_launch()
def _setup_context(self):
with self._run_tracker.new_workunit(name='setup', labels=[WorkUnitLabel.SETUP]):
self._build_graph, self._address_mapper, spec_roots = self._init_graph(
self._global_options.enable_v2_engine,
self._global_options.pants_ignore,
self._global_options.build_ignore,
self._global_options.exclude_target_regexp,
self._options.target_specs,
self._daemon_graph_helper
)
goals, is_quiet = self._determine_goals(self._requested_goals)
target_roots = self._specs_to_targets(spec_roots)
# Now that we've parsed the bootstrap BUILD files, and know about the SCM system.
self._run_tracker.run_info.add_scm_info()
# Update the Reporting settings now that we have options and goal info.
invalidation_report = self._reporting.update_reporting(self._global_options,
is_quiet,
self._run_tracker)
context = Context(options=self._options,
run_tracker=self._run_tracker,
target_roots=target_roots,
requested_goals=self._requested_goals,
build_graph=self._build_graph,
build_file_parser=self._build_file_parser,
address_mapper=self._address_mapper,
invalidation_report=invalidation_report)
return goals, context
def setup(self):
self._maybe_launch_pantsd()
self._handle_help(self._help_request)
goals, context = self._setup_context()
return GoalRunner(context=context,
goals=goals,
run_tracker=self._run_tracker,
kill_nailguns=self._kill_nailguns,
exiter=self._exiter)
class GoalRunner(object):
"""Lists installed goals or else executes a named goal."""
Factory = GoalRunnerFactory
def __init__(self, context, goals, run_tracker, kill_nailguns, exiter=sys.exit):
"""
:param Context context: The global, pre-initialized Context as created by GoalRunnerFactory.
:param list[Goal] goals: The list of goals to act on.
    :param RunTracker run_tracker: The global, pre-initialized/running RunTracker instance.
:param bool kill_nailguns: Whether or not to kill nailguns after the run.
:param func exiter: A function that accepts an exit code value and exits (for tests, Optional).
"""
self._context = context
self._goals = goals
self._run_tracker = run_tracker
self._kill_nailguns = kill_nailguns
self._exiter = exiter
@classmethod
def subsystems(cls):
"""Subsystems used outside of any task."""
return {
SourceRootConfig,
Reporting,
Reproducer,
RunTracker,
Changed.Factory,
Native.Factory,
PantsDaemonLauncher.Factory,
}
def _execute_engine(self):
workdir = self._context.options.for_global_scope().pants_workdir
if not workdir.endswith('.pants.d'):
self._context.log.error('Pants working directory should end with \'.pants.d\', currently it is {}\n'
.format(workdir))
return 1
unknown_goals = [goal.name for goal in self._goals if not goal.ordered_task_names()]
if unknown_goals:
self._context.log.error('Unknown goal(s): {}\n'.format(' '.join(unknown_goals)))
return 1
engine = RoundEngine()
result = engine.execute(self._context, self._goals)
if self._context.invalidation_report:
self._context.invalidation_report.report()
return result
def run(self):
should_kill_nailguns = self._kill_nailguns
try:
result = self._execute_engine()
if result:
self._run_tracker.set_root_outcome(WorkUnit.FAILURE)
except KeyboardInterrupt:
self._run_tracker.set_root_outcome(WorkUnit.FAILURE)
# On ctrl-c we always kill nailguns, otherwise they might keep running
# some heavyweight compilation and gum up the system during a subsequent run.
should_kill_nailguns = True
raise
except Exception:
self._run_tracker.set_root_outcome(WorkUnit.FAILURE)
raise
finally:
# Must kill nailguns only after run_tracker.end() is called, otherwise there may still
# be pending background work that needs a nailgun.
if should_kill_nailguns:
# TODO: This is JVM-specific and really doesn't belong here.
# TODO: Make this more selective? Only kill nailguns that affect state?
# E.g., checkstyle may not need to be killed.
NailgunProcessGroup().killall()
return result
|
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch, Mock, call
from nose_parameterized import parameterized
from netaddr import IPAddress, IPNetwork
from subprocess import CalledProcessError
from calico_ctl.bgp import *
from calico_ctl import container
from calico_ctl import utils
from pycalico.datastore_datatypes import Endpoint, IPPool
class TestContainer(unittest.TestCase):
@parameterized.expand([
({'<CONTAINER>':'node1', 'ip':1, 'add':1, '<IP>':'127.a.0.1'}, True),
({'<CONTAINER>':'node1', 'ip':1, 'add':1, '<IP>':'aa:bb::zz'}, True),
({'add':1, '<CONTAINER>':'node1', '<IP>':'127.a.0.1'}, True),
({'add':1, '<CONTAINER>':'node1', '<IP>':'aa:bb::zz'}, True)
])
def test_validate_arguments(self, case, sys_exit_called):
"""
Test validate_arguments for calicoctl container command
"""
with patch('sys.exit', autospec=True) as m_sys_exit:
# Call method under test
container.validate_arguments(case)
# Assert method exits if bad input
self.assertEqual(m_sys_exit.called, sys_exit_called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_add(self, m_netns, m_get_pool_or_exit, m_client,
m_get_container_info_or_exit, m_enforce_root):
"""
Test container_add method of calicoctl container command
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'},
'HostConfig': {'NetworkMode': "not host"}
}
m_client.get_endpoint.side_effect = KeyError
m_client.get_default_next_hops.return_value = 'next_hops'
# Call method under test
test_return = container.container_add('container1', '1.1.1.1', 'interface')
# Assert
m_enforce_root.assert_called_once_with()
m_get_container_info_or_exit.assert_called_once_with('container1')
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
m_get_pool_or_exit.assert_called_once_with(IPAddress('1.1.1.1'))
m_client.get_default_next_hops.assert_called_once_with(utils.hostname)
        # Check an endpoint object was returned
self.assertTrue(isinstance(test_return, Endpoint))
self.assertTrue(m_netns.create_veth.called)
self.assertTrue(m_netns.move_veth_into_ns.called)
self.assertTrue(m_netns.add_ip_to_ns_veth.called)
self.assertTrue(m_netns.add_ns_default_route.called)
self.assertTrue(m_netns.get_ns_veth_mac.called)
self.assertTrue(m_client.set_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_add_container_host_ns(self, m_client,
m_get_container_info_or_exit, m_enforce_root):
"""
Test container_add method of calicoctl container command when the
container shares the host namespace.
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'},
'HostConfig': {'NetworkMode': 'host'}
}
m_client.get_endpoint.side_effect = KeyError
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
m_enforce_root.assert_called_once_with()
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
def test_container_add_existing_container(
self, m_get_pool_or_exit, m_client, m_get_container_info_or_exit,
m_enforce_root):
"""
        Test container_add when the container is already networked into Calico.
        client.get_endpoint does not raise an exception, so an endpoint already exists.
        Assert that the system then exits and all expected calls are made
"""
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
# Assert only expected calls were made
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_get_pool_or_exit.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
def test_container_add_container_not_running(
self, m_get_pool_or_exit, m_client,
m_get_container_info_or_exit, m_enforce_root):
"""
Test container_add when a container is not running
get_container_info_or_exit returns a running state of value 0
Assert that the system then exits and all expected calls are made
"""
# Set up mock object
m_client.get_endpoint.side_effect = KeyError
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 0, 'Pid': 'Pid_info'}
}
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
# Assert only expected calls were made
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_get_pool_or_exit.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
def test_container_add_not_ipv4_configured(
self, m_get_pool_or_exit, m_client, m_get_container_info_or_exit,
m_enforce_root):
"""
Test container_add when the client cannot obtain next hop IPs
client.get_default_next_hops returns an empty dictionary, which produces
a KeyError when trying to determine the IP.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_client.get_endpoint.side_effect = KeyError
m_client.get_default_next_hops.return_value = {}
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
# Assert only expected calls were made
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_client.get_default_next_hops.called)
self.assertFalse(m_client.assign_address.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_add_ip_previously_assigned(
self, m_netns, m_get_pool_or_exit, m_client,
m_get_container_info_or_exit, m_enforce_root):
"""
Test container_add when an ip address is already assigned in pool
client.assign_address returns an empty list.
Assert that the system then exits and all expected calls are made
"""
# Set up mock object
m_client.get_endpoint.side_effect = KeyError
m_client.assign_address.return_value = []
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
# Assert only expected calls were made
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_client.get_default_next_hops.called)
self.assertTrue(m_client.assign_address.called)
self.assertFalse(m_netns.create_veth.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_id', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_remove(self, m_netns, m_client, m_get_container_id,
m_enforce_root):
"""
Test for container_remove of calicoctl container command
"""
# Set up mock objects
m_get_container_id.return_value = 666
ipv4_nets = set()
ipv4_nets.add(IPNetwork(IPAddress('1.1.1.1')))
ipv6_nets = set()
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv4_nets = ipv4_nets
m_endpoint.ipv6_nets = ipv6_nets
m_endpoint.endpoint_id = 12
m_endpoint.name = "eth1234"
ippool = IPPool('1.1.1.1/24')
m_client.get_endpoint.return_value = m_endpoint
m_client.get_ip_pools.return_value = [ippool]
# Call method under test
container.container_remove('container1')
# Assert
m_enforce_root.assert_called_once_with()
m_get_container_id.assert_called_once_with('container1')
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
self.assertEqual(m_client.unassign_address.call_count, 1)
m_netns.remove_veth.assert_called_once_with("eth1234")
m_client.remove_workload.assert_called_once_with(
utils.hostname, utils.ORCHESTRATOR_ID, 666)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_id', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_remove_no_endpoint(
self, m_client, m_get_container_id, m_enforce_root):
"""
Test for container_remove when the client cannot obtain an endpoint
client.get_endpoint raises a KeyError.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_client.get_endpoint.side_effect = KeyError
# Call function under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_remove, 'container1')
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_id.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_client.get_ip_pools.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_add_ipv4(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add with an ipv4 ip argument
Assert that the correct calls associated with an ipv4 address are made
"""
# Set up mock objects
pool_return = 'pool'
m_get_pool_or_exit.return_value = pool_return
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_endpoint = Mock()
m_client.get_endpoint.return_value = m_endpoint
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
ip_addr = IPAddress(ip)
interface = 'interface'
# Call method under test
container.container_ip_add(container_name, ip, interface)
# Assert
m_enforce_root.assert_called_once_with()
m_get_pool_or_exit.assert_called_once_with(ip_addr)
m_get_container_info_or_exit.assert_called_once_with(container_name)
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
m_client.assign_address.assert_called_once_with(pool_return, ip_addr)
m_endpoint.ipv4_nets.add.assert_called_once_with(IPNetwork(ip_addr))
m_client.update_endpoint.assert_called_once_with(m_endpoint)
m_netns.add_ip_to_ns_veth.assert_called_once_with(
'Pid_info', ip_addr, interface
)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_add_ipv6(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add with an ipv6 ip argument
Assert that the correct calls associated with an ipv6 address are made
"""
# Set up mock objects
pool_return = 'pool'
m_get_pool_or_exit.return_value = pool_return
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_endpoint = Mock()
m_client.get_endpoint.return_value = m_endpoint
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
ip_addr = IPAddress(ip)
interface = 'interface'
# Call method under test
container.container_ip_add(container_name, ip, interface)
# Assert
m_enforce_root.assert_called_once_with()
m_get_pool_or_exit.assert_called_once_with(ip_addr)
m_get_container_info_or_exit.assert_called_once_with(container_name)
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
m_client.assign_address.assert_called_once_with(pool_return, ip_addr)
m_endpoint.ipv6_nets.add.assert_called_once_with(IPNetwork(ip_addr))
m_client.update_endpoint.assert_called_once_with(m_endpoint)
m_netns.add_ip_to_ns_veth.assert_called_once_with(
'Pid_info', ip_addr, interface
)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client.get_endpoint', autospec=True)
def test_container_ip_add_container_not_running(
self, m_client_get_endpoint, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add when the container is not running
get_container_info_or_exit returns a running state of value 0.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 0, 'Pid': 'Pid_info'}
}
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertFalse(m_client_get_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.print_container_not_in_calico_msg', autospec=True)
def test_container_ip_add_container_not_in_calico(
self, m_print_container_not_in_calico_msg, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add when the container is not networked into calico
client.get_endpoint raises a KeyError.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_client.get_endpoint.return_value = Mock()
m_client.get_endpoint.side_effect = KeyError
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
        # Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
m_print_container_not_in_calico_msg.assert_called_once_with(container_name)
self.assertFalse(m_client.assign_address.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_add_fail_assign_address(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add when the client cannot assign an IP
client.assign_address returns an empty list.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_client.assign_address.return_value = []
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_netns.add_ip_to_ns_veth.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns.add_ip_to_ns_veth', autospec=True)
def test_container_ip_add_error_updating_datastore(
self, m_netns_add_ip_to_ns_veth, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add when the client fails to update endpoint
client.update_endpoint raises a KeyError.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_pool_or_exit.return_value = 'pool'
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_client.update_endpoint.side_effect = KeyError
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.assign_address.called)
m_client.unassign_address.assert_called_once_with('pool', ip)
self.assertFalse(m_netns_add_ip_to_ns_veth.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns.add_ip_to_ns_veth', autospec=True)
def test_container_ip_add_netns_error_ipv4(
self, m_netns_add_ip_to_ns_veth, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_add when netns cannot add an ipv4 to interface
netns.add_ip_to_ns_veth throws a CalledProcessError.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_get_pool_or_exit.return_value = 'pool'
m_endpoint = Mock()
m_client.get_endpoint.return_value = m_endpoint
err = CalledProcessError(
1, m_netns_add_ip_to_ns_veth, "Error updating container")
m_netns_add_ip_to_ns_veth.side_effect = err
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.assign_address.called)
self.assertTrue(m_netns_add_ip_to_ns_veth.called)
m_endpoint.ipv4_nets.remove.assert_called_once_with(
IPNetwork(IPAddress(ip))
)
m_client.update_endpoint.assert_has_calls([
call(m_endpoint), call(m_endpoint)])
m_client.unassign_address.assert_called_once_with('pool', ip)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.print_container_not_in_calico_msg', autospec=True)
@patch('calico_ctl.container.netns.add_ip_to_ns_veth', autospec=True)
def test_container_ip_add_netns_error_ipv6(
self, m_netns, m_print_container_not_in_calico_msg, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_add when netns cannot add an ipv6 to interface
netns.add_ip_to_ns_veth throws a CalledProcessError.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_get_pool_or_exit.return_value = 'pool'
m_endpoint = Mock()
m_client.get_endpoint.return_value = m_endpoint
err = CalledProcessError(1, m_netns, "Error updating container")
m_netns.side_effect = err
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.assign_address.called)
self.assertTrue(m_netns.called)
m_endpoint.ipv6_nets.remove.assert_called_once_with(
IPNetwork(IPAddress(ip))
)
m_client.update_endpoint.assert_has_calls([
call(m_endpoint), call(m_endpoint)])
m_client.unassign_address.assert_called_once_with('pool', ip)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_remove_ipv4(self, m_netns, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_remove with an ipv4 ip argument
"""
# Set up mock objects
m_get_pool_or_exit.return_value = 'pool'
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv4_nets = set()
ipv4_nets.add(IPNetwork(IPAddress('1.1.1.1')))
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv4_nets = ipv4_nets
m_client.get_endpoint.return_value = m_endpoint
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test
container.container_ip_remove(container_name, ip, interface)
# Assert
m_enforce_root.assert_called_once_with()
m_get_pool_or_exit.assert_called_once_with(IPAddress(ip))
m_get_container_info_or_exit.assert_called_once_with(container_name)
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
m_client.update_endpoint.assert_called_once_with(m_endpoint)
m_netns.remove_ip_from_ns_veth.assert_called_once_with(
'Pid_info',
IPAddress(ip),
interface
)
m_client.unassign_address.assert_called_once_with('pool', ip)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_remove_ipv6(self, m_netns, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_remove with an ipv6 ip argument
"""
# Set up mock objects
m_get_pool_or_exit.return_value = 'pool'
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv6_nets = set()
ipv6_nets.add(IPNetwork(IPAddress('1::1')))
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv6_nets = ipv6_nets
m_client.get_endpoint.return_value = m_endpoint
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test
container.container_ip_remove(container_name, ip, interface)
# Assert
m_enforce_root.assert_called_once_with()
m_get_pool_or_exit.assert_called_once_with(IPAddress(ip))
m_get_container_info_or_exit.assert_called_once_with(container_name)
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
m_client.update_endpoint.assert_called_once_with(m_endpoint)
m_netns.remove_ip_from_ns_veth.assert_called_once_with(
'Pid_info',
IPAddress(ip),
interface
)
m_client.unassign_address.assert_called_once_with('pool', ip)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_ip_remove_not_running(
self, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_remove when the container is not running
get_container_info_or_exit returns a running state of value 0.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 0, 'Pid': 'Pid_info'}
}
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertFalse(m_client.get_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_ip_remove_ip_not_assigned(
self, m_client, m_get_container_info_or_exit, m_get_pool_or_exit,
m_enforce_root):
"""
Test container_ip_remove when an IP address is not assigned to a container
client.get_endpoint returns an endpoint with no ip nets
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv6_nets = set()
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv6_nets = ipv6_nets
m_client.get_endpoint.return_value = m_endpoint
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_client.update_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_ip_remove_container_not_on_calico(
self, m_client, m_get_container_info_or_exit, m_get_pool_or_exit,
m_enforce_root):
"""
Test for container_ip_remove when container is not networked into Calico
client.get_endpoint raises a KeyError
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_client.get_endpoint.side_effect = KeyError
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_client.update_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_remove_fail_updating_datastore(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_remove when client fails to update endpoint in datastore
client.update_endpoint throws a KeyError
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv6_nets = set()
ipv6_nets.add(IPNetwork(IPAddress('1::1')))
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv6_nets = ipv6_nets
m_client.get_endpoint.return_value = m_endpoint
m_client.update_endpoint.side_effect = KeyError
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.update_endpoint.called)
self.assertFalse(m_netns.remove_ip_from_ns_veth.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_remove_netns_error(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_remove when client fails on removing ip from interface
netns.remove_ip_from_ns_veth raises a CalledProcessError
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv6_nets = set()
ipv6_nets.add(IPNetwork(IPAddress('1::1')))
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv6_nets = ipv6_nets
m_client.get_endpoint.return_value = m_endpoint
err = CalledProcessError(1, m_netns, "Error removing ip")
m_netns.remove_ip_from_ns_veth.side_effect = err
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.update_endpoint.called)
self.assertTrue(m_netns.remove_ip_from_ns_veth.called)
self.assertFalse(m_client.unassign_address.called)
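# Hypothetical entry point (not part of the original test module): allows the
# suite above to be run directly with `python <this file>` instead of relying
# on an external test runner.
if __name__ == '__main__':
    unittest.main()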
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
function for calculating the convergence of an x, y data set
main api:
test_conv(xs, ys, name, tol)
tries to fit multiple functions to the x, y data
calculates which function fits best
for tol < 0
returns the x value for which y is converged within tol of the assymtotic value
for tol > 0
returns the x_value for which dy(x)/dx < tol for all x >= x_value, conv is true is such a x_value exists
for the best fit a gnuplot line is printed plotting the data, the function and the assymthotic value
"""
import random
import string
import numpy as np
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "June 2014"
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
"""
Args:
size ():
chars ():
Returns:
"""
return "".join(random.choice(chars) for _ in range(size))
class SplineInputError(Exception):
"""
Error for Spline input
"""
def __init__(self, msg):
"""
Args:
msg (str): Message
"""
self.msg = msg
def get_derivatives(xs, ys, fd=False):
"""
    Return the derivatives of y(x) at the points x.
    If scipy is available a spline is fitted to calculate the derivatives.
    If scipy is not available the left and right slopes are calculated; if both exist their average is returned.
    Setting fd=True always forces the finite difference slopes.
"""
try:
if fd:
raise SplineInputError("no spline wanted")
if len(xs) < 4:
er = SplineInputError("too few data points")
raise er
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(xs, ys)
d = spline.derivative(1)(xs)
except (ImportError, SplineInputError):
        d = []
        for n in range(0, len(xs), 1):
            # average the one-sided finite-difference slopes that exist at this point
            m, left, right = 0, 0.0, 0.0
            if n > 0:
                left = (ys[n] - ys[n - 1]) / (xs[n] - xs[n - 1])
                m += 1
            if n < len(xs) - 1:
                right = (ys[n + 1] - ys[n]) / (xs[n + 1] - xs[n])
                m += 1
            d.append((left + right) / m)
return d
"""
functions used in the fitting procedure, with initial guesses
"""
def print_and_raise_error(xs, ys, name):
"""
Args:
xs ():
ys ():
name ():
Returns:
"""
print("Index error in", name)
print("ys: ", ys)
print("xs: ", xs)
raise RuntimeError
def reciprocal(x, a, b, n):
"""
reciprocal function to the power n to fit convergence data
"""
if n < 1:
n = 1
elif n > 5:
n = 5
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** n)
y = np.array(y_l)
else:
y = a + b / x ** n
return y
def p0_reciprocal(xs, ys):
"""
predictor for first guess for reciprocal
"""
a0 = ys[len(ys) - 1]
b0 = ys[0] * xs[0] - a0 * xs[0]
return [a0, b0, 1]
def exponential(x, a, b, n):
"""
exponential function base n to fit convergence data
"""
if n < 1.000001:
n = 1.000001
elif n > 1.2:
n = 1.2
if b < -10:
b = -10
elif b > 10:
b = 10
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b * n ** -x_v)
y = np.array(y_l)
else:
y = a + b * n ** -x
return y
def p0_exponential(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
n0 = 1.005
b0 = (n0 ** -xs[-1] - n0 ** -xs[1]) / (ys[-1] - ys[1])
a0 = ys[1] - b0 * n0 ** -xs[1]
# a0 = ys[-1]
# b0 = (ys[0] - a0) / n0 ** xs[0]
return [a0, b0, n0]
def single_reciprocal(x, a, b, c):
"""
reciprocal function to fit convergence data
"""
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / (x_v - c))
y = np.array(y_l)
else:
y = a + b / (x - c)
return y
def p0_single_reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
c = 1
b = (1 / (xs[-1] - c) - 1 / (xs[1] - c)) / (ys[-1] - ys[1])
a = ys[1] - b / (xs[1] - c)
return [a, b, c]
def simple_reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v)
y = np.array(y_l)
else:
y = a + b / x
return y
def p0_simple_reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
# b = (ys[-1] - ys[1]) / (1/xs[-1] - 1/xs[1])
# a = ys[1] - b / xs[1]
b = (ys[-1] - ys[-2]) / (1 / (xs[-1]) - 1 / (xs[-2]))
a = ys[-2] - b / (xs[-2])
return [a, b]
def simple_2reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
c = 2
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** c)
y = np.array(y_l)
else:
y = a + b / x ** c
return y
def p0_simple_2reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
c = 2
b = (ys[-1] - ys[1]) / (1 / xs[-1] ** c - 1 / xs[1] ** c)
a = ys[1] - b / xs[1] ** c
return [a, b]
def simple_4reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
c = 4
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** c)
y = np.array(y_l)
else:
y = a + b / x ** c
return y
def p0_simple_4reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
c = 4
b = (ys[-1] - ys[1]) / (1 / xs[-1] ** c - 1 / xs[1] ** c)
a = ys[1] - b / xs[1] ** c
return [a, b]
def simple_5reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
c = 0.5
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** c)
y = np.array(y_l)
else:
y = a + b / x ** c
return y
def p0_simple_5reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
c = 0.5
b = (ys[-1] - ys[1]) / (1 / xs[-1] ** c - 1 / xs[1] ** c)
a = ys[1] - b / xs[1] ** c
return [a, b]
def extrapolate_simple_reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
b = (ys[-2] - ys[-1]) / (1 / (xs[-2]) - 1 / (xs[-1]))
a = ys[-1] - b / (xs[-1])
return [a, b]
def extrapolate_reciprocal(xs, ys, n, noise):
"""
return the parameters such that a + b / x^n hits the last two data points
"""
if len(xs) > 4 and noise:
y1 = (ys[-3] + ys[-4]) / 2
y2 = (ys[-1] + ys[-2]) / 2
x1 = (xs[-3] + xs[-4]) / 2
x2 = (xs[-1] + xs[-2]) / 2
try:
b = (y1 - y2) / (1 / x1 ** n - 1 / x2 ** n)
a = y2 - b / x2 ** n
except IndexError:
print_and_raise_error(xs, ys, "extrapolate_reciprocal")
else:
try:
b = (ys[-2] - ys[-1]) / (1 / (xs[-2]) ** n - 1 / (xs[-1]) ** n)
a = ys[-1] - b / (xs[-1]) ** n
except IndexError:
print_and_raise_error(xs, ys, "extrapolate_reciprocal")
return [a, b, n]
def measure(function, xs, ys, popt, weights):
"""
    Measure the quality of a fit as the weighted sum of squared residuals.
"""
m = 0
n = 0
for x in xs:
try:
if len(popt) == 2:
m += (ys[n] - function(x, popt[0], popt[1])) ** 2 * weights[n]
elif len(popt) == 3:
m += (ys[n] - function(x, popt[0], popt[1], popt[2])) ** 2 * weights[n]
else:
raise NotImplementedError
n += 1
except IndexError:
raise RuntimeError("y does not exist for x = ", x, " this should not happen")
return m
def get_weights(xs, ys, mode=2):
"""
Args:
xs ():
ys ():
mode ():
Returns:
"""
ds = get_derivatives(xs, ys, fd=True)
if mode == 1:
mind = np.inf
for d in ds:
mind = min(abs(d), mind)
weights = []
for d in ds:
weights.append(abs((mind / d)))
    elif mode == 2:
maxxs = max(xs) ** 2
weights = []
for x in xs:
weights.append(x ** 2 / maxxs)
else:
weights = [1] * len(xs)
return weights
def multi_curve_fit(xs, ys, verbose):
"""
fit multiple functions to the x, y data, return the best fit
"""
# functions = {exponential: p0_exponential, reciprocal: p0_reciprocal, single_reciprocal: p0_single_reciprocal}
functions = {
exponential: p0_exponential,
reciprocal: p0_reciprocal,
# single_reciprocal: p0_single_reciprocal,
simple_reciprocal: p0_simple_reciprocal,
simple_2reciprocal: p0_simple_2reciprocal,
simple_4reciprocal: p0_simple_4reciprocal,
simple_5reciprocal: p0_simple_5reciprocal,
}
from scipy.optimize import curve_fit
fit_results = {}
best = ["", np.inf]
for k, v in functions.items():
try:
weights = get_weights(xs, ys)
popt, pcov = curve_fit(
k,
xs,
ys,
v(xs, ys),
maxfev=8000,
sigma=weights,
)
pcov = []
m = measure(k, xs, ys, popt, weights)
fit_results.update({k: {"measure": m, "popt": popt, "pcov": pcov}})
for f, v in fit_results.items():
if v["measure"] <= best[1]:
best = f, v["measure"]
if verbose:
print(str(k), m)
except RuntimeError:
print("no fit found for ", k)
return fit_results[best[0]]["popt"], fit_results[best[0]]["pcov"], best
def multi_reciprocal_extra(xs, ys, noise=False):
"""
    For a series of powers n, calculate the parameters for which the last two data points lie exactly on the curve.
    With these parameters measure how well the remaining data points fit.
    Return the best fit.
"""
ns = np.linspace(0.5, 6.0, num=56)
best = ["", np.inf]
fit_results = {}
weights = get_weights(xs, ys)
for n in ns:
popt = extrapolate_reciprocal(xs, ys, n, noise)
m = measure(reciprocal, xs, ys, popt, weights)
pcov = []
fit_results.update({n: {"measure": m, "popt": popt, "pcov": pcov}})
for n, v in fit_results.items():
if v["measure"] <= best[1]:
best = reciprocal, v["measure"], n
return fit_results[best[2]]["popt"], fit_results[best[2]]["pcov"], best
def print_plot_line(function, popt, xs, ys, name, tol=0.05, extra=""):
"""
print the gnuplot command line to plot the x, y data with the fitted function using the popt parameters
"""
idp = id_generator()
with open("convdat." + str(idp), mode="w") as f:
for n in range(0, len(ys), 1):
f.write(str(xs[n]) + " " + str(ys[n]) + "\n")
tol = abs(tol)
line = "plot 'convdat.%s' pointsize 4 lt 0, " % idp
line += "%s lt 3, %s lt 4, %s lt 4, " % (popt[0], popt[0] - tol, popt[0] + tol)
if function is exponential:
line += "%s + %s * %s ** -x" % (
popt[0],
popt[1],
min(max(1.00001, popt[2]), 1.2),
)
elif function is reciprocal:
line += "%s + %s / x**%s" % (popt[0], popt[1], min(max(0.5, popt[2]), 6))
elif function is single_reciprocal:
line += "%s + %s / (x - %s)" % (popt[0], popt[1], popt[2])
elif function is simple_reciprocal:
line += "%s + %s / x" % (popt[0], popt[1])
elif function is simple_2reciprocal:
line += "%s + %s / x**2" % (popt[0], popt[1])
elif function is simple_4reciprocal:
line += "%s + %s / x**4" % (popt[0], popt[1])
elif function is simple_5reciprocal:
line += "%s + %s / x**0.5" % (popt[0], popt[1])
else:
print(function, " no plot ")
with open("plot-fits", mode="a") as f:
f.write('set title "' + name + " - " + extra + '"\n')
f.write("set output '" + name + "-" + idp + ".gif'" + "\n")
f.write("set yrange [" + str(popt[0] - 5 * tol) + ":" + str(popt[0] + 5 * tol) + "]\n")
f.write(line + "\n")
f.write("pause -1 \n")
def determine_convergence(xs, ys, name, tol=0.0001, extra="", verbose=False, mode="extra", plots=True):
"""
    Test at which x_value dy(x)/dx < tol for all x >= x_value; conv is True if such an x_value exists.
    For tol < 0, test instead at which x_value |y(x) - y_asymptotic| < |tol|.
    Returns [conv, x_value, y_value, n_value, asymptotic_value, slope_at_x_value].
"""
if len(xs) != len(ys):
raise RuntimeError("the range of x and y are not equal")
conv = False
x_value = float("inf")
y_value = None
n_value = None
popt = [None, None, None]
if len(xs) > 2:
ds = get_derivatives(xs[0 : len(ys)], ys)
try:
if None not in ys:
if mode == "fit":
popt, pcov, func = multi_curve_fit(xs, ys, verbose)
elif mode == "extra":
res = multi_reciprocal_extra(xs, ys)
if res is not None:
popt, pcov, func = multi_reciprocal_extra(xs, ys)
else:
print(xs, ys)
popt, pcov = None, None
elif mode == "extra_noise":
popt, pcov, func = multi_reciprocal_extra(xs, ys, noise=True)
else:
raise NotImplementedError("unknown mode for test conv")
if func[1] > abs(tol):
print(
"warning function ",
func[0],
" as the best fit but not a good fit: ",
func[1],
)
# todo print this to file via a method in helper, as dict
if plots:
with open(name + ".fitdat", mode="a") as f:
f.write("{")
f.write('"popt": ' + str(popt) + ", ")
f.write('"pcov": ' + str(pcov) + ", ")
f.write('"data": [')
for n in range(0, len(ys), 1):
f.write("[" + str(xs[n]) + " " + str(ys[n]) + "]")
f.write("]}\n")
print_plot_line(func[0], popt, xs, ys, name, tol=tol, extra=extra)
except ImportError:
popt, pcov = None, None
for n in range(0, len(ds), 1):
if verbose:
print(n, ys[n])
print(ys)
if tol < 0:
if popt[0] is not None:
test = abs(popt[0] - ys[n])
else:
test = float("inf")
else:
test = abs(ds[n])
if verbose:
print(test)
if test < abs(tol):
if verbose:
print("converged")
conv = True
if xs[n] < x_value:
x_value = xs[n]
y_value = ys[n]
n_value = n
else:
if verbose:
print("not converged")
conv = False
x_value = float("inf")
if n_value is None:
return [conv, x_value, y_value, n_value, popt[0], None]
return [conv, x_value, y_value, n_value, popt[0], ds[n_value]]
return [conv, x_value, y_value, n_value, popt[0], None]
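# Minimal usage sketch (not part of the original module; referenced from the
# module docstring). Fits 1/x-type data and reports where it is converged to
# within |tol| of the asymptotic value. Requires numpy; scipy is optional
# (finite differences are used as a fallback).
if __name__ == "__main__":
    demo_xs = list(range(2, 20))
    demo_ys = [3.0 + 1.5 / x for x in demo_xs]
    # tol < 0: converged once |y(x) - asymptotic value| < |tol|
    result = determine_convergence(demo_xs, demo_ys, name="demo", tol=-0.1, plots=False)
    print("converged:", result[0], "at x =", result[1], "asymptotic value:", result[4])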
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author Mark McClain (DreamHost)
"""folsom initial database
Revision ID: folsom
Revises: None
Create Date: 2012-12-03 09:14:50.579765
"""
PLUGINS = {
'bigswitch': 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2',
'brocade': 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2',
'cisco': 'neutron.plugins.cisco.network_plugin.PluginV2',
'lbr': 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
'meta': 'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2',
'ml2': 'neutron.plugins.ml2.plugin.Ml2Plugin',
'nec': 'neutron.plugins.nec.nec_plugin.NECPluginV2',
'nvp': 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'ovs': 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
'plumgrid': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.'
'NeutronPluginPLUMgridV2',
'ryu': 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2',
}
L3_CAPABLE = [
PLUGINS['lbr'],
PLUGINS['meta'],
PLUGINS['ml2'],
PLUGINS['nec'],
PLUGINS['ovs'],
PLUGINS['ryu'],
PLUGINS['brocade'],
PLUGINS['plumgrid'],
]
FOLSOM_QUOTA = [
PLUGINS['lbr'],
PLUGINS['ml2'],
PLUGINS['nvp'],
PLUGINS['ovs'],
]
# revision identifiers, used by Alembic.
revision = 'folsom'
down_revision = None
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
from neutron.db.migration.alembic_migrations import common_ext_ops
# NOTE: This is a special migration that creates a Folsom compatible database.
def upgrade(active_plugins=None, options=None):
# general model
upgrade_base()
if migration.should_run(active_plugins, L3_CAPABLE):
common_ext_ops.upgrade_l3()
if migration.should_run(active_plugins, FOLSOM_QUOTA):
common_ext_ops.upgrade_quota(options)
if PLUGINS['lbr'] in active_plugins:
upgrade_linuxbridge()
elif PLUGINS['ovs'] in active_plugins:
upgrade_ovs()
elif PLUGINS['cisco'] in active_plugins:
upgrade_cisco()
# Cisco plugin imports OVS models too
upgrade_ovs()
elif PLUGINS['meta'] in active_plugins:
upgrade_meta()
elif PLUGINS['nec'] in active_plugins:
upgrade_nec()
elif PLUGINS['ryu'] in active_plugins:
upgrade_ryu()
elif PLUGINS['brocade'] in active_plugins:
upgrade_brocade()
# Brocade plugin imports linux bridge models too
upgrade_linuxbridge()
def upgrade_base():
op.create_table(
'networks',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.Column('admin_state_up', sa.Boolean(), nullable=True),
sa.Column('shared', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'subnets',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('network_id', sa.String(length=36), nullable=True),
sa.Column('ip_version', sa.Integer(), nullable=False),
sa.Column('cidr', sa.String(length=64), nullable=False),
sa.Column('gateway_ip', sa.String(length=64), nullable=True),
sa.Column('enable_dhcp', sa.Boolean(), nullable=True),
sa.Column('shared', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'ports',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('mac_address', sa.String(length=32), nullable=False),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('status', sa.String(length=16), nullable=False),
sa.Column('device_id', sa.String(length=255), nullable=False),
sa.Column('device_owner', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'dnsnameservers',
sa.Column('address', sa.String(length=128), nullable=False),
sa.Column('subnet_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('address', 'subnet_id')
)
op.create_table(
'ipallocations',
sa.Column('port_id', sa.String(length=36), nullable=True),
sa.Column('ip_address', sa.String(length=64), nullable=False),
sa.Column('subnet_id', sa.String(length=36), nullable=False),
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('expiration', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('ip_address', 'subnet_id', 'network_id')
)
op.create_table(
'routes',
sa.Column('destination', sa.String(length=64), nullable=False),
sa.Column('nexthop', sa.String(length=64), nullable=False),
sa.Column('subnet_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('destination', 'nexthop', 'subnet_id')
)
op.create_table(
'ipallocationpools',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('subnet_id', sa.String(length=36), nullable=True),
sa.Column('first_ip', sa.String(length=64), nullable=False),
sa.Column('last_ip', sa.String(length=64), nullable=False),
sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'ipavailabilityranges',
sa.Column('allocation_pool_id', sa.String(length=36), nullable=False),
sa.Column('first_ip', sa.String(length=64), nullable=False),
sa.Column('last_ip', sa.String(length=64), nullable=False),
sa.ForeignKeyConstraint(['allocation_pool_id'],
['ipallocationpools.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip')
)
def upgrade_linuxbridge():
op.create_table(
'network_states',
sa.Column('physical_network', sa.String(length=64), nullable=False),
sa.Column('vlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('physical_network', 'vlan_id')
)
op.create_table(
'network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('physical_network', sa.String(length=64), nullable=True),
sa.Column('vlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id')
)
def upgrade_ovs():
op.create_table(
'ovs_tunnel_endpoints',
sa.Column('ip_address', sa.String(length=64), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('ip_address')
)
op.create_table(
'ovs_tunnel_ips',
sa.Column('ip_address', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('ip_address')
)
op.create_table(
'ovs_vlan_allocations',
sa.Column('physical_network', sa.String(length=64), nullable=False),
sa.Column('vlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('physical_network', 'vlan_id')
)
op.create_table(
'ovs_tunnel_allocations',
sa.Column('tunnel_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('tunnel_id')
)
op.create_table(
'ovs_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('network_type', sa.String(length=32), nullable=False),
sa.Column('physical_network', sa.String(length=64), nullable=True),
sa.Column('segmentation_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id')
)
def upgrade_meta():
op.create_table(
'networkflavors',
sa.Column('flavor', sa.String(length=255)),
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id')
)
op.create_table(
'routerflavors',
sa.Column('flavor', sa.String(length=255)),
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('router_id')
)
def upgrade_nec():
op.create_table(
'ofctenants',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('quantum_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'ofcnetworks',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('quantum_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'ofcports',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('quantum_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'ofcfilters',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('quantum_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'portinfos',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('datapath_id', sa.String(length=36), nullable=False),
sa.Column('port_no', sa.Integer(), nullable=False),
sa.Column('vlan_id', sa.Integer(), nullable=False),
sa.Column('mac', sa.String(length=32), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'packetfilters',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('priority', sa.Integer(), nullable=False),
sa.Column('action', sa.String(16), nullable=False),
sa.Column('in_port', sa.String(36), nullable=False),
sa.Column('src_mac', sa.String(32), nullable=False),
sa.Column('dst_mac', sa.String(32), nullable=False),
sa.Column('eth_type', sa.Integer(), nullable=False),
sa.Column('src_cidr', sa.String(64), nullable=False),
sa.Column('dst_cidr', sa.String(64), nullable=False),
sa.Column('protocol', sa.String(16), nullable=False),
sa.Column('src_port', sa.Integer(), nullable=False),
sa.Column('dst_port', sa.Integer(), nullable=False),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('status', sa.String(16), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
def upgrade_ryu():
op.create_table(
'ofp_server',
sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('address', sa.String(255)),
sa.Column('host_type', sa.String(255)),
sa.PrimaryKeyConstraint('id')
)
def upgrade_brocade():
op.create_table(
'brocadenetworks',
sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('vlan', sa.String(10)),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'brocadeports',
sa.Column('port_id', sa.String(36), nullable=False),
sa.Column('network_id', sa.String(36)),
sa.Column('admin_state_up', sa.Boolean()),
sa.Column('physical_interface', sa.String(36)),
sa.Column('vlan_id', sa.String(10)),
sa.Column('tenant_id', sa.String(36)),
sa.PrimaryKeyConstraint('port_id')
)
def upgrade_cisco():
op.create_table(
'cisco_vlan_ids',
sa.Column('vlan_id', sa.Integer(), autoincrement=True),
sa.Column('vlan_used', sa.Boolean()),
sa.PrimaryKeyConstraint('vlan_id')
)
op.create_table(
'cisco_vlan_bindings',
sa.Column('vlan_id', sa.Integer(), autoincrement=True),
sa.Column('vlan_name', sa.String(255)),
sa.Column('network_id', sa.String(255), nullable=False),
sa.PrimaryKeyConstraint('vlan_id')
)
op.create_table(
'portprofiles',
sa.Column('uuid', sa.String(255), nullable=False),
sa.Column('name', sa.String(255)),
sa.Column('vlan_id', sa.Integer()),
sa.Column('qos', sa.String(255)),
sa.PrimaryKeyConstraint('uuid')
)
op.create_table(
'portprofile_bindings',
sa.Column('id', sa.Integer(), autoincrement=True),
sa.Column('tenant_id', sa.String(255)),
sa.Column('port_id', sa.String(255), nullable=False),
sa.Column('portprofile_id', sa.String(255), nullable=False),
sa.Column('default', sa.Boolean()),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ),
sa.ForeignKeyConstraint(['portprofile_id'], ['portprofiles.uuid'], ),
)
op.create_table(
'qoss', # yes two S's
sa.Column('qos_id', sa.String(255)),
sa.Column('tenant_id', sa.String(255)),
sa.Column('qos_name', sa.String(255)),
sa.Column('qos_desc', sa.String(255)),
sa.PrimaryKeyConstraint('tenant_id', 'qos_name')
)
op.create_table(
'credentials',
sa.Column('credential_id', sa.String(255)),
sa.Column('tenant_id', sa.String(255)),
sa.Column('credential_name', sa.String(255)),
sa.Column('user_name', sa.String(255)),
sa.Column('password', sa.String(255)),
sa.PrimaryKeyConstraint('tenant_id', 'credential_name')
)
op.create_table(
'port_bindings',
sa.Column('id', sa.Integer(), autoincrement=True),
sa.Column('port_id', sa.String(255), nullable=False),
sa.Column('blade_intf_dn', sa.String(255), nullable=False),
sa.Column('portprofile_name', sa.String(255)),
sa.Column('vlan_name', sa.String(255)),
sa.Column('vlan_id', sa.Integer()),
sa.Column('qos', sa.String(255)),
sa.Column('tenant_id', sa.String(255)),
sa.Column('instance_id', sa.String(255)),
sa.Column('vif_id', sa.String(255)),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'nexusport_bindings',
sa.Column('id', sa.Integer(), primary_key=True, autoincrement=True),
sa.Column('port_id', sa.String(255)),
        sa.Column('vlan_id', sa.Integer()),
sa.PrimaryKeyConstraint('id')
)
def downgrade(active_plugins=None, options=None):
if PLUGINS['lbr'] in active_plugins:
downgrade_linuxbridge()
elif PLUGINS['ovs'] in active_plugins:
downgrade_ovs()
elif PLUGINS['cisco'] in active_plugins:
# Cisco plugin imports OVS models too
downgrade_ovs()
downgrade_cisco()
elif PLUGINS['meta'] in active_plugins:
downgrade_meta()
elif PLUGINS['nec'] in active_plugins:
downgrade_nec()
elif PLUGINS['ryu'] in active_plugins:
downgrade_ryu()
elif PLUGINS['brocade'] in active_plugins:
# Brocade plugin imports linux bridge models too
downgrade_brocade()
downgrade_linuxbridge()
if migration.should_run(active_plugins, FOLSOM_QUOTA):
common_ext_ops.downgrade_quota(options)
if migration.should_run(active_plugins, L3_CAPABLE):
common_ext_ops.downgrade_l3()
downgrade_base()
def downgrade_base():
drop_tables(
'ipavailabilityranges',
'ipallocationpools',
'routes',
'ipallocations',
'dnsnameservers',
'ports',
'subnets',
'networks'
)
def downgrade_linuxbridge():
drop_tables('network_bindings', 'network_states')
def downgrade_ovs():
drop_tables(
'ovs_network_bindings',
'ovs_tunnel_allocations',
'ovs_vlan_allocations',
'ovs_tunnel_ips',
'ovs_tunnel_endpoints'
)
def downgrade_meta():
drop_tables('routerflavors', 'networkflavors')
def downgrade_nec():
drop_tables(
'packetfilters',
'portinfos',
'ofcfilters',
'ofcports',
'ofcnetworks',
'ofctenants'
)
def downgrade_ryu():
op.drop_table('ofp_server')
def downgrade_brocade():
op.drop_table('brocadenetworks')
op.drop_table('brocadeports')
def downgrade_cisco():
    drop_tables(
        'nexusport_bindings',
'port_bindings',
'credentials',
'qoss',
'portprofile_bindings',
'portprofiles',
'cisco_vlan_bindings',
'cisco_vlan_ids'
)
def drop_tables(*tables):
for table in tables:
op.drop_table(table)
|
|
from __future__ import absolute_import, print_function
import traceback
import sys
import click
import itertools
from netlib import tcp
from netlib.http import CONTENT_MISSING
import netlib.utils
from . import flow, filt, contentviews
from .exceptions import ContentViewException
class DumpError(Exception):
pass
class Options(object):
attributes = [
"app",
"app_host",
"app_port",
"anticache",
"anticomp",
"client_replay",
"filtstr",
"flow_detail",
"keepserving",
"kill",
"no_server",
"nopop",
"refresh_server_playback",
"replacements",
"rfile",
"rheaders",
"setheaders",
"server_replay",
"scripts",
"showhost",
"stickycookie",
"stickyauth",
"stream_large_bodies",
"verbosity",
"outfile",
"replay_ignore_content",
"replay_ignore_params",
"replay_ignore_payload_params",
"replay_ignore_host"
]
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
for i in self.attributes:
if not hasattr(self, i):
setattr(self, i, None)
class DumpMaster(flow.FlowMaster):
def __init__(self, server, options, outfile=None):
flow.FlowMaster.__init__(self, server, flow.State())
self.outfile = outfile
self.o = options
self.anticache = options.anticache
self.anticomp = options.anticomp
self.showhost = options.showhost
self.replay_ignore_params = options.replay_ignore_params
self.replay_ignore_content = options.replay_ignore_content
self.replay_ignore_host = options.replay_ignore_host
self.refresh_server_playback = options.refresh_server_playback
self.replay_ignore_payload_params = options.replay_ignore_payload_params
self.set_stream_large_bodies(options.stream_large_bodies)
if self.server and self.server.config.http2 and not tcp.HAS_ALPN: # pragma: no cover
print("ALPN support missing (OpenSSL 1.0.2+ required)!\n"
"HTTP/2 is disabled. Use --no-http2 to silence this warning.",
file=sys.stderr)
if options.filtstr:
self.filt = filt.parse(options.filtstr)
else:
self.filt = None
if options.stickycookie:
self.set_stickycookie(options.stickycookie)
if options.stickyauth:
self.set_stickyauth(options.stickyauth)
if options.outfile:
err = self.start_stream_to_path(
options.outfile[0],
options.outfile[1]
)
if err:
raise DumpError(err)
if options.replacements:
for i in options.replacements:
self.replacehooks.add(*i)
if options.setheaders:
for i in options.setheaders:
self.setheaders.add(*i)
if options.server_replay:
self.start_server_playback(
self._readflow(options.server_replay),
options.kill, options.rheaders,
not options.keepserving,
options.nopop,
options.replay_ignore_params,
options.replay_ignore_content,
options.replay_ignore_payload_params,
options.replay_ignore_host
)
if options.client_replay:
self.start_client_playback(
self._readflow(options.client_replay),
not options.keepserving
)
scripts = options.scripts or []
for command in scripts:
err = self.load_script(command, use_reloader=True)
if err:
raise DumpError(err)
if options.rfile:
try:
self.load_flows_file(options.rfile)
except flow.FlowReadError as v:
self.add_event("Flow file corrupted.", "error")
raise DumpError(v)
if self.o.app:
self.start_app(self.o.app_host, self.o.app_port)
def _readflow(self, paths):
"""
        Utility function that reads flows from a list of paths,
        or raises a DumpError if that fails.
"""
try:
return flow.read_flows_from_paths(paths)
except flow.FlowReadError as e:
raise DumpError(e.strerror)
def add_event(self, e, level="info"):
needed = dict(error=0, info=1, debug=2).get(level, 1)
if self.o.verbosity >= needed:
self.echo(
e,
fg="red" if level == "error" else None,
dim=(level == "debug"),
err=(level == "error")
)
@staticmethod
def indent(n, text):
l = str(text).strip().splitlines()
pad = " " * n
return "\n".join(pad + i for i in l)
def echo(self, text, indent=None, **style):
if indent:
text = self.indent(indent, text)
click.secho(text, file=self.outfile, **style)
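    # Detail-level summary (added note derived from the methods below, not part
    # of the original file):
    #   flow_detail == 0 -> echo_flow() prints nothing
    #   flow_detail >= 1 -> request/response summary lines
    #   flow_detail >= 2 -> headers as well
    #   flow_detail == 3 -> message bodies, truncated to 70 content lines
    #   flow_detail >= 4 -> full message bodies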
def _echo_message(self, message):
if self.o.flow_detail >= 2:
headers = "\r\n".join(
"{}: {}".format(
click.style(k, fg="blue", bold=True),
click.style(v, fg="blue"))
for k, v in message.headers.fields
)
self.echo(headers, indent=4)
if self.o.flow_detail >= 3:
if message.content == CONTENT_MISSING:
self.echo("(content missing)", indent=4)
elif message.content:
self.echo("")
try:
type, lines = contentviews.get_content_view(
contentviews.get("Auto"),
message.content,
headers=message.headers
)
except ContentViewException:
s = "Content viewer failed: \n" + traceback.format_exc()
self.add_event(s, "debug")
type, lines = contentviews.get_content_view(
contentviews.get("Raw"),
message.content,
headers=message.headers
)
styles = dict(
highlight=dict(bold=True),
offset=dict(fg="blue"),
header=dict(fg="green", bold=True),
text=dict(fg="green")
)
def colorful(line):
yield u" " # we can already indent here
for (style, text) in line:
yield click.style(text, **styles.get(style, {}))
if self.o.flow_detail == 3:
lines_to_echo = itertools.islice(lines, 70)
else:
lines_to_echo = lines
lines_to_echo = list(lines_to_echo)
content = u"\r\n".join(
u"".join(colorful(line)) for line in lines_to_echo
)
self.echo(content)
if next(lines, None):
self.echo("(cut off)", indent=4, dim=True)
if self.o.flow_detail >= 2:
self.echo("")
def _echo_request_line(self, flow):
if flow.request.stickycookie:
stickycookie = click.style("[stickycookie] ", fg="yellow", bold=True)
else:
stickycookie = ""
if flow.client_conn:
client = click.style(flow.client_conn.address.host, bold=True)
else:
client = click.style("[replay]", fg="yellow", bold=True)
method = flow.request.method
method_color = dict(
GET="green",
DELETE="red"
).get(method.upper(), "magenta")
method = click.style(method, fg=method_color, bold=True)
if self.showhost:
url = flow.request.pretty_url
else:
url = flow.request.url
url = click.style(url, bold=True)
httpversion = ""
if flow.request.http_version not in ("HTTP/1.1", "HTTP/1.0"):
httpversion = " " + flow.request.http_version # We hide "normal" HTTP 1.
line = "{stickycookie}{client} {method} {url}{httpversion}".format(
stickycookie=stickycookie,
client=client,
method=method,
url=url,
httpversion=httpversion
)
self.echo(line)
def _echo_response_line(self, flow):
if flow.response.is_replay:
replay = click.style("[replay] ", fg="yellow", bold=True)
else:
replay = ""
code = flow.response.status_code
code_color = None
if 200 <= code < 300:
code_color = "green"
elif 300 <= code < 400:
code_color = "magenta"
elif 400 <= code < 600:
code_color = "red"
code = click.style(str(code), fg=code_color, bold=True, blink=(code == 418))
reason = click.style(flow.response.reason, fg=code_color, bold=True)
if flow.response.content == CONTENT_MISSING:
size = "(content missing)"
else:
size = netlib.utils.pretty_size(len(flow.response.content))
size = click.style(size, bold=True)
arrows = click.style("<<", bold=True)
line = "{replay} {arrows} {code} {reason} {size}".format(
replay=replay,
arrows=arrows,
code=code,
reason=reason,
size=size
)
self.echo(line)
def echo_flow(self, f):
if self.o.flow_detail == 0:
return
if f.request:
self._echo_request_line(f)
self._echo_message(f.request)
if f.response:
self._echo_response_line(f)
self._echo_message(f.response)
if f.error:
self.echo(" << {}".format(f.error.msg), bold=True, fg="red")
if self.outfile:
self.outfile.flush()
def _process_flow(self, f):
self.state.delete_flow(f)
if self.filt and not f.match(self.filt):
return
self.echo_flow(f)
def handle_request(self, f):
flow.FlowMaster.handle_request(self, f)
if f:
f.reply()
return f
def handle_response(self, f):
flow.FlowMaster.handle_response(self, f)
if f:
f.reply()
self._process_flow(f)
return f
def handle_error(self, f):
flow.FlowMaster.handle_error(self, f)
if f:
self._process_flow(f)
return f
def shutdown(self): # pragma: no cover
return flow.FlowMaster.shutdown(self)
def run(self): # pragma: no cover
if self.o.rfile and not self.o.keepserving:
self.shutdown()
return
try:
return flow.FlowMaster.run(self)
except BaseException:
self.shutdown()
raise
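# Usage sketch (added illustration, not part of the original module):
#
#   opts = Options(flow_detail=1, verbosity=1)
#   opts.flow_detail   # 1
#   opts.rfile         # None -- any attribute not passed explicitly defaults to None
#
#   DumpMaster.indent(4, "GET /\nHost: example.com")
#   # '    GET /\n    Host: example.com'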
|
|
"""
Consul Management
=================
.. versionadded:: 3005
The consul module is used to create and manage Consul ACLs
.. code-block:: yaml
acl_present:
consul.acl_present:
- id: 38AC8470-4A83-4140-8DFD-F924CD32917F
- name: acl_name
- rules: node "" {policy = "write"} service "" {policy = "read"} key "_rexec" {policy = "write"}
- type: client
- consul_url: http://localhost:8500
acl_delete:
consul.acl_absent:
- id: 38AC8470-4A83-4140-8DFD-F924CD32917F
"""
import logging
log = logging.getLogger(__name__)
def _acl_changes(name, id=None, type=None, rules=None, consul_url=None, token=None):
"""
    Return True if the ACL needs to be updated, False if it does not.
"""
info = __salt__["consul.acl_info"](id=id, token=token, consul_url=consul_url)
if info["res"] and info["data"][0]["Name"] != name:
return True
elif info["res"] and info["data"][0]["Rules"] != rules:
return True
elif info["res"] and info["data"][0]["Type"] != type:
return True
else:
return False
def _acl_exists(name=None, id=None, token=None, consul_url=None):
"""
    Check whether the ACL exists, using either the name or the ID.
    The name is ignored if the ID is specified; if only the name is
    given, the associated ID is returned.
"""
ret = {"result": False, "id": None}
if id:
info = __salt__["consul.acl_info"](id=id, token=token, consul_url=consul_url)
elif name:
info = __salt__["consul.acl_list"](token=token, consul_url=consul_url)
else:
return ret
if info.get("data"):
for acl in info["data"]:
if id and acl["ID"] == id:
ret["result"] = True
ret["id"] = id
elif name and acl["Name"] == name:
ret["result"] = True
ret["id"] = acl["ID"]
return ret
def acl_present(
name,
id=None,
token=None,
type="client",
rules="",
consul_url="http://localhost:8500",
):
"""
Ensure the ACL is present
name
Specifies a human-friendly name for the ACL token.
id
Specifies the ID of the ACL.
type: client
Specifies the type of ACL token. Valid values are: client and management.
rules
Specifies rules for this ACL token.
    consul_url : http://localhost:8500
        Consul URL to query.
.. note::
For more information https://www.consul.io/api/acl.html#create-acl-token, https://www.consul.io/api/acl.html#update-acl-token
"""
ret = {
"name": name,
"changes": {},
"result": True,
"comment": 'ACL "{}" exists and is up to date'.format(name),
}
exists = _acl_exists(name, id, token, consul_url)
if not exists["result"]:
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "The acl doesn't exist, it will be created"
return ret
create = __salt__["consul.acl_create"](
name=name, id=id, token=token, type=type, rules=rules, consul_url=consul_url
)
if create["res"]:
ret["result"] = True
ret["comment"] = "The acl has been created"
elif not create["res"]:
ret["result"] = False
ret["comment"] = "Failed to create the acl"
elif exists["result"]:
changes = _acl_changes(
name=name,
id=exists["id"],
token=token,
type=type,
rules=rules,
consul_url=consul_url,
)
if changes:
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "The acl exists and will be updated"
return ret
update = __salt__["consul.acl_update"](
name=name,
id=exists["id"],
token=token,
type=type,
rules=rules,
consul_url=consul_url,
)
if update["res"]:
ret["result"] = True
ret["comment"] = "The acl has been updated"
elif not update["res"]:
ret["result"] = False
ret["comment"] = "Failed to update the acl"
return ret
def acl_absent(name, id=None, token=None, consul_url="http://localhost:8500"):
"""
Ensure the ACL is absent
name
Specifies a human-friendly name for the ACL token.
id
Specifies the ID of the ACL.
token
        Token to authenticate your Consul query.
    consul_url : http://localhost:8500
        Consul URL to query.
.. note::
For more information https://www.consul.io/api/acl.html#delete-acl-token
"""
ret = {
"name": id,
"changes": {},
"result": True,
"comment": 'ACL "{}" does not exist'.format(id),
}
exists = _acl_exists(name, id, token, consul_url)
if exists["result"]:
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "The acl exists, it will be deleted"
return ret
delete = __salt__["consul.acl_delete"](
id=exists["id"], token=token, consul_url=consul_url
)
if delete["res"]:
ret["result"] = True
ret["comment"] = "The acl has been deleted"
elif not delete["res"]:
ret["result"] = False
ret["comment"] = "Failed to delete the acl"
return ret
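# Usage sketch (added illustration, not part of the original state module):
# running a hypothetical SLS that uses the states above with test=True only
# reports the intended action, e.g.
#
#   salt '*' state.apply consul_acls test=True
#
# returns result None with the comment "The acl doesn't exist, it will be
# created" for a missing ACL, without touching the Consul API.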
|
|
import logging
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django import forms
from .jira import JIRAClient
log = logging.getLogger(__name__)
class JIRAFormUtils(object):
@staticmethod
def make_choices(x):
return [(y["id"], y["name"] if "name" in y else y["value"]) for y in x] if x else []
class JIRAOptionsForm(forms.Form):
instance_url = forms.CharField(
label=_("JIRA Instance URL"),
widget=forms.TextInput(attrs={'class': 'span6', 'placeholder': 'e.g. "https://jira.atlassian.com"'}),
help_text=_("It must be visible to the Sentry server"),
required=True
)
username = forms.CharField(
label=_("Username"),
widget=forms.TextInput(attrs={'class': 'span6'}),
help_text=_("Ensure the JIRA user has admin perm. on the project"),
required=True
)
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(attrs={'class': 'span6'}),
help_text=_("Only enter a value if you wish to change it"),
required=False
)
default_project = forms.ChoiceField(
label=_("Linked Project"),
)
ignored_fields = forms.CharField(
label=_("Ignored Fields"),
widget=forms.Textarea(attrs={'class': 'span11', 'placeholder': 'e.g. "components, security, customfield_10006"'}),
help_text=_("Comma-separated list of properties that you don't want to show in the form"),
required=False
)
default_priority = forms.ChoiceField(
label=_("Default Priority"),
required=False
)
default_issue_type = forms.ChoiceField(
label=_("Default Issue Type"),
required=False)
auto_create = forms.BooleanField(
label=_("Auto create JIRA tickets"),
help_text=_("Only enable if you want any new event to auto-create a JIRA ticket."),
required=False
)
def __init__(self, *args, **kwargs):
super(JIRAOptionsForm, self).__init__(*args, **kwargs)
initial = kwargs.get("initial")
project_safe = False
if initial and initial.get("instance_url"):
# make a connection to JIRA to fetch a default project.
jira = JIRAClient(initial["instance_url"], initial.get("username"), initial.get("password"))
projects_response = jira.get_projects_list()
if projects_response.status_code == 200:
projects = projects_response.json
if projects:
project_choices = [(p.get('key'), "%s (%s)" % (p.get('name'), p.get('key'))) for p in projects]
project_safe = True
self.fields["default_project"].choices = project_choices
priorities_response = jira.get_priorities()
if priorities_response.status_code == 200:
priorities = priorities_response.json
if priorities:
priority_choices = [(p.get('id'), "%s" % (p.get('name'))) for p in priorities]
self.fields["default_priority"].choices = priority_choices
default_project = initial.get('default_project')
if default_project:
meta = jira.get_create_meta_for_project(default_project)
if meta:
self.fields["default_issue_type"].choices = JIRAFormUtils.make_choices(meta["issuetypes"])
if not project_safe:
del self.fields["default_project"]
del self.fields["ignored_fields"]
def clean_password(self):
"""
Don't complain if the field is empty and a password is already stored,
no one wants to type a pw in each time they want to change it.
"""
pw = self.cleaned_data.get("password")
if pw:
return pw
else:
old_pw = self.initial.get("password")
if not old_pw:
raise ValidationError("A Password is Required")
return old_pw
def clean_instance_url(self):
"""
Strip forward slashes off any url passed through the form.
"""
url = self.cleaned_data.get("instance_url")
if url and url[-1:] == "/":
return url[:-1]
else:
return url
def clean(self):
"""
        Try to build a JIRAClient and make a test call to make sure the
configuration is right.
"""
cd = self.cleaned_data
missing_fields = False
if not cd.get("instance_url"):
self.errors["instance_url"] = ["Instance URL is required"]
missing_fields = True
if not cd.get("username"):
self.errors["username"] = ["Username is required"]
missing_fields = True
if missing_fields:
raise ValidationError("Missing Fields")
jira = JIRAClient(cd["instance_url"], cd["username"], cd["password"])
sut_response = jira.get_priorities()
if sut_response.status_code == 403 or sut_response.status_code == 401:
self.errors["username"] = ["Username might be incorrect"]
self.errors["password"] = ["Password might be incorrect"]
raise ValidationError("Unable to connect to JIRA: %s, if you have "
"tried and failed multiple times you may have"
" to enter a CAPTCHA in JIRA to re-enable API"
" logins." % sut_response.status_code)
elif sut_response.status_code == 500 or sut_response.json is None:
raise ValidationError("Unable to connect to JIRA: Bad Response")
elif sut_response.status_code > 200:
raise ValidationError("Unable to connect to JIRA: %s" % sut_response.status_code)
return cd
# A list of common builtin custom field types for JIRA for easy reference.
CUSTOM_FIELD_TYPES = {
"select": "com.atlassian.jira.plugin.system.customfieldtypes:select",
"textarea": "com.atlassian.jira.plugin.system.customfieldtypes:textarea",
"multiuserpicker": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker",
"number": "com.atlassian.jira.plugin.system.customfieldtypes:float"
}
class JIRAIssueForm(forms.Form):
project = forms.CharField(widget=forms.HiddenInput())
issuetype = forms.ChoiceField(
label="Issue Type",
help_text="Changing the issue type will refresh the page with the required form fields.",
required=True
)
summary = forms.CharField(
label=_("Issue Summary"),
widget=forms.TextInput(attrs={'class': 'span6'})
)
description = forms.CharField(
widget=forms.Textarea(attrs={"class": 'span6'})
)
def __init__(self, *args, **kwargs):
self.ignored_fields = kwargs.pop("ignored_fields")
initial = kwargs.get("initial")
jira_client = kwargs.pop("jira_client")
project_key = kwargs.pop("project_key")
priorities = jira_client.get_priorities().json
versions = jira_client.get_versions(project_key).json
# Returns the metadata the configured JIRA instance requires for
# creating issues for a given project.
# https://developer.atlassian.com/static/rest/jira/5.0.html#id200251
meta = jira_client.get_create_meta(project_key).json
# Early exit, somehow made it here without properly configuring the
# plugin.
if not meta or not priorities:
super(JIRAIssueForm, self).__init__(*args, **kwargs)
self.errors["__all__"] = [
"Error communicating with JIRA, Please check your configuration."]
return
# Early exit #2, no projects available.
if len(meta["projects"]) is 0:
super(JIRAIssueForm, self).__init__(*args, **kwargs)
self.errors["__all__"] = [
"Error in JIRA configuration, no projects found for user %s." %
jira_client.username]
return
# Looking up the project meta by exact key, so it's always the first
# one.
project = meta["projects"][0]
issue_types = project["issuetypes"]
# check if the issuetype was passed as a GET parameter
self.issue_type = initial.get("issuetype")
if self.issue_type:
matching_type = [t for t in issue_types if t["id"] == self.issue_type]
self.issue_type = matching_type[0] if len(matching_type) > 0 else None
# still no issue type? just use the first one.
if not self.issue_type:
self.issue_type = issue_types[0]
        # set back after we've played with the initial data
kwargs["initial"] = initial
# call the super to bind self.fields from the defaults.
super(JIRAIssueForm, self).__init__(*args, **kwargs)
self.fields["project"].initial = project["id"]
self.fields["issuetype"].choices = JIRAFormUtils.make_choices(issue_types)
# apply ordering to fields based on some known built-in JIRA fields.
# otherwise weird ordering occurs.
anti_gravity = {"priority": -150,
"fixVersions": -125,
"components": -100,
"security": -50}
dynamic_fields = self.issue_type.get("fields").keys()
dynamic_fields.sort(key=lambda f: anti_gravity.get(f) or 0)
        # build up the dynamic fields based on the issue type's field metadata.
for field in dynamic_fields:
if field in self.fields.keys() or field in [x.strip() for x in self.ignored_fields.split(",")]:
# don't overwrite the fixed fields for the form.
continue
mb_field = self.build_dynamic_field(self.issue_type["fields"][field])
if mb_field:
# apply field to form
self.fields[field] = mb_field
if "priority" in self.fields.keys():
# whenever priorities are available, put the available ones in the list.
# allowedValues for some reason doesn't pass enough info.
self.fields["priority"].choices = JIRAFormUtils.make_choices(priorities)
if "fixVersions" in self.fields.keys():
self.fields["fixVersions"].choices = JIRAFormUtils.make_choices(versions)
def clean_description(self):
"""
Turn code blocks that are in the stack trace into JIRA code blocks.
"""
desc = self.cleaned_data["description"]
return desc.replace("```", "{code}")
def clean(self):
"""
The form clean method needs to take advantage of the loaded issue type
fields and meta info so it can determine the format that the datatypes
should render as.
"""
very_clean = self.cleaned_data
# protect against mis-configured plugin submitting a form without an
# issuetype assigned.
if not very_clean.get("issuetype"):
raise ValidationError("Issue Type is required. Check your plugin configuration.")
fs = self.issue_type["fields"]
for field in fs.keys():
f = fs[field]
if field in ["description", "summary"]:
continue
if field in very_clean.keys():
v = very_clean.get(field)
if v:
schema = f["schema"]
if schema.get("type") == "string" and not schema.get("custom") == CUSTOM_FIELD_TYPES["select"]:
continue # noop
if schema["type"] == "user" or schema.get('item') == "user":
v = {"name": v}
elif schema.get("custom") == CUSTOM_FIELD_TYPES.get("multiuserpicker"):
# custom multi-picker
v = [{"name": v}]
elif schema["type"] == "array" and schema.get("item") != "string":
v = [{"id": vx} for vx in v]
elif schema.get("custom") == CUSTOM_FIELD_TYPES.get("textarea"):
v = v
elif schema.get("custom") == CUSTOM_FIELD_TYPES.get("number"):
v = float(v)
elif (schema.get("type") != "string"
or schema.get("item") != "string"
or schema.get("custom") == CUSTOM_FIELD_TYPES.get("select")):
v = {"id": v}
very_clean[field] = v
else:
# We don't want to pass blank data back to the API, so kill
# None values
very_clean.pop(field, None)
if not (isinstance(very_clean["issuetype"], dict)
and "id" in very_clean["issuetype"]):
            # Something fishy is going on with this field; it converts on some
            # JIRA instances but not on others. Testing against 5.1.5 and 5.1.4
            # shows it is not converted (perhaps it is no longer included in the
            # project meta API call), so handle the case where the clean logic
            # above did not convert it.
very_clean["issuetype"] = {"id": very_clean["issuetype"]}
return very_clean
@staticmethod
def build_dynamic_field(field_meta):
"""
Builds a field based on JIRA's meta field information
"""
schema = field_meta["schema"]
# set up some defaults for form fields
fieldtype = forms.CharField
fkwargs = {
'label': field_meta["name"],
'required': field_meta["required"],
'widget': forms.TextInput(attrs={'class': 'span6'})
}
# override defaults based on field configuration
if (schema["type"] in ["securitylevel", "priority"]
or schema.get("custom") == CUSTOM_FIELD_TYPES.get("select")):
fieldtype = forms.ChoiceField
fkwargs["choices"] = JIRAFormUtils.make_choices(field_meta.get('allowedValues'))
fkwargs["widget"] = forms.Select()
elif schema.get("items") == "user" or schema["type"] == "user":
fkwargs["widget"] = forms.TextInput(attrs={'class': 'user-selector', 'data-autocomplete': field_meta.get("autoCompleteUrl")})
elif schema["type"] in ["timetracking"]:
            # TODO: Implement timetracking (currently unsupported altogether)
return None
elif schema.get("items") in ["worklog", "attachment"]:
# TODO: Implement worklogs and attachments someday
return None
elif schema["type"] == "array" and schema["items"] != "string":
fieldtype = forms.MultipleChoiceField
fkwargs["choices"] = JIRAFormUtils.make_choices(field_meta.get("allowedValues"))
fkwargs["widget"] = forms.SelectMultiple()
# break this out, since multiple field types could additionally
# be configured to use a custom property instead of a default.
if schema.get("custom"):
if schema["custom"] == CUSTOM_FIELD_TYPES.get("textarea"):
fkwargs["widget"] = forms.Textarea(attrs={'class': 'span6'})
return fieldtype(**fkwargs)
class JIRAIssueLinkForm(forms.Form):
project_key = forms.CharField(widget=forms.HiddenInput())
project = forms.CharField(widget=forms.HiddenInput())
issue_id = forms.CharField(label=_("Issue ID"), widget=forms.TextInput(attrs={'class': 'span6'}))
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint:disable=too-many-lines
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azure.mgmt.sqlvirtualmachine.models import (
SqlServerLicenseType,
BackupScheduleType,
FullBackupFrequencyType,
ConnectivityType,
SqlWorkloadType,
DiskConfigurationType,
DayOfWeek,
SqlVmGroupImageSku,
SqlImageSku,
SqlManagementMode
)
from azure.cli.core.commands.parameters import (
get_enum_type,
tags_type,
get_three_state_flag,
get_location_type
)
from ._validators import (
validate_sqlvm_group,
validate_sqlvm_list,
validate_load_balancer,
validate_public_ip_address,
validate_subnet,
validate_sqlmanagement,
validate_expand
)
# pylint: disable=too-many-statements, line-too-long
def load_arguments(self, _):
for scope in ['sql vm', 'sql vm group']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
###############################################
# sql virtual machine groups params #
###############################################
with self.argument_context('sql vm group') as c:
c.argument('sql_virtual_machine_group_name',
options_list=['--name', '-n'],
id_part='name',
help='Name of the SQL virtual machine group.')
c.argument('sql_image_offer',
options_list=['--image-offer', '-i'],
help='SQL image offer. Examples may include SQL2016-WS2016, SQL2017-WS2016.')
c.argument('sql_image_sku',
options_list=['--image-sku', '-s'],
help='SQL image sku.',
arg_type=get_enum_type(SqlVmGroupImageSku))
c.argument('location',
                   help='Location. If not provided, the group will be created in the same resource group location. '
                        'You can configure the default location using `az configure --defaults location=<location>`.',
arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
with self.argument_context('sql vm group', arg_group='WSFC Domain Profile') as c:
c.argument('domain_fqdn',
options_list=['--domain-fqdn', '-f'],
help='Fully qualified name of the domain.')
c.argument('cluster_operator_account',
options_list=['--operator-acc', '-p'],
help='Account name used for operating cluster i.e. will be part of administrators group on all the participating virtual machines in the cluster.')
c.argument('sql_service_account',
options_list=['--service-acc', '-e'],
help='Account name under which SQL service will run on all participating SQL virtual machines in the cluster.')
c.argument('storage_account_url',
options_list=['--storage-account', '-u'],
help='Storage account url of the witness storage account.')
c.argument('storage_account_key',
options_list=['--sa-key', '-k'],
help='Primary key of the witness storage account.')
c.argument('cluster_bootstrap_account',
options_list=['--bootstrap-acc'],
help='Account name used for creating cluster (at minimum needs permissions to \'Create Computer Objects\' in domain).')
c.argument('file_share_witness_path',
options_list=['--fsw-path'],
help='Optional path for fileshare witness.')
c.argument('ou_path',
help='Organizational Unit path in which the nodes and cluster will be present. Example: OU=WSCluster,DC=testdomain,DC=com')
###############################################
# availability group listener params #
###############################################
with self.argument_context('sql vm group ag-listener') as c:
c.argument('availability_group_listener_name',
options_list=['--name', '-n'],
id_part='name',
help='Name of the availability group listener.')
c.argument('sql_virtual_machine_group_name',
options_list=['--group-name', '-r'],
help='Name of the SQL virtual machine group.',
id_part=None)
c.argument('port',
options_list=['--port', '-p'],
help='Listener port.',
type=int)
c.argument('availability_group_name',
options_list=['--ag-name', '-a'],
help='Name of the availability group. Please refer to '
'https://docs.microsoft.com/sql/database-engine/availability-groups/windows/use-the-availability-group-wizard-sql-server-management-studio?view=sql-server-2017 '
                        'to create an availability group.')
with self.argument_context('sql vm group ag-listener', arg_group='Load Balancer Configuration') as c:
c.argument('ip_address',
options_list=['--ip-address', '-i'],
help='Private IP address bound to the availability group listener.')
c.argument('subnet_resource_id',
options_list=['--subnet', '-u'],
validator=validate_subnet,
help='The name or resource id of the subnet to include in the private IP.')
c.argument('vnet_name',
options_list=['--vnet-name'],
help='Name of the virtual network. Provide only if name of the subnet has been provided.')
c.argument('public_ip_address_resource_id',
options_list=['--public-ip', '-c'],
validator=validate_public_ip_address,
help='Name or resource ID of the public IP.')
c.argument('load_balancer_resource_id',
options_list=['--load-balancer', '-b'],
validator=validate_load_balancer,
help='Name or resource ID of the load balancer.')
c.argument('probe_port',
options_list=['--probe-port', '-e'],
help='Probe port.',
type=int)
c.argument('sql_virtual_machine_instances',
options_list=['--sqlvms', '-q'],
nargs='+',
validator=validate_sqlvm_list,
help='Space-separated list of SQL virtual machine instance name or resource IDs that are enrolled into the availability group.')
###############################################
# sql virtual machine params #
###############################################
with self.argument_context('sql vm') as c:
c.argument('sql_virtual_machine_name',
options_list=['--name', '-n'],
id_part='name',
help='Name of the SQL virtual machine.')
c.argument('location',
                   help='Location. If not provided, the virtual machine should be in the same region as the resource group. '
                        'You can configure the default location using `az configure --defaults location=<location>`.',
arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
c.argument('expand',
                   help='Get the SQLIaaSExtension configuration settings. To view all settings, use *. To select only a few, the settings must be space-separated.',
nargs='+',
validator=validate_expand,
arg_type=get_enum_type(['*', 'AutoBackupSettings', 'AutoPatchingSettings', 'KeyVaultCredentialSettings', 'ServerConfigurationsManagementSettings']))
c.argument('sql_management_mode',
help='SQL Server management type. If NoAgent selected, please provide --image-sku and --offer-type.',
options_list=['--sql-mgmt-type'],
validator=validate_sqlmanagement,
arg_type=get_enum_type(SqlManagementMode))
with self.argument_context('sql vm', arg_group='SQL Server License') as c:
c.argument('sql_server_license_type',
help='SQL Server license type.',
options_list=['--license-type', '-i'],
arg_type=get_enum_type(SqlServerLicenseType))
c.argument('sql_image_sku',
options_list=['--image-sku'],
help='SQL image sku.',
arg_type=get_enum_type(SqlImageSku))
c.argument('sql_image_offer',
options_list=['--image-offer'],
help='SQL image offer. Examples include SQL2008R2-WS2008, SQL2008-WS2008.')
with self.argument_context('sql vm add-to-group') as c:
c.argument('sql_virtual_machine_group_resource_id',
options_list=['--sqlvm-group', '-r'],
validator=validate_sqlvm_group,
help='Name or resource ID of the SQL virtual machine group. If only name provided, SQL virtual machine group should be in the same '
'resource group of the SQL virtual machine.')
c.argument('sql_virtual_machine_name',
id_part='name',
help="Name of the SQL virtual machine.")
with self.argument_context('sql vm remove-from-group') as c:
c.argument('sql_virtual_machine_name',
id_part='name',
help="Name of the SQL virtual machine.")
with self.argument_context('sql vm update') as c:
c.argument('sql_management_mode',
help='SQL Server management type. Updates from LightWeight to Full.',
options_list=['--sql-mgmt-type'],
arg_type=get_enum_type(['Full']))
c.argument('prompt',
options_list=['--yes', '-y'],
help="Do not prompt for confirmation. Requires --sql-mgmt-type.")
with self.argument_context('sql vm add-to-group', arg_group='WSFC Domain Credentials') as c:
c.argument('cluster_bootstrap_account_password',
options_list=['-b', '--bootstrap-acc-pwd'],
help='Password for the cluster bootstrap account if provided in the SQL virtual machine group.')
c.argument('cluster_operator_account_password',
options_list=['--operator-acc-pwd', '-p'],
help='Password for the cluster operator account provided in the SQL virtual machine group.')
c.argument('sql_service_account_password',
options_list=['--service-acc-pwd', '-s'],
help='Password for the SQL service account provided in the SQL virtual machine group.')
with self.argument_context('sql vm', arg_group='Auto Patching Settings') as c:
c.argument('enable_auto_patching',
                   help='Enable or disable auto-patching on the SQL virtual machine. If any auto-patching settings are provided, this parameter is automatically set to true.',
arg_type=get_three_state_flag())
c.argument('day_of_week',
help='Day of week to apply the patch on.',
arg_type=get_enum_type(DayOfWeek))
c.argument('maintenance_window_starting_hour',
type=int,
options_list=['--maintenance-window-start-hour'],
help='Hour of the day when patching is initiated. Local VM time 0-23 hours.')
c.argument('maintenance_window_duration',
type=int,
help='Duration of patching. 30-180 minutes.')
with self.argument_context('sql vm', arg_group='Auto Backup Settings') as c:
c.argument('enable_auto_backup',
                   help='Enable or disable auto-backup on the SQL virtual machine. If any backup settings are provided, this parameter is automatically set to true.',
arg_type=get_three_state_flag())
c.argument('enable_encryption',
                   help='Enable encryption for backup on SQL virtual machine.',
arg_type=get_three_state_flag())
c.argument('retention_period',
type=int,
help='Retention period of backup. 1-30 days.')
c.argument('storage_account_url',
options_list=['--storage-account'],
help='Storage account url where backup will be taken to.')
c.argument('storage_access_key',
options_list=['--sa-key'],
help='Storage account key where backup will be taken to.')
c.argument('backup_password',
options_list=['--backup-pwd'],
help='Password for encryption on backup.')
c.argument('backup_system_dbs',
help='Include system databases on backup.',
arg_type=get_three_state_flag())
c.argument('backup_schedule_type',
help='Backup schedule type.',
arg_type=get_enum_type(BackupScheduleType))
c.argument('full_backup_frequency',
help='Frequency of full backups. In both cases, full backups begin during the next scheduled time window.',
arg_type=get_enum_type(FullBackupFrequencyType))
c.argument('full_backup_start_time',
type=int,
options_list=['--full-backup-start-hour'],
help='Start time of a given day during which full backups can take place. 0-23 hours.')
c.argument('full_backup_window_hours',
type=int,
options_list=['--full-backup-duration'],
help='Duration of the time window of a given day during which full backups can take place. 1-23 hours.')
c.argument('log_backup_frequency',
type=int,
help='Frequency of log backups. 5-60 minutes.')
with self.argument_context('sql vm', arg_group='Key Vault Credential Settings') as c:
c.argument('enable_key_vault_credential',
                   help='Enable or disable the key vault credential setting. If any key vault settings are provided, this parameter is automatically set to true.',
arg_type=get_three_state_flag())
c.argument('credential_name',
help='Credential name')
c.argument('azure_key_vault_url',
options_list=['--key-vault'],
help='Azure Key Vault url.')
c.argument('service_principal_name',
options_list=['--sp-name'],
help='Service principal name to access key vault.')
c.argument('service_principal_secret',
options_list=['--sp-secret'],
help='Service principal name secret to access key vault.')
with self.argument_context('sql vm', arg_group='SQL Connectivity Update Settings') as c:
c.argument('connectivity_type',
help='SQL Server connectivity option.',
arg_type=get_enum_type(ConnectivityType))
c.argument('port',
help='SQL Server port.',
type=int)
c.argument('sql_auth_update_username',
help='SQL Server sysadmin login to create.')
c.argument('sql_auth_update_password',
options_list=['--sql-auth-update-pwd'],
help='SQL Server sysadmin login password.')
with self.argument_context('sql vm', arg_group='SQL Workload Type Update Settings') as c:
c.argument('sql_workload_type',
help='SQL Server workload type.',
arg_type=get_enum_type(SqlWorkloadType))
with self.argument_context('sql vm', arg_group='SQL Storage Update Settings') as c:
c.argument('disk_count',
help='Virtual machine disk count.',
type=int)
c.argument('disk_configuration_type',
help='Disk configuration to apply to SQL Server.',
arg_type=get_enum_type(DiskConfigurationType))
with self.argument_context('sql vm', arg_group='Additional Features') as c:
c.argument('enable_r_services',
help='Enable or disable R services (SQL 2016 onwards).',
arg_type=get_three_state_flag())
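# Usage sketch (added illustration, not part of the original file; resource
# names are hypothetical and the license value is assumed from the
# SqlServerLicenseType enum imported above):
#
#   az sql vm update -n MySqlVm -g MyResourceGroup --license-type AHUB
#   az sql vm update -n MySqlVm -g MyResourceGroup --sql-mgmt-type Full --yes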
|
|
from unittest import mock
import os
import platform
import subprocess
import sys
import tempfile
import unittest
import warnings
from test import support
class PlatformTest(unittest.TestCase):
def test_architecture(self):
res = platform.architecture()
@support.skip_unless_symlink
def test_architecture_via_symlink(self): # issue3762
# On Windows, the EXE needs to know where pythonXY.dll and *.pyd is at
# so we add the directory to the path and PYTHONPATH.
if sys.platform == "win32":
def restore_environ(old_env):
os.environ.clear()
os.environ.update(old_env)
self.addCleanup(restore_environ, dict(os.environ))
os.environ["Path"] = "{};{}".format(
os.path.dirname(sys.executable), os.environ["Path"])
os.environ["PYTHONPATH"] = os.path.dirname(sys.executable)
def get(python):
cmd = [python, '-c',
'import platform; print(platform.architecture())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(support.TESTFN)
os.symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
os.remove(link)
def test_platform(self):
for aliased in (False, True):
for terse in (False, True):
res = platform.platform(aliased, terse)
def test_system(self):
res = platform.system()
def test_node(self):
res = platform.node()
def test_release(self):
res = platform.release()
def test_version(self):
res = platform.version()
def test_machine(self):
res = platform.machine()
def test_processor(self):
res = platform.processor()
def setUp(self):
self.save_version = sys.version
self.save_git = sys._git
self.save_platform = sys.platform
def tearDown(self):
sys.version = self.save_version
sys._git = self.save_git
sys.platform = self.save_platform
def test_sys_version(self):
# Old test.
for input, output in (
('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21', 'GCC 3.3.4 (pre 3.3.5 20040809)')),
('IronPython 1.0.60816 on .NET 2.0.50727.42',
('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
('2.4.3 (truncation, date, t) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date t', 'GCC')),
('2.4.3 (truncation, date, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, d) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'd', 'GCC')),
('2.4.3 (truncation, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
):
# branch and revision are not "parsed", but fetched
# from sys._git. Ignore them
(name, version, branch, revision, buildno, builddate, compiler) \
= platform._sys_version(input)
self.assertEqual(
(name, version, '', '', buildno, builddate, compiler), output)
# Tests for python_implementation(), python_version(), python_branch(),
# python_revision(), python_build(), and python_compiler().
sys_versions = {
("2.6.1 (r261:67515, Dec 6 2008, 15:26:00) \n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]",
('CPython', 'tags/r261', '67515'), self.save_platform)
:
("CPython", "2.6.1", "tags/r261", "67515",
('r261:67515', 'Dec 6 2008 15:26:00'),
'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli")
:
("IronPython", "2.0.0", "", "", ("", ""),
".NET 2.0.50727.3053"),
("2.6.1 (IronPython 2.6.1 (2.6.10920.0) on .NET 2.0.50727.1433)", None, "cli")
:
("IronPython", "2.6.1", "", "", ("", ""),
".NET 2.0.50727.1433"),
("2.7.4 (IronPython 2.7.4 (2.7.0.40) on Mono 4.0.30319.1 (32-bit))", None, "cli")
:
("IronPython", "2.7.4", "", "", ("", ""),
"Mono 4.0.30319.1 (32-bit)"),
("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]",
('Jython', 'trunk', '6107'), "java1.5.0_16")
:
("Jython", "2.5.0", "trunk", "6107",
('trunk:6107', 'Mar 26 2009'), "java1.5.0_16"),
("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]",
('PyPy', 'trunk', '63378'), self.save_platform)
:
("PyPy", "2.5.2", "trunk", "63378", ('63378', 'Mar 26 2009'),
"")
}
for (version_tag, subversion, sys_platform), info in \
sys_versions.items():
sys.version = version_tag
if subversion is None:
if hasattr(sys, "_git"):
del sys._git
else:
sys._git = subversion
if sys_platform is not None:
sys.platform = sys_platform
self.assertEqual(platform.python_implementation(), info[0])
self.assertEqual(platform.python_version(), info[1])
self.assertEqual(platform.python_branch(), info[2])
self.assertEqual(platform.python_revision(), info[3])
self.assertEqual(platform.python_build(), info[4])
self.assertEqual(platform.python_compiler(), info[5])
def test_system_alias(self):
res = platform.system_alias(
platform.system(),
platform.release(),
platform.version(),
)
def test_uname(self):
res = platform.uname()
self.assertTrue(any(res))
self.assertEqual(res[0], res.system)
self.assertEqual(res[1], res.node)
self.assertEqual(res[2], res.release)
self.assertEqual(res[3], res.version)
self.assertEqual(res[4], res.machine)
self.assertEqual(res[5], res.processor)
@unittest.skipUnless(sys.platform.startswith('win'), "windows only test")
def test_uname_win32_ARCHITEW6432(self):
# Issue 7860: make sure we get architecture from the correct variable
# on 64 bit Windows: if PROCESSOR_ARCHITEW6432 exists we should be
# using it, per
# http://blogs.msdn.com/david.wang/archive/2006/03/26/HOWTO-Detect-Process-Bitness.aspx
try:
with support.EnvironmentVarGuard() as environ:
if 'PROCESSOR_ARCHITEW6432' in environ:
del environ['PROCESSOR_ARCHITEW6432']
environ['PROCESSOR_ARCHITECTURE'] = 'foo'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'foo')
environ['PROCESSOR_ARCHITEW6432'] = 'bar'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'bar')
finally:
platform._uname_cache = None
def test_java_ver(self):
res = platform.java_ver()
if sys.platform == 'java':
self.assertTrue(all(res))
def test_win32_ver(self):
res = platform.win32_ver()
def test_mac_ver(self):
res = platform.mac_ver()
if platform.uname().system == 'Darwin':
# We're on a MacOSX system, check that
# the right version information is returned
fd = os.popen('sw_vers', 'r')
real_ver = None
for ln in fd:
if ln.startswith('ProductVersion:'):
real_ver = ln.strip().split()[-1]
break
fd.close()
self.assertFalse(real_ver is None)
result_list = res[0].split('.')
expect_list = real_ver.split('.')
len_diff = len(result_list) - len(expect_list)
# On Snow Leopard, sw_vers reports 10.6.0 as 10.6
if len_diff > 0:
expect_list.extend(['0'] * len_diff)
self.assertEqual(result_list, expect_list)
# res[1] claims to contain
# (version, dev_stage, non_release_version)
# That information is no longer available
self.assertEqual(res[1], ('', '', ''))
if sys.byteorder == 'little':
self.assertIn(res[2], ('i386', 'x86_64'))
else:
self.assertEqual(res[2], 'PowerPC')
@unittest.skipUnless(sys.platform == 'darwin', "OSX only test")
def test_mac_ver_with_fork(self):
# Issue7895: platform.mac_ver() crashes when using fork without exec
#
# This test checks that the fix for that issue works.
#
pid = os.fork()
if pid == 0:
# child
info = platform.mac_ver()
os._exit(0)
else:
# parent
cpid, sts = os.waitpid(pid, 0)
self.assertEqual(cpid, pid)
self.assertEqual(sts, 0)
def test_dist(self):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
res = platform.dist()
def test_libc_ver(self):
import os
if os.path.isdir(sys.executable) and \
os.path.exists(sys.executable+'.exe'):
# Cygwin horror
executable = sys.executable + '.exe'
else:
executable = sys.executable
res = platform.libc_ver(executable)
def test_parse_release_file(self):
for input, output in (
# Examples of release file contents:
('SuSE Linux 9.3 (x86-64)', ('SuSE Linux ', '9.3', 'x86-64')),
('SUSE LINUX 10.1 (X86-64)', ('SUSE LINUX ', '10.1', 'X86-64')),
('SUSE LINUX 10.1 (i586)', ('SUSE LINUX ', '10.1', 'i586')),
('Fedora Core release 5 (Bordeaux)', ('Fedora Core', '5', 'Bordeaux')),
('Red Hat Linux release 8.0 (Psyche)', ('Red Hat Linux', '8.0', 'Psyche')),
('Red Hat Linux release 9 (Shrike)', ('Red Hat Linux', '9', 'Shrike')),
('Red Hat Enterprise Linux release 4 (Nahant)', ('Red Hat Enterprise Linux', '4', 'Nahant')),
('CentOS release 4', ('CentOS', '4', None)),
('Rocks release 4.2.1 (Cydonia)', ('Rocks', '4.2.1', 'Cydonia')),
('', ('', '', '')), # If there's nothing there.
):
self.assertEqual(platform._parse_release_file(input), output)
def test_popen(self):
mswindows = (sys.platform == "win32")
if mswindows:
command = '"{}" -c "print(\'Hello\')"'.format(sys.executable)
else:
command = "'{}' -c 'print(\"Hello\")'".format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command) as stdout:
hello = stdout.read().strip()
stdout.close()
self.assertEqual(hello, "Hello")
data = 'plop'
if mswindows:
command = '"{}" -c "import sys; data=sys.stdin.read(); exit(len(data))"'
else:
command = "'{}' -c 'import sys; data=sys.stdin.read(); exit(len(data))'"
command = command.format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command, 'w') as stdin:
stdout = stdin.write(data)
ret = stdin.close()
self.assertIsNotNone(ret)
if os.name == 'nt':
returncode = ret
else:
returncode = ret >> 8
self.assertEqual(returncode, len(data))
def test_linux_distribution_encoding(self):
# Issue #17429
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, 'fedora-release')
with open(filename, 'w', encoding='utf-8') as f:
f.write('Fedora release 19 (Schr\xf6dinger\u2019s Cat)\n')
with mock.patch('platform._UNIXCONFDIR', tempdir):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
distname, version, distid = platform.linux_distribution()
self.assertEqual(distname, 'Fedora')
self.assertEqual(version, '19')
self.assertEqual(distid, 'Schr\xf6dinger\u2019s Cat')
class DeprecationTest(unittest.TestCase):
def test_dist_deprecation(self):
with self.assertWarns(PendingDeprecationWarning) as cm:
platform.dist()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
def test_linux_distribution_deprecation(self):
with self.assertWarns(PendingDeprecationWarning) as cm:
platform.linux_distribution()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
# Python 3 forward compatibility imports.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
import re
import nltk
from nltk.data import load
from itertools import chain, islice
from collections import defaultdict, Counter, namedtuple, deque
# Setup logger.
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
# A simple class for connecting tokens and tags.
class TaggedToken(object):
def __init__(self, token='', tag=''):
self._token = token
self.tag = tag
@property
def token(self):
return self._token
@token.setter
def token(self, token):
self._token = token
@property
def tag(self):
return self._tag
@tag.setter
def tag(self, tag):
self._tag = tag.upper()
@staticmethod
def tag_promoted(tagged_token, tag='TAG'):
return TaggedToken(token=tagged_token._tag, tag=tag)
def __str__(self):
return '{}/{}'.format(self._token, self._tag)
def __repr__(self):
return 'TaggedToken(token={}, tag={})'.format(self._token, self._tag)
def __eq__(self, other):
return self._token == other._token and self._tag == other._tag
def tag(text):
"""Tags the input text.
Arguments:
text (str): The text to tag.
Returns:
([[(str, str)]]): List of sentences containing lists of word/tag pairs.
"""
#Separate the input text into sentences
sentences = nltk.sent_tokenize(str(text))
#Separate each sentence into words
nested = []
for sentence in sentences:
nested.append(nltk.word_tokenize(sentence))
# Prepare default tagger
_POS_TAGGER = 'taggers/maxent_treebank_pos_tagger/english.pickle'
tagger = load(_POS_TAGGER) # Same tagger as using nltk.pos_tag
# Prepare regex tagger for custom tags
regexp_tagger = nltk.tag.RegexpTagger([(r'\(', '('),
(r'\)', ')'),
(r'\[', '['),
(r'\]', ']'),
(r'_+', 'None')],
backoff=tagger)
#Add a part of speech tag to each word
nested_tagged = []
for sentence in nested:
nested_tagged.append([TaggedToken(*x) for x in regexp_tagger.tag(sentence)])
return nested_tagged
def get_tags(tagged):
"""Returns a dict of all tags in the given tagged text, allong with their
counts and word set.
Arguments:
tagged (str): The tagged text.
Returns:
(dict):
"""
tag_dict = defaultdict(dict)
for token, tag in chain.from_iterable(tagged):
try:
tag_dict[tag][token] += 1
except KeyError:
tag_dict[tag].update({token : 1})
return dict(tag_dict)
def read_tagged_string(text):
"""Converts the given tagged text to a list of sentences containing
TaggedToken objects.
"""
def get_tagged(x):
return TaggedToken(*nltk.tag.str2tuple(x))
for line in text.split('\n'):
yield([get_tagged(x) for x in line.split()])
def tagged_to_plain(tagged):
"""Converts a list of TaggedToken object to plain text, dropping any tags.
"""
tagged = chain.from_iterable(tagged)
text = ' '.join(x.token for x in tagged)
text = re.sub(r"`` | ''", '"', text)
text = re.sub(r' (n\'t|\'s|[^\w\s\"\'])', r'\1', text)
return text
def parse_tag_parentheticals(tagged, lparen='(', rparen=')', use_tag=False):
"""Parses the given text and returns a tree of parentheticals.
Arguments:
text (str): The input text.
lparen (str): The left parenthetical delimiter. Defaults to '('.
rparen (str): The right parenthetical delimiter. Defaults to ')'.
use_tag (bool): Whether to match the delimiter against the tag or the
text. Defaults to False (text).
Returns:
(dict | [TaggedToken]): A dictionary representing the parse tree or a
list of tagged tokens. Each node of the tree will have the following
structure:
{'parens': (l, r), 'tagged': []}
where (l, r) are the parentheticals wrapping the text, and the list
contains tokens and subnodes.
        Unmatched lparens will be interpreted as regular tokens. Unmatched
rparens will have None as their second parens tuple element. If there
are no parentheticals, a list of tagged tokens will be returned.
"""
# Flatten hierarchical input.
tagged = chain.from_iterable(tagged)
part = 'tag' if use_tag else 'token'
# Build root of tree.
tree = {'parens': (None, None),
'tagged': []}
context = [tree]
# Keep parsing until nothing is left.
for tagged_token in tagged:
node = context[0]
# Match rparens.
if getattr(tagged_token, part) == rparen:
if node['parens'] == (None, None):
node['tagged'].append(tagged_token.token)
else:
node = context.pop(0)
node['parens'] = (node['parens'][0], tagged_token)
continue
# Match lparens.
if getattr(tagged_token, part) == lparen:
new_node = {'parens': (tagged_token, None),
'tagged': []}
node['tagged'].append(new_node)
context.insert(0, new_node)
continue
# Match text.
node['tagged'].append(tagged_token)
# Remove highest level tree if whole string is parenthetical.
if len(tree['tagged']) == 1:
tree = [tree['tagged'][0]]
return tree
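# Usage sketch (added illustration, not part of the original module; <tok/tag>
# is shorthand for a TaggedToken):
#
#   tokens = [[TaggedToken('a', 'DT'), TaggedToken('(', '('),
#              TaggedToken('b', 'NN'), TaggedToken(')', ')')]]
#   parse_tag_parentheticals(tokens)
#   # -> {'parens': (None, None),
#   #     'tagged': [<a/DT>,
#   #                {'parens': (<(/(>, <)/)>),
#   #                 'tagged': [<b/NN>]}]}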
def recombine_tag_parentheticals(parse_tree, selector_function=None):
"""Recombines tagged text seperated by the seperate_tag_parentheticals function by
using a selector function to determine which portions to keep or discard.
Arguments:
parse_tree (dict): A tree of parsed parentheticals
            (see parse_tag_parentheticals).
selector_function ((TAG, TAG), [TAG] -> true): A function taking a pair
of tagged parenthesis and a list of tagged tokens, and returning
whether to keep the tokens or discard them. Allows for selective
recombination of text. Defaults to None (everything is kept.)
(TAG is of the form (str, str))
Returns:
([TaggedToken]): The resulting tagged text.
Raises:
        (ValueError): When unknown values are contained in parse_tree.
"""
# Set default selector test function if none is provided.
selector_function = selector_function or (lambda x, y: True)
# Reconstruct parse tree root for lists and strings.
if isinstance(parse_tree, list):
parse_tree = {'parens': (None, None), 'tagged': parse_tree}
elif isinstance(parse_tree, tuple):
parse_tree = {'parens': (None, None), 'tagged': [parse_tree]}
tagged = []
for item in parse_tree['tagged']:
if isinstance(item, tuple):
tagged.append(item)
elif isinstance(item, dict):
# Recreate text from rest of this node.
res = recombine_tag_parentheticals(item,
selector_function=selector_function)
# Append text if it passes selector test.
if selector_function(parse_tree['parens'], res):
tagged.extend(res)
else:
raise ValueError('Unknown parse tree content.')
# Use selector test on the whole tree.
if selector_function(parse_tree['parens'], tagged):
l = [parse_tree['parens'][0]]
r = [parse_tree['parens'][1]]
return [x for x in chain.from_iterable([l, tagged, r]) if x is not None]
return []
def ngram(items, n=2, step=1):
"""Returns a generator producing the ngrams of lenght n of the input items.
Arguments:
items (list|iterator): A list or iterator of items.
n (int): The length of the n-grams.
Returns:
generator(tuple): A generator of tuples containing nearby items in
n-length groups.
"""
items = iter(items)
window = deque(islice(items, n))
    while True:
        if len(window) == 0:
            return  # Exhausted: end the generator (PEP 479-safe).
        yield tuple(window)
        for i in range(step):
            try:
                window.append(next(items))  # works on both Python 2 and 3
            except StopIteration:
                return
            window.popleft()
def nfollowing(items, n=1, step=1, default=None):
"""Returns a generator producing the items of the input, and n following
items.
Arguments:
items (list|iterator): A list or iterator of items.
n (int): The number of following items
default: The value to use for items past the end of the input. Defaults
to None.
Returns:
generator(tuple): A list of tuples containing nearby items in n-length
groups.
"""
    # Convert the input to an iterator and prime the window; if the input is
    # empty, the generator yields nothing.
items = iter(items)
window = deque(islice(items, n+1))
overflow = n
    while True:
        if len(window) == 0:
            return  # Exhausted: end the generator (PEP 479-safe).
        yield tuple(window)
        for i in range(step):
            try:
                window.append(next(items))  # works on both Python 2 and 3
            except StopIteration:
                if overflow > 0:
                    overflow -= 1
                    window.append(default)
                else:
                    return
            window.popleft()
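# Usage sketches (added illustration, not part of the original module):
#
#   list(ngram([1, 2, 3, 4], n=2))
#   # [(1, 2), (2, 3), (3, 4)]
#
#   list(nfollowing([1, 2, 3], n=1))
#   # [(1, 2), (2, 3), (3, None)]  -- the tail is padded with `default`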
|
|
#! /usr/bin/env python3
#
# Extract gene mention candidates, add features, and
# perform distant supervision
#
import fileinput
import re
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from helper.dictionaries import load_dict
from helper.easierlife import get_all_phrases_in_sentence, \
get_dict_from_TSVline, TSVstring2list, no_op
DOC_ELEMENTS = frozenset(
["figure", "table", "figures", "tables", "fig", "fig.", "figs", "figs.",
"file", "movie"])
INDIVIDUALS = frozenset(["individual", "individuals"])
TYPES = frozenset(["group", "type", "class", "method"])
# Keywords that are often associated with genes
VAR_KWS = frozenset([
"acetylation", "activate", "activation", "adaptor", "agonist", "alignment",
"allele", "antagonist", "antibody", "asynonymous", "backbone", "binding",
"biomarker", "breakdown", "cell", "cleavage", "cluster", "cnv",
"coactivator", "co-activator", "complex", "dd-genotype", "DD-genotype",
"deletion", "determinant", "domain", "duplication", "dysfunction",
"effector", "enhancer", "enrichment", "enzyme", "excision", "factor",
"family", "function", "functionality", "genotype",
"growth", "haplotype", "haplotypes", "heterozygous", "hexons", "hexon",
"histone", "homologue", "homology", "homozygous" "human",
"hypermetylation", "hybridization", "induce", "inducer", "induction",
"inhibitor", "inhibition", "intron", "interaction", "isoform", "isoforms",
"kinase", "kinesin", "level", "ligand", "location", "locus",
"mammalian", "marker", "methilation", "modification", "moiety", "molecule",
"molecules", "morpheein", "motif", "mutant", "mutation",
"mutations", "nonsynonymous", "non-synonymous", "nucleotide",
"oligomerization", "oncoprotein", "pathway", "peptide",
"pharmacokinetic", "pharmacodynamic", "pharmacogenetic" "phosphorylation",
"polymorphism", "proliferation", "promoter", "protein", "receptor",
"receptors", "recruitment", "region", "regulator", "release", "repressor",
"resistance", "retention", "ribonuclease", "role", "sequence",
"sequences", "sequestration", "serum", "signaling", "SNP", "SNPs",
"staining", "sumoylation", "synonymous", "target", "T-cell", "transducer",
"translocation", "transcribe", "transcript", "transcription",
"transporter", "variant", "variation", "vivo", "vitro"
])
KNOCK_KWS = frozenset(["knockdown", "knock-down", "knock-out", "knockout"])
AMINO_ACID_KWS = frozenset(["amino-acid", "aminoacid"])
ANTIGENE_KWS = frozenset(["antigen", "antigene", "anti-gen", "anti-gene"])
DNA_KWS = frozenset([
"cdna", "cDNA", "dna", "mrna", "mRNA", "rna", "rrna", "sirnas", "sirna",
"siRNA", "siRNAs"])
DOWNREGULATION_KWS = frozenset(["down-regulation", "downregulation"])
UPREGULATION_KWS = frozenset(["up-regulation", "upregulation"])
TUMOR_KWS = frozenset([
"tumor", "tumours", "tumour", "cancer", "carcinoma", "fibrosarcoma",
"sarcoma", "lymphoma"])
GENE_KWS = frozenset([
"gene", "protooncogene", "proto-oncogene", "pseudogene", "transgene"])
COEXPRESSION_KWS = frozenset([
"expression", "overexpression", "over-expression", "co-expression",
"coexpression"])
KEYWORDS = VAR_KWS | KNOCK_KWS | AMINO_ACID_KWS | ANTIGENE_KWS | DNA_KWS | \
    DOWNREGULATION_KWS | UPREGULATION_KWS | TUMOR_KWS | GENE_KWS | \
    COEXPRESSION_KWS
# Snowball positive features
# NO LONGER USED
# snowball_pos_feats = frozenset([
# "EXT_KEYWORD_MIN_[gene]@nn",
# "EXT_KEYWORD_MIN_[gene]nn@",
# "EXT_KEYWORD_MIN_[promoter]nn@",
# "EXT_KEYWORD_MIN_[protein]nn@",
# "EXT_KEYWORD_MIN_[protein]@nn",
# "EXT_KEYWORD_MIN_[protein]nn@nn",
# "EXT_KEYWORD_MIN_[protein]nsubj@",
# "EXT_KEYWORD_MIN_[binding]prep_with@",
# "EXT_KEYWORD_MIN_[mrna]nn@",
# "EXT_KEYWORD_MIN_[activation]nn@",
# "EXT_KEYWORD_MIN_[oligomerization]nn@",
# "EXT_KEYWORD_MIN_[methylation]prep_of@",
# "EXT_KEYWORD_MIN_[antibody]nn@",
# "EXT_KEYWORD_MIN_[polymorphism]prep_of@",
# "EXT_KEYWORD_MIN_[gene]appos@",
# "EXT_KEYWORD_MIN_[enzyme]@nn",
# "EXT_KEYWORD_MIN_[phosphorylation]prep_of@",
# "EXT_KEYWORD_MIN_[receptor]@nn",
# "EXT_KEYWORD_MIN_[histone]@nn",
# "EXT_KEYWORD_MIN_[receptor]nn",
# "IS_LONG_ALPHANUMERIC_MAIN_SYMBOL", "IS_HYPHENATED_SYMBOL", "IS_LONG_NAME"
# ])
# Load the dictionaries that we need
merged_genes_dict = load_dict("merged_genes")
english_dict = load_dict("english")
stopwords_dict = load_dict("stopwords")
pos_mentions_dict = load_dict("pos_gene_mentions")
neg_mentions_dict = load_dict("neg_gene_mentions")
med_acrons_dict = load_dict("med_acrons")
long_names_dict = load_dict("long_names")
inverted_long_names = load_dict("inverted_long_names")
hpoterms_with_gene = load_dict("hpoterms_with_gene")
# Max mention length. We won't look at subsentences longer than this.
max_mention_length = 0
for key in merged_genes_dict:
length = len(key.split())
if length > max_mention_length:
max_mention_length = length
# Double it to allow for commas and other tokens that can appear inside mentions
max_mention_length *= 2
# Add features to a gene mention candidate
def add_features(mention, sentence):
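    # Overview of the context features added below (names as emitted by this
    # function): VERB_[lemma]<dep path> for the closest verb,
    # KEYWORD_[kw]<dep path> / EXT_KEYWORD_MIN_[kw]<dep path> / KEYWORD_MIN_[kw]
    # for gene-related keywords, OTHER_GENE_[lemma]<dep path> for other gene
    # symbols in the sentence, NGRAM_LEFT_1_/NGRAM_RIGHT_1_ for the neighboring
    # lemmas, IS_YEAR_LEFT/IS_YEAR_RIGHT, IS_BETWEEN_GENES, GENE_ON_LEFT,
    # GENE_ON_RIGHT, APPEARS_MANY_TIMES_IN_SENTENCE, and MANY_<NER>_IN_SENTENCE.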
# The verb closest to the candidate, with the path to it.
minl = 100
minp = None
minw = None
for word in mention.words:
for word2 in sentence.words:
if word2.lemma.isalpha() and re.search('^VB[A-Z]*$', word2.pos) \
and word2.lemma != 'be':
# Ignoring "be" comes from pharm (Emily)
p = sentence.get_word_dep_path(word.in_sent_idx,
word2.in_sent_idx)
if len(p) < minl:
minl = len(p)
minp = p
minw = word2.lemma
if minw:
mention.add_feature('VERB_[' + minw + ']' + minp)
# The keywords that appear in the sentence with the mention
minl = 100
minp = None
minw = None
for word in mention.words:
for word2 in sentence.words:
if word2.lemma in KEYWORDS:
p = sentence.get_word_dep_path(
word.in_sent_idx, word2.in_sent_idx)
kw = word2.lemma
if word2.lemma in KNOCK_KWS:
kw = "_KNOCKOUT"
elif word2.lemma in ANTIGENE_KWS:
kw = "_ANTIGENE"
elif word2.lemma in AMINO_ACID_KWS:
kw = "_AMINOACID"
# elif word2.lemma in DNA_KWS:
# kw = "_DNA"
elif word2.lemma in DOWNREGULATION_KWS:
kw = "_DOWNREGULATION"
elif word2.lemma in UPREGULATION_KWS:
kw = "_UPREGULATION"
# elif word2.lemma in TUMOR_KWS:
# kw = "_TUMOR"
# elif word2.lemma in GENE_KWS:
# kw = "_GENE"
# elif word2.lemma in COEXPRESSION_KWS:
# ke = "_COEXPRESSION"
if len(p) < minl:
minl = len(p)
minp = p
minw = kw
if len(p) < 100:
mention.add_feature("KEYWORD_[" + kw + "]" + p)
# Special features for the keyword on the shortest dependency path
if minw:
mention.add_feature('EXT_KEYWORD_MIN_[' + minw + ']' + minp)
mention.add_feature('KEYWORD_MIN_[' + minw + ']')
# If another gene is present in the sentence, add a feature with that gene
# and the path to it. This comes from pharm.
minl = 100
minp = None
minw = None
for word in mention.words:
for word2 in sentence.words:
if word2.in_sent_idx not in mention.wordidxs and \
word2.word in merged_genes_dict:
p = sentence.get_word_dep_path(
word.in_sent_idx, word2.in_sent_idx)
if len(p) < minl:
minl = len(p)
minp = p
minw = word2.lemma
if minw:
mention.add_feature('OTHER_GENE_['+minw+']' + minp)
# mention.add_feature('OTHER_GENE_['+minw+']')
# The lemma on the left of the candidate, whatever it is
try:
left = sentence.words[mention.words[0].in_sent_idx-1].lemma
try:
float(left)
left = "_NUMBER"
except ValueError:
pass
mention.add_feature("NGRAM_LEFT_1_[" + left + "]")
except IndexError:
pass
# The lemma on the right of the candidate, whatever it is
try:
right = sentence.words[mention.words[-1].in_sent_idx+1].lemma
try:
float(right)
right = "_NUMBER"
except ValueError:
pass
mention.add_feature("NGRAM_RIGHT_1_[" + right + "]")
except IndexError:
pass
    # We now check whether the lemma on the left and on the right are
# "special", for example a year or a gene.
# The concept of left or right is a little tricky here, as we are actually
# looking at the first word that contains only letters and is not a
# stopword.
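    # Concretely, the scans below skip any token whose lemma is a single
    # character, and tokens that are non-alphanumeric or lowercase stopwords,
    # unless they look like a number or a known gene symbol.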
idx = mention.wordidxs[0] - 1
gene_on_left = None
gene_on_right = None
while idx >= 0 and \
((((not sentence.words[idx].lemma.isalnum() and not
sentence.words[idx] in merged_genes_dict) or
(not sentence.words[idx].word.isupper() and
sentence.words[idx].lemma in stopwords_dict)) and
not re.match("^[0-9]+(.[0-9]+)?$", sentence.words[idx].word)
and not sentence.words[idx] in merged_genes_dict) or
len(sentence.words[idx].lemma) == 1):
idx -= 1
if idx >= 0:
mention.left_lemma = sentence.words[idx].lemma
if sentence.words[idx].word in merged_genes_dict and \
len(sentence.words[idx].word) > 3:
gene_on_left = sentence.words[idx].word
try:
year = float(sentence.words[idx].word)
if round(year) == year and year > 1950 and year <= 2014:
mention.add_feature("IS_YEAR_LEFT")
except:
pass
# The word on the right of the mention, if present, provided it's
# alphanumeric but not a number
idx = mention.wordidxs[-1] + 1
while idx < len(sentence.words) and \
((((not sentence.words[idx].lemma.isalnum() and not
sentence.words[idx] in merged_genes_dict) or
(not sentence.words[idx].word.isupper() and
sentence.words[idx].lemma in stopwords_dict)) and
not re.match("^[0-9]+(.[0-9]+)?$", sentence.words[idx].word)
and not sentence.words[idx] in merged_genes_dict) or
len(sentence.words[idx].lemma) == 1):
idx += 1
if idx < len(sentence.words):
mention.right_lemma = sentence.words[idx].lemma
if sentence.words[idx].word in merged_genes_dict and \
len(sentence.words[idx].word) > 3:
gene_on_right = sentence.words[idx].word
try:
year = float(sentence.words[idx].word)
if round(year) == year and year > 1950 and year <= 2014:
mention.add_feature("IS_YEAR_RIGHT")
except:
pass
if gene_on_left and gene_on_right:
mention.add_feature("IS_BETWEEN_GENES")
elif gene_on_left:
mention.add_feature("GENE_ON_LEFT")
elif gene_on_right:
mention.add_feature("GENE_ON_RIGHT")
# The candidate is a single word that appears many times (more than 4) in
# the sentence
if len(mention.words) == 1 and \
[w.word for w in sentence.words].count(mention.words[0].word) > 4:
mention.add_feature("APPEARS_MANY_TIMES_IN_SENTENCE")
# There are many PERSONs/ORGANIZATIONs/LOCATIONs in the sentence
for ner in ["PERSON", "ORGANIZATION", "LOCATION"]:
if [x.lemma for x in sentence.words].count(ner) > 4:
mention.add_feature("MANY_{}_IN_SENTENCE".format(ner))
# The candidate comes after an organization, or a location, or a person.
# We skip commas as they may trick us.
# comes_after = None
# loc_idx = mention.wordidxs[0] - 1
# while loc_idx >= 0 and sentence.words[loc_idx].lemma == ",":
# loc_idx -= 1
# if loc_idx >= 0 and \
# sentence.words[loc_idx].ner in \
# ["ORGANIZATION", "LOCATION", "PERSON"] and \
# sentence.words[loc_idx].word not in merged_genes_dict:
# comes_after = sentence.words[loc_idx].ner
# The candidate comes before an organization, or a location, or a person.
# We skip commas, as they may trick us.
# comes_before = None
# loc_idx = mention.wordidxs[-1] + 1
# while loc_idx < len(sentence.words) and \
# sentence.words[loc_idx].lemma == ",":
# loc_idx += 1
# if loc_idx < len(sentence.words) and sentence.words[loc_idx].ner in \
# ["ORGANIZATION", "LOCATION", "PERSON"] and \
# sentence.words[loc_idx].word not in merged_genes_dict:
# comes_before = sentence.words[loc_idx].ner
# All the following is commented out because it's not a context feature
# The following features deal with the "appearance" of the symbol.
# They are _not_ context features, but they are reasonable.
# If it looks like a duck, it quacks like a duck, and it flies like a duck,
# then it's probably a duck.
# All the following features are added only if the candidate is a single
# word.
# if len(mention.words) == 1:
# entity_is_word = False
# entity_in_dict = False
# for entity in mention.entity.split("|"):
# if entity == mention.words[0].word:
# entity_is_word = True
# if entity in merged_genes_dict:
# entity_in_dict = True
# if entity_is_word and entity_in_dict and \
# (comes_before is None or comes_after is None):
# # The mention is a 'main' symbol
# if mention.words[0].word.isalnum() and \
# not mention.words[0].word.isalpha():
# if len(mention.words[0].word) >= 4:
# mention.add_feature("IS_LONG_ALPHANUMERIC_MAIN_SYMBOL")
# else:
# is_letter_plus_number = False
# try:
# int(mention.words[0].word[1:])
# is_letter_plus_number = True
# except:
# is_letter_plus_number = False
# if is_letter_plus_number:
# mention.add_feature(
# "IS_LETTER_NUMBER_MAIN_SYMBOL_[{}]".format(
# mention.words[0].word))
# else:
# mention.add_feature(
# "IS_SHORT_ALPHANUMERIC_MAIN_SYMBOL_[{}]".format(
# mention.words[0].word))
# elif len(mention.words[0].word) >= 4:
# mention.add_feature("IS_LONG_MAIN_SYMBOL_[{}]".format(
# mention.words[0].word))
# elif entity_in_dict or mention.words[0].word in merged_genes_dict:
# if len(mention.words[0].word) > 3 and \
# mention.words[0].word.casefold() == mention.words[0].word \
# and not re.match("^p[0-9]+$", mention.words[0].word):
# # Long name - We supervise these.
# #mention.add_feature("IS_LONG_NAME")
# pass
# elif mention.words[0].word in inverted_long_names:
# # Long name - We supervise these
# #mention.add_feature("IS_LONG_NAME")
# pass
# elif "-" in mention.words[0].word and comes_after != "PERSON":
# mention.add_feature("IS_HYPHENATED_SYMBOL")
# elif mention.words[0].word.casefold().endswith("alpha") or \
# mention.words[0].word.casefold().endswith("beta") or \
# mention.words[0].word.casefold().endswith("gamma"):
# mention.add_feature("ENDS_WITH_GREEK")
# elif re.match("^p[0-9][0-9]$", mention.words[0].word):
# mention.add_feature("IS_PXX_SYMBOL_[{}]".format(
# mention.words[0].word))
# elif mention.words[0].word.isalnum() and \
# not mention.words[0].word.isalpha():
# if len(mention.words[0].word) >= 4:
# mention.add_feature(
# "IS_LONG_ALPHANUMERIC_ALTERN_SYMBOL_[{}]".format(
# mention.words[0].word))
# elif len(mention.words[0].word) >= 4:
# mention.add_feature("IS_LONG_ALTERN_SYMBOL_[{}]".format(
# mention.words[0].word))
# Supervise the candidates.
def supervise(mentions, sentence):
phrase = " ".join([x.word for x in sentence.words])
new_mentions = []
for mention in mentions:
new_mentions.append(mention)
if mention.is_correct is not None:
continue
# The candidate is a long name.
if " ".join([word.word for word in mention.words]) in \
inverted_long_names:
mention.is_correct = True
mention.type = "GENE_SUP_long"
continue
# The candidate is a MIM entry
if mention.words[0].word == "MIM":
mention_word_idx = mention.words[0].in_sent_idx
if mention_word_idx < len(sentence.words) - 1:
next_word = sentence.words[mention_word_idx + 1].word
if next_word.casefold() in ["no", "no.", "#", ":"] and \
mention_word_idx + 2 < len(sentence.words):
next_word = sentence.words[mention_word_idx + 2].word
try:
int(next_word)
mention.is_correct = False
mention.type = "GENE_SUP_MIM"
continue
except ValueError:
pass
# The phrase starts with words that are indicative of the candidate not
# being a mention of a gene
# We add a feature for this, as it is a context property
if phrase.startswith("Performed the experiments :") or \
phrase.startswith("Wrote the paper :") or \
phrase.startswith("W'rote the paper :") or \
phrase.startswith("Wlrote the paper") or \
phrase.startswith("Contributed reagents") or \
phrase.startswith("Analyzed the data :") or \
phrase.casefold().startswith("address"):
# An unsupervised copy with the special feature
unsuper_enriched = Mention(
"GENE_dontsup", mention.entity, mention.words)
unsuper_enriched.features = mention.features.copy()
unsuper_enriched.add_feature("IN_CONTRIB_PHRASE")
new_mentions.append(unsuper_enriched)
            # This candidate contains only the 'special' feature.
super_spec = Mention(
"GENE_SUP_contr_2", mention.entity, mention.words)
super_spec.is_correct = False
super_spec.add_feature("IN_CONTRIB_PHRASE")
new_mentions.append(super_spec)
# Set is_correct and type.
mention.is_correct = False
mention.type = "GENE_SUP_contr_1"
continue
# The candidate is an entry in Gene Ontology
if len(mention.words) == 1 and mention.words[0].word == "GO":
try:
                if sentence.words[mention.words[0].in_sent_idx + 1].word[0] == ":":
mention.is_correct = False
mention.type = "GENE_SUP_go"
except:
pass
continue
# Index of the word on the left
idx = mention.wordidxs[0] - 1
if idx >= 0:
# The candidate is preceded by a "%" (it's probably a quantity)
if sentence.words[idx].word == "%":
mention.is_correct = False
mention.type = "GENE_SUP_%"
continue
# The candidate comes after a "document element" (e.g., table, or
# figure)
if sentence.words[idx].word.casefold() in DOC_ELEMENTS:
mention.is_correct = False
mention.type = "GENE_SUP_doc"
continue
# The candidate comes after an "individual" word (e.g.,
# "individual")
if sentence.words[idx].word.casefold() in INDIVIDUALS and \
not mention.words[0].word.isalpha() and \
not len(mention.words[0].word) > 4:
mention.is_correct = False
mention.type = "GENE_SUP_indiv"
continue
# The candidate comes after a "type" word, and it is made only of
# the letters "I" and "V"
if sentence.words[idx].lemma.casefold() in TYPES and \
set(mention.words[0].word).issubset(set(["I", "V"])):
mention.is_correct = False
mention.type = "GENE_SUP_type"
continue
# Index of the word on the right
idx = mention.wordidxs[-1] + 1
if idx < len(sentence.words):
# The candidate is followed by a "=" (it's probably a quantity)
if sentence.words[idx].word == "=":
mention.is_correct = False
mention.type = "GENE_SUP_="
continue
# The candidate is followed by a ":" and the word after it is a
# number (it's probably a quantity)
if sentence.words[idx].word == ":":
try:
float(sentence.words[idx + 1].word)
mention.is_correct = False
mention.type = "GENE_SUP_:"
                except (ValueError, IndexError):
pass
continue
# The candidate comes before "et"
if sentence.words[idx].word == "et":
mention.is_correct = False
mention.type = "GENE_SUP_et"
continue
# The candidate is a DNA triplet
# We check this by looking at whether the word before or after is also
# a DNA triplet.
if len(mention.words) == 1 and len(mention.words[0].word) == 3 and \
set(mention.words[0].word) <= set("ACGT"):
done = False
idx = mention.wordidxs[0] - 1
if idx > 0:
if set(sentence.words[idx].word) <= set("ACGT"):
mention.is_correct = False
mention.type = "GENE_SUP_dna"
continue
idx = mention.wordidxs[-1] + 1
if not done and idx < len(sentence.words):
if set(sentence.words[idx].word) <= set("ACGT"):
mention.is_correct = False
mention.type = "GENE_SUP_dna"
continue
# If it's "II", it's most probably wrong.
if mention.words[0].word == "II":
mention.is_correct = False
mention.type = "GENE_SUP_ii"
continue
# Snowball positive features
# Commented out to avoid overfitting
# if mention.features & snowball_pos_feats:
# supervised = Mention("GENE_SUP", mention.entity,
# mention.words)
# supervised.features = mention.features - snowball_pos_feats
# supervised.is_correct = True
# new_mentions.append(supervised)
# supervised2 = Mention("GENE_SUP", mention.entity,
# mention.words)
# supervised2.features = mention.features & snowball_pos_feats
# supervised2.is_correct = True
# new_mentions.append(supervised2)
# continue
# Some negative features
# if "EXT_KEYWORD_MIN_[chromosome]@nn" in mention.features:
# supervised = Mention("GENE_SUP", mention.entity, mention.words)
# supervised.features = mention.features.copy()
# supervised.is_correct = False
# new_mentions.append(supervised)
# continue
# if "IS_YEAR_RIGHT" in mention.features:
# supervised = Mention("GENE_SUP", mention.entity, mention.words)
# supervised.features = mention.features.copy()
# supervised.is_correct = False
# new_mentions.append(supervised)
# continue
# The candidate comes after an organization, or a location, or a
# person. We skip commas as they may trick us.
comes_after = None
loc_idx = mention.wordidxs[0] - 1
while loc_idx >= 0 and sentence.words[loc_idx].lemma == ",":
loc_idx -= 1
if loc_idx >= 0 and \
sentence.words[loc_idx].ner in \
["ORGANIZATION", "LOCATION", "PERSON"] and \
sentence.words[loc_idx].word not in merged_genes_dict:
comes_after = sentence.words[loc_idx].ner
# The candidate comes before an organization, or a location, or a
# person. We skip commas, as they may trick us.
comes_before = None
loc_idx = mention.wordidxs[-1] + 1
while loc_idx < len(sentence.words) and \
sentence.words[loc_idx].lemma == ",":
loc_idx += 1
if loc_idx < len(sentence.words) and sentence.words[loc_idx].ner in \
["ORGANIZATION", "LOCATION", "PERSON"] and \
sentence.words[loc_idx].word not in merged_genes_dict:
comes_before = sentence.words[loc_idx].ner
# Not correct if it's most probably a person name.
if comes_before and comes_after:
mention.is_correct = False
mention.type = "GENE_SUP_name"
continue
# Comes after person and before "," or ":", so it's probably a person
# name
if comes_after == "PERSON" and \
mention.words[-1].in_sent_idx + 1 < len(sentence.words) and \
sentence.words[mention.words[-1].in_sent_idx + 1].word \
in [",", ":"]:
mention.is_correct = False
mention.type = "GENE_SUP_name2"
continue
if comes_after == "PERSON" and mention.words[0].ner == "PERSON":
mention.is_correct = False
mention.type = "GENE_SUP_name3"
continue
# Is a location and comes before a location so it's probably wrong
if comes_before == "LOCATION" and mention.words[0].ner == "LOCATION":
mention.is_correct = False
mention.type = "GENE_SUP_loc"
continue
return new_mentions
# Return a list of mention candidates extracted from the sentence
def extract(sentence):
mentions = []
# Skip the sentence if there are no English words in the sentence
no_english_words = True
for word in sentence.words:
if len(word.word) > 2 and \
(word.word in english_dict or
word.word.casefold() in english_dict):
no_english_words = False
break
if no_english_words:
return [] # Stop iteration
sentence_is_upper = False
if " ".join([x.word for x in sentence.words]).isupper():
sentence_is_upper = True
# The following set keeps a list of indexes we already looked at and which
# contained a mention
history = set()
words = sentence.words
# Scan all subsequences of the sentence of length up to max_mention_length
for start, end in get_all_phrases_in_sentence(sentence,
max_mention_length):
if start in history or end in history:
continue
phrase = " ".join([word.word for word in words[start:end]])
if sentence_is_upper: # This may not be a great idea...
phrase = phrase.casefold()
mention = None
# If the phrase is a hpoterm name containing a gene, then it is a
# mention candidate to supervise as negative
if phrase in hpoterms_with_gene:
mention = Mention("GENE_SUP_HPO", phrase, words[start:end])
add_features(mention, sentence)
mention.is_correct = False
mentions.append(mention)
for i in range(start, end):
history.add(i)
# If the phrase is in the gene dictionary, then is a mention candidate
if len(phrase) > 1 and phrase in merged_genes_dict:
# The entity is a list of all the main symbols that could have the
# phrase as symbol. They're separated by "|".
mention = Mention("GENE",
"|".join(merged_genes_dict[phrase]),
words[start:end])
# Add features to the candidate
add_features(mention, sentence)
# Add mention to the list
mentions.append(mention)
# Add indexes to history so that they are not used for another
# mention
for i in range(start, end):
history.add(i)
return mentions
if __name__ == "__main__":
# Process the input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line, ["doc_id", "sent_id", "wordidxs", "words", "poses",
"ners", "lemmas", "dep_paths", "dep_parents",
"bounding_boxes"],
[no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
TSVstring2list, TSVstring2list, TSVstring2list,
TSVstring2list, lambda x: TSVstring2list(x, int),
TSVstring2list])
# Create the sentence object
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_id"],
line_dict["wordidxs"], line_dict["words"], line_dict["poses"],
line_dict["ners"], line_dict["lemmas"], line_dict["dep_paths"],
line_dict["dep_parents"], line_dict["bounding_boxes"])
# Skip weird sentences
if sentence.is_weird():
continue
# Get list of mentions candidates in this sentence
mentions = extract(sentence)
# Supervise them
new_mentions = supervise(mentions, sentence)
# Print!
for mention in new_mentions:
print(mention.tsv_dump())
|
|
from inspect import signature
from functools import partial
import string
from typing import Any, Dict, Optional, List, Callable
from django.http import HttpRequest, HttpResponse
from zerver.models import UserProfile
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.git import TOPIC_WITH_BRANCH_TEMPLATE, \
get_push_tag_event_message, get_remove_branch_event_message, \
get_create_branch_event_message, get_commits_comment_action_message, \
TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE, get_pull_request_event_message, \
CONTENT_MESSAGE_TEMPLATE
from zerver.lib.webhooks.common import check_send_webhook_message, \
UnexpectedWebhookEventType
from zerver.webhooks.bitbucket2.view import BITBUCKET_TOPIC_TEMPLATE, \
BITBUCKET_FORK_BODY, BITBUCKET_REPO_UPDATED_CHANGED
BRANCH_UPDATED_MESSAGE_TEMPLATE = "{user_name} pushed to branch {branch_name}. Head is now {head}."
PULL_REQUEST_MARKED_AS_NEEDS_WORK_TEMPLATE = "{user_name} marked [PR #{number}]({url}) as \"needs work\"."
PULL_REQUEST_MARKED_AS_NEEDS_WORK_TEMPLATE_WITH_TITLE = """
{user_name} marked [PR #{number} {title}]({url}) as \"needs work\".
""".strip()
PULL_REQUEST_REASSIGNED_TEMPLATE = "{user_name} reassigned [PR #{number}]({url}) to {assignees}."
PULL_REQUEST_REASSIGNED_TEMPLATE_WITH_TITLE = """
{user_name} reassigned [PR #{number} {title}]({url}) to {assignees}.
""".strip()
PULL_REQUEST_REASSIGNED_TO_NONE_TEMPLATE = "{user_name} removed all reviewers from [PR #{number}]({url})."
PULL_REQUEST_REASSIGNED_TO_NONE_TEMPLATE_WITH_TITLE = """
{user_name} removed all reviewers from [PR #{number} {title}]({url})
""".strip()
PULL_REQUEST_OPENED_OR_MODIFIED_TEMPLATE_WITH_REVIEWERS = """
{user_name} {action} [PR #{number}]({url}) from `{source}` to \
`{destination}` (assigned to {assignees} for review)
""".strip()
PULL_REQUEST_OPENED_OR_MODIFIED_TEMPLATE_WITH_REVIEWERS_WITH_TITLE = """
{user_name} {action} [PR #{number} {title}]({url}) from `{source}` to \
`{destination}` (assigned to {assignees} for review)
""".strip()
fixture_to_headers = lambda x: {"HTTP_X_EVENT_KEY": "diagnostics:ping"} if x == "diagnostics_ping" else None
def get_user_name(payload: Dict[str, Any]) -> str:
user_name = "[{name}]({url})".format(name=payload["actor"]["name"],
url=payload["actor"]["links"]["self"][0]["href"])
return user_name
def ping_handler(payload: Dict[str, Any], include_title: Optional[str]=None
) -> List[Dict[str, str]]:
if include_title:
subject = include_title
else:
subject = "Bitbucket Server Ping"
body = "Congratulations! The Bitbucket Server webhook was configured successfully!"
return [{"subject": subject, "body": body}]
def repo_comment_handler(payload: Dict[str, Any], action: str) -> List[Dict[str, str]]:
repo_name = payload["repository"]["name"]
subject = BITBUCKET_TOPIC_TEMPLATE.format(repository_name=repo_name)
sha = payload["commit"]
commit_url = payload["repository"]["links"]["self"][0]["href"][:-6] # remove the "browse" at the end
commit_url += "commits/%s" % (sha,)
message = payload["comment"]["text"]
if action == "deleted their comment":
message = "~~{message}~~".format(message=message)
body = get_commits_comment_action_message(
user_name=get_user_name(payload),
action=action,
commit_url=commit_url,
sha=sha,
message=message
)
return [{"subject": subject, "body": body}]
def repo_forked_handler(payload: Dict[str, Any]) -> List[Dict[str, str]]:
repo_name = payload["repository"]["origin"]["name"]
subject = BITBUCKET_TOPIC_TEMPLATE.format(repository_name=repo_name)
body = BITBUCKET_FORK_BODY.format(
display_name=payload["actor"]["displayName"],
username=get_user_name(payload),
fork_name=payload["repository"]["name"],
fork_url=payload["repository"]["links"]["self"][0]["href"]
)
return [{"subject": subject, "body": body}]
def repo_modified_handler(payload: Dict[str, Any]) -> List[Dict[str, str]]:
subject_new = BITBUCKET_TOPIC_TEMPLATE.format(repository_name=payload["new"]["name"])
new_name = payload['new']['name']
body = BITBUCKET_REPO_UPDATED_CHANGED.format(
actor=get_user_name(payload),
change="name",
repo_name=payload["old"]["name"],
old=payload["old"]["name"],
new=new_name
) # As of writing this, the only change we'd be notified about is a name change.
punctuation = '.' if new_name[-1] not in string.punctuation else ''
body = "{}{}".format(body, punctuation)
return [{"subject": subject_new, "body": body}]
def repo_push_branch_data(payload: Dict[str, Any], change: Dict[str, Any]) -> Dict[str, str]:
event_type = change["type"]
repo_name = payload["repository"]["name"]
user_name = get_user_name(payload)
branch_name = change["ref"]["displayId"]
branch_head = change["toHash"]
if event_type == "ADD":
body = get_create_branch_event_message(
user_name=user_name,
url=None,
branch_name=branch_name
)
elif event_type == "UPDATE":
body = BRANCH_UPDATED_MESSAGE_TEMPLATE.format(
user_name=user_name,
branch_name=branch_name,
head=branch_head
)
elif event_type == "DELETE":
body = get_remove_branch_event_message(user_name, branch_name)
else:
message = "%s.%s" % (payload["eventKey"], event_type) # nocoverage
raise UnexpectedWebhookEventType("BitBucket Server", message)
subject = TOPIC_WITH_BRANCH_TEMPLATE.format(repo=repo_name, branch=branch_name)
return {"subject": subject, "body": body}
def repo_push_tag_data(payload: Dict[str, Any], change: Dict[str, Any]) -> Dict[str, str]:
event_type = change["type"]
repo_name = payload["repository"]["name"]
tag_name = change["ref"]["displayId"]
if event_type == "ADD":
action = "pushed"
elif event_type == "DELETE":
action = "removed"
else:
message = "%s.%s" % (payload["eventKey"], event_type) # nocoverage
raise UnexpectedWebhookEventType("BitBucket Server", message)
subject = BITBUCKET_TOPIC_TEMPLATE.format(repository_name=repo_name)
body = get_push_tag_event_message(get_user_name(payload), tag_name, action=action)
return {"subject": subject, "body": body}
def repo_push_handler(payload: Dict[str, Any], branches: Optional[str]=None
) -> List[Dict[str, str]]:
data = []
for change in payload["changes"]:
event_target_type = change["ref"]["type"]
if event_target_type == "BRANCH":
branch = change["ref"]["displayId"]
if branches:
if branch not in branches:
continue
data.append(repo_push_branch_data(payload, change))
elif event_target_type == "TAG":
data.append(repo_push_tag_data(payload, change))
else:
message = "%s.%s" % (payload["eventKey"], event_target_type) # nocoverage
raise UnexpectedWebhookEventType("BitBucket Server", message)
return data
def get_assignees_string(pr: Dict[str, Any]) -> Optional[str]:
reviewers = []
for reviewer in pr["reviewers"]:
name = reviewer["user"]["name"]
link = reviewer["user"]["links"]["self"][0]["href"]
reviewers.append("[%s](%s)" % (name, link))
if len(reviewers) == 0:
assignees = None
elif len(reviewers) == 1:
assignees = reviewers[0]
else:
assignees = ", ".join(reviewers[:-1]) + " and " + reviewers[-1]
return assignees
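# Example output of get_assignees_string() (reviewer names are hypothetical):
#     one reviewer    -> "[alice](...)"
#     three reviewers -> "[alice](...), [bob](...) and [carol](...)"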
def get_pr_subject(repo: str, type: str, id: str, title: str) -> str:
return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(repo=repo, type=type, id=id, title=title)
def get_simple_pr_body(payload: Dict[str, Any], action: str, include_title: Optional[bool]) -> str:
pr = payload["pullRequest"]
return get_pull_request_event_message(
user_name=get_user_name(payload),
action=action,
url=pr["links"]["self"][0]["href"],
number=pr["id"],
title=pr["title"] if include_title else None
)
def get_pr_opened_or_modified_body(payload: Dict[str, Any], action: str,
include_title: Optional[bool]) -> str:
pr = payload["pullRequest"]
description = pr.get("description")
assignees_string = get_assignees_string(pr)
if assignees_string:
# Then use the custom message template for this particular integration so that we can
# specify the reviewers at the end of the message (but before the description/message).
parameters = {"user_name": get_user_name(payload),
"action": action,
"url": pr["links"]["self"][0]["href"],
"number": pr["id"],
"source": pr["fromRef"]["displayId"],
"destination": pr["toRef"]["displayId"],
"message": description,
"assignees": assignees_string,
"title": pr["title"] if include_title else None}
if include_title:
body = PULL_REQUEST_OPENED_OR_MODIFIED_TEMPLATE_WITH_REVIEWERS_WITH_TITLE.format(
**parameters
)
else:
body = PULL_REQUEST_OPENED_OR_MODIFIED_TEMPLATE_WITH_REVIEWERS.format(**parameters)
punctuation = ':' if description else '.'
body = "{}{}".format(body, punctuation)
if description:
body += '\n' + CONTENT_MESSAGE_TEMPLATE.format(message=description)
return body
return get_pull_request_event_message(
user_name=get_user_name(payload),
action=action,
url=pr["links"]["self"][0]["href"],
number=pr["id"],
target_branch=pr["fromRef"]["displayId"],
base_branch=pr["toRef"]["displayId"],
message=pr.get("description"),
assignee=assignees_string if assignees_string else None,
title=pr["title"] if include_title else None
)
def get_pr_needs_work_body(payload: Dict[str, Any], include_title: Optional[bool]) -> str:
pr = payload["pullRequest"]
if not include_title:
return PULL_REQUEST_MARKED_AS_NEEDS_WORK_TEMPLATE.format(
user_name=get_user_name(payload),
number=pr["id"],
url=pr["links"]["self"][0]["href"]
)
return PULL_REQUEST_MARKED_AS_NEEDS_WORK_TEMPLATE_WITH_TITLE.format(
user_name=get_user_name(payload),
number=pr["id"],
url=pr["links"]["self"][0]["href"],
title=pr["title"]
)
def get_pr_reassigned_body(payload: Dict[str, Any], include_title: Optional[bool]) -> str:
pr = payload["pullRequest"]
assignees_string = get_assignees_string(pr)
if not assignees_string:
if not include_title:
return PULL_REQUEST_REASSIGNED_TO_NONE_TEMPLATE.format(
user_name=get_user_name(payload),
number=pr["id"],
url=pr["links"]["self"][0]["href"]
)
punctuation = '.' if pr['title'][-1] not in string.punctuation else ''
message = PULL_REQUEST_REASSIGNED_TO_NONE_TEMPLATE_WITH_TITLE.format(
user_name=get_user_name(payload),
number=pr["id"],
url=pr["links"]["self"][0]["href"],
title=pr["title"]
)
message = "{}{}".format(message, punctuation)
return message
if not include_title:
return PULL_REQUEST_REASSIGNED_TEMPLATE.format(
user_name=get_user_name(payload),
number=pr["id"],
url=pr["links"]["self"][0]["href"],
assignees=assignees_string
)
return PULL_REQUEST_REASSIGNED_TEMPLATE_WITH_TITLE.format(
user_name=get_user_name(payload),
number=pr["id"],
url=pr["links"]["self"][0]["href"],
assignees=assignees_string,
title=pr["title"]
)
def pr_handler(payload: Dict[str, Any], action: str,
include_title: Optional[bool]=False) -> List[Dict[str, str]]:
pr = payload["pullRequest"]
subject = get_pr_subject(pr["toRef"]["repository"]["name"], type="PR", id=pr["id"],
title=pr["title"])
if action in ["opened", "modified"]:
body = get_pr_opened_or_modified_body(payload, action, include_title)
elif action == "needs_work":
body = get_pr_needs_work_body(payload, include_title)
elif action == "reviewers_updated":
body = get_pr_reassigned_body(payload, include_title)
else:
body = get_simple_pr_body(payload, action, include_title)
return [{"subject": subject, "body": body}]
def pr_comment_handler(payload: Dict[str, Any], action: str,
include_title: Optional[bool]=False) -> List[Dict[str, str]]:
pr = payload["pullRequest"]
subject = get_pr_subject(pr["toRef"]["repository"]["name"], type="PR", id=pr["id"],
title=pr["title"])
message = payload["comment"]["text"]
if action == "deleted their comment on":
message = "~~{message}~~".format(message=message)
body = get_pull_request_event_message(
user_name=get_user_name(payload),
action=action,
url=pr["links"]["self"][0]["href"],
number=pr["id"],
message=message,
title=pr["title"] if include_title else None
)
return [{"subject": subject, "body": body}]
EVENT_HANDLER_MAP = {
"diagnostics:ping": ping_handler,
"repo:comment:added": partial(repo_comment_handler, action="commented"),
"repo:comment:edited": partial(repo_comment_handler, action="edited their comment"),
"repo:comment:deleted": partial(repo_comment_handler, action="deleted their comment"),
"repo:forked": repo_forked_handler,
"repo:modified": repo_modified_handler,
"repo:refs_changed": repo_push_handler,
"pr:comment:added": partial(pr_comment_handler, action="commented on"),
"pr:comment:edited": partial(pr_comment_handler, action="edited their comment on"),
"pr:comment:deleted": partial(pr_comment_handler, action="deleted their comment on"),
"pr:declined": partial(pr_handler, action="declined"),
"pr:deleted": partial(pr_handler, action="deleted"),
"pr:merged": partial(pr_handler, action="merged"),
"pr:modified": partial(pr_handler, action="modified"),
"pr:opened": partial(pr_handler, action="opened"),
"pr:reviewer:approved": partial(pr_handler, action="approved"),
"pr:reviewer:needs_work": partial(pr_handler, action="needs_work"),
"pr:reviewer:updated": partial(pr_handler, action="reviewers_updated"),
"pr:reviewer:unapproved": partial(pr_handler, action="unapproved"),
}  # type: Dict[str, Optional[Callable[..., List[Dict[str, str]]]]]
def get_event_handler(eventkey: str) -> Callable[..., List[Dict[str, str]]]:
    # This function exists mainly to keep mypy happy about the handler type.
handler = EVENT_HANDLER_MAP.get(eventkey) # type: Any
if handler is None:
raise UnexpectedWebhookEventType("BitBucket Server", eventkey)
return handler
@api_key_only_webhook_view("Bitbucket3")
@has_request_variables
def api_bitbucket3_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type="body"),
branches: Optional[str]=REQ(default=None),
user_specified_topic: Optional[str]=REQ("topic", default=None)
) -> HttpResponse:
try:
eventkey = payload["eventKey"]
except KeyError:
eventkey = request.META["HTTP_X_EVENT_KEY"]
handler = get_event_handler(eventkey)
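    # Dispatch on the handler's signature: push handlers take the `branches`
    # filter, handlers with an `include_title` parameter receive the
    # user-specified topic, and the rest only need the payload.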
if "branches" in signature(handler).parameters:
data = handler(payload, branches)
elif "include_title" in signature(handler).parameters:
data = handler(payload, include_title=user_specified_topic)
else:
data = handler(payload)
for element in data:
check_send_webhook_message(request, user_profile, element["subject"],
element["body"], unquote_url_parameters=True)
return json_success()
|
|
from __future__ import division
import argparse
import os
import json
from multiprocessing import Pool
import numpy as np
import yaml
import numdifftools
from tmd.pwscf.parseScf import fermi_from_scf, D_from_scf, alat_from_scf
from tmd.pwscf.extractQEBands import extractQEBands
from tmd.wannier.bands import Hk_recip
from tmd.wannier.bands import Hk as Hk_Cart
from tmd.wannier.parseWin import parse_inner_window
from tmd.wannier.fitError import dft_wan_correspondence
from tmd.bilayer.wannier import get_Hr
from tmd.bilayer.dgrid import get_prefixes
from tmd.bilayer.bilayer_util import global_config
from tmd.bilayer.plot_ds import get_atom_order, orbital_index, ds_from_prefixes, wrap_cell, sorted_d_group, plot_d_vals
def _close(k, q, eps):
for i in range(len(k)):
if abs(k[i] - q[i]) > eps:
return False
return True
def get_layer_indices(work, prefix, fixed_spin):
atom_order = get_atom_order(work, prefix)
syms = [["X1", "M", "X2"], ["X1p", "Mp", "X2p"]]
orbitals = {"X1": ["pz", "px", "py"], "M": ["dz2", "dxz", "dyz", "dx2-y2", "dxy"]}
orbitals["X2"] = orbitals["X1"]
orbitals["X1p"] = orbitals["X1"]
orbitals["X2p"] = orbitals["X1"]
orbitals["Mp"] = orbitals["M"]
spins = ["up", "down"]
layer_indices = []
for layer_syms in syms:
layer_indices.append([])
for sym in layer_syms:
for orb in orbitals[sym]:
for spin in spins:
if spin != fixed_spin:
continue
index = orbital_index(atom_order, sym, orb, spin, soc=True)
layer_indices[-1].append(index)
return layer_indices
def get_layer_contribs(layer_indices, U):
layer_contribs = [[], []]
num_states = U.shape[0]
for n in range(num_states):
for l, l_indices in enumerate(layer_indices):
contrib = 0.0
for index in l_indices:
contrib += abs(U[index, n])**2
layer_contribs[l].append(contrib)
return layer_contribs
def bracket_indices(w, E_F):
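    # Return the pair of indices (i, i+1) whose energies straddle E_F, i.e.
    # w[i] <= E_F < w[i+1], assuming w is sorted ascending (as from
    # np.linalg.eigh); return None if no such pair exists.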
for i, val in enumerate(w):
if i == len(w) - 1:
return None
if val <= E_F and w[i+1] > E_F:
return i, i+1
def select_layer_contrib(layer_contribs_up, layer_contribs_down, spin, l, n):
contrib_up = layer_contribs_up[l][n]
contrib_down = layer_contribs_down[l][n]
if spin is None:
contrib = contrib_up + contrib_down
elif spin == 'up':
contrib = contrib_up
elif spin == 'down':
contrib = contrib_down
else:
raise ValueError("unrecognized spin value")
return contrib
def get_curvature(D, Hr, k, n):
'''Calculate d^2 E / d k^2 along kx and ky directions at band n.
Assumes there are no band crossings in the region sampled, so that
the single index n can be used for all sampled ks.
'''
curvature = []
for d in range(2):
def Er_d(kd):
kr = []
for dp in range(3):
if dp == d:
kr.append(kd)
else:
kr.append(k[dp])
H_kr = Hk_Cart(kr, Hr, D.T)
w, U = np.linalg.eigh(H_kr)
Er = w[n]
return Er
fd = numdifftools.Derivative(Er_d, n=2)
curvature_d = fd(k[d])
curvature.append(curvature_d)
return curvature
def layer_band_extrema(Es, U, E_F, layer_indices_up, layer_indices_down, layer_threshold,
spin_valence=None, spin_conduction=None):
conduction = [None, None]
valence = [None, None]
layer_contribs_up = get_layer_contribs(layer_indices_up, U)
layer_contribs_down = get_layer_contribs(layer_indices_down, U)
below_fermi, above_fermi = bracket_indices(Es, E_F)
n = below_fermi
while n >= 0 and any([valence[l] is None for l in [0, 1]]):
for l in [0, 1]:
contrib = select_layer_contrib(layer_contribs_up, layer_contribs_down, spin_valence, l, n)
if contrib > layer_threshold and valence[l] is None:
valence[l] = n
n -= 1
n = above_fermi
while n < len(Es) and any([conduction[l] is None for l in [0, 1]]):
for l in [0, 1]:
contrib = select_layer_contrib(layer_contribs_up, layer_contribs_down, spin_conduction, l, n)
if contrib > layer_threshold and conduction[l] is None:
conduction[l] = n
n += 1
return conduction, valence
def get_gaps(work, prefix, layer_threshold, k, spin_valence=None, spin_conduction=None, use_QE_evs=False, ev_width=8, do_get_curvature=False):
wannier_dir = os.path.join(work, prefix, "wannier")
scf_path = os.path.join(wannier_dir, "scf.out")
E_F = fermi_from_scf(scf_path)
alat_Bohr = alat_from_scf(scf_path)
D = D_from_scf(scf_path)
R = 2*np.pi*np.linalg.inv(D)
k_Cart = np.dot(np.array(k), R)
if use_QE_evs:
# ks in QE bands output are in units of 2pi/a;
# D is in units of a
k_cart_2pi = np.dot(np.array(k), R) / (2*np.pi)
bands_dir = os.path.join(work, prefix, "bands")
evals_path = os.path.join(bands_dir, "{}_bands.dat".format(prefix))
nbnd, nks, QE_bands = extractQEBands(evals_path, ev_width=ev_width)
eps = 1e-6
QE_bands_k = None
for qe_k_cart, qe_k_evals in QE_bands:
if _close(k_cart_2pi, qe_k_cart, eps):
QE_bands_k = qe_k_evals
break
if QE_bands_k is None:
raise ValueError("could not find QE k")
win_path = os.path.join(wannier_dir, "{}.win".format(prefix))
inner_win = parse_inner_window(win_path)
layer_indices_up = get_layer_indices(work, prefix, 'up')
layer_indices_down = get_layer_indices(work, prefix, 'down')
Hr = get_Hr(work, prefix)
# note:
# rotated K 2pi/3: K_R2 = (-2/3, 1/3, 0.0)
# rotated K 4pi/3: K_R4 = (1/3, -2/3, 0.0)
Hk = Hk_recip(k, Hr)
# TODO - check ws for QE bands.
# Wannier functions may not preserve symmetry.
# Possible that symmetry is exact in QE bands.
w, U = np.linalg.eigh(Hk)
if use_QE_evs:
dft_start_index, wan_start_index, num_states = dft_wan_correspondence(QE_bands_k,
w, inner_win)
offset = dft_start_index - wan_start_index
conduction, valence = layer_band_extrema(w, U, E_F, layer_indices_up, layer_indices_down,
layer_threshold, spin_valence, spin_conduction)
conduction_curvature = [None, None]
valence_curvature = [None, None]
if do_get_curvature:
for l in [0, 1]:
valence_curvature[l] = get_curvature(D, Hr, k_Cart, valence[l])
conduction_curvature[l] = get_curvature(D, Hr, k_Cart, conduction[l])
gaps = {}
if use_QE_evs:
ev = QE_bands_k
gaps["0/0"] = float(ev[conduction[0]+offset] - ev[valence[0]+offset])
gaps["1/1"] = float(ev[conduction[1]+offset] - ev[valence[1]+offset])
gaps["0/1"] = float(ev[conduction[0]+offset] - ev[valence[1]+offset])
gaps["1/0"] = float(ev[conduction[1]+offset] - ev[valence[0]+offset])
gaps["0_valence"] = float(ev[valence[0]+offset])
gaps["1_valence"] = float(ev[valence[1]+offset])
gaps["0_conduction"] = float(ev[conduction[0]+offset])
gaps["1_conduction"] = float(ev[conduction[1]+offset])
conduction_min = min(conduction[0], conduction[1]) + offset
        gaps["conduction_min_partner"] = float(ev[conduction_min + 1])
if do_get_curvature:
add_curvature(gaps, valence_curvature, conduction_curvature, alat_Bohr)
else:
gaps["0/0"] = float(w[conduction[0]] - w[valence[0]])
gaps["1/1"] = float(w[conduction[1]] - w[valence[1]])
gaps["0/1"] = float(w[conduction[0]] - w[valence[1]])
gaps["1/0"] = float(w[conduction[1]] - w[valence[0]])
gaps["0_valence"] = float(w[valence[0]])
gaps["1_valence"] = float(w[valence[1]])
gaps["0_conduction"] = float(w[conduction[0]])
gaps["1_conduction"] = float(w[conduction[1]])
conduction_min = min(conduction[0], conduction[1])
gaps["conduction_min_partner"] = float(w[conduction_min + 1])
if do_get_curvature:
add_curvature(gaps, valence_curvature, conduction_curvature, alat_Bohr)
return gaps
def reduced_mass(m1, m2):
return m1 * m2 / (m1 + m2)
def add_curvature(gaps, valence_curvature, conduction_curvature, alat_Bohr):
hbar_eV_s = 6.582119514e-16
me_eV_per_c2 = 0.5109989461e6
c_m_per_s = 2.99792458e8
Bohr_m = 0.52917721067e-10
fac = hbar_eV_s**2 / (me_eV_per_c2 * (c_m_per_s)**(-2) * (Bohr_m)**2 * alat_Bohr**2)
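    # fac = hbar^2 / (m_e * alat^2) expressed in eV, with alat = Bohr_m *
    # alat_Bohr the lattice constant in meters. Since k_Cart (= k . 2*pi*inv(D))
    # is measured in units of 1/alat, the dimensionless effective mass is
    # m*/m_e = fac / (d^2 E / d k_Cart^2) with E in eV.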
gaps["0_valence_effmass_kx"] = float(-fac/valence_curvature[0][0])
gaps["1_valence_effmass_kx"] = float(-fac/valence_curvature[1][0])
gaps["0_valence_effmass_ky"] = float(-fac/valence_curvature[0][1])
gaps["1_valence_effmass_ky"] = float(-fac/valence_curvature[1][1])
gaps["0_conduction_effmass_kx"] = float(fac/conduction_curvature[0][0])
gaps["1_conduction_effmass_kx"] = float(fac/conduction_curvature[1][0])
gaps["0_conduction_effmass_ky"] = float(fac/conduction_curvature[0][1])
gaps["1_conduction_effmass_ky"] = float(fac/conduction_curvature[1][1])
gaps["0_reduced_effmass_kx"] = reduced_mass(gaps["0_valence_effmass_kx"], gaps["0_conduction_effmass_kx"])
gaps["0_reduced_effmass_ky"] = reduced_mass(gaps["0_valence_effmass_ky"], gaps["0_conduction_effmass_ky"])
gaps["1_reduced_effmass_kx"] = reduced_mass(gaps["1_valence_effmass_kx"], gaps["1_conduction_effmass_kx"])
gaps["1_reduced_effmass_ky"] = reduced_mass(gaps["1_valence_effmass_ky"], gaps["1_conduction_effmass_ky"])
def write_gap_data(work, dps, threshold, spin_valence, spin_conduction, use_QE_evs, ev_width, k, gap_label, gap_label_tex, do_get_curvature):
get_gaps_args = []
for d, prefix in dps:
get_gaps_args.append([work, prefix, threshold, k, spin_valence, spin_conduction, use_QE_evs, ev_width, do_get_curvature])
with Pool() as p:
all_gaps = p.starmap(get_gaps, get_gaps_args)
gap_data = []
# For JSON output, use same format as plot_ds.
json_gap_data = {"_ds": []}
for (d, prefix), gaps in zip(dps, all_gaps):
gap_data.append([list(d), gaps])
json_gap_data["_ds"].append(d)
        for key, val in gaps.items():
            if key not in json_gap_data:
                json_gap_data[key] = []
            json_gap_data[key].append(val)
with open("{}_gap_data.yaml".format(gap_label), 'w') as fp:
fp.write(yaml.dump(gap_data))
with open("{}_gap_data.json".format(gap_label), 'w') as fp:
json.dump(json_gap_data, fp)
layer0_gap_vals, layer1_gap_vals, interlayer_01_gap_vals, interlayer_10_gap_vals = [], [], [], []
layer0_valence, layer1_valence, layer0_conduction, layer1_conduction = [], [], [], []
conduction_min_partner = []
for d, gaps in gap_data:
layer0_gap_vals.append(gaps["0/0"])
layer1_gap_vals.append(gaps["1/1"])
interlayer_01_gap_vals.append(gaps["0/1"])
interlayer_10_gap_vals.append(gaps["1/0"])
layer0_valence.append(gaps["0_valence"])
layer1_valence.append(gaps["1_valence"])
layer0_conduction.append(gaps["0_conduction"])
layer1_conduction.append(gaps["1_conduction"])
conduction_min_partner.append(gaps["conduction_min_partner"])
plot_d_vals("{}_layer0_gaps".format(gap_label), "{} MoS$_2$ gap [eV]".format(gap_label_tex), dps, layer0_gap_vals)
plot_d_vals("{}_layer1_gaps".format(gap_label), "{} WS$_2$ gap [eV]".format(gap_label_tex), dps, layer1_gap_vals)
plot_d_vals("{}_interlayer_01_gaps".format(gap_label), "{} MoS$_2$ - WS$_2$ gap [eV]".format(gap_label_tex), dps, interlayer_01_gap_vals)
plot_d_vals("{}_interlayer_10_gaps".format(gap_label), "{} WS$_2$ - MoS$_2$ gap [eV]".format(gap_label_tex), dps, interlayer_10_gap_vals)
plot_d_vals("{}_layer0_valence".format(gap_label), "{} MoS$_2$ valence maximum [eV]".format(gap_label_tex), dps, layer0_valence)
plot_d_vals("{}_layer1_valence".format(gap_label), "{} WS$_2$ valence maximum [eV]".format(gap_label_tex), dps, layer1_valence)
plot_d_vals("{}_layer0_conduction".format(gap_label), "{} MoS$_2$ conduction minimum [eV]".format(gap_label_tex), dps, layer0_conduction)
plot_d_vals("{}_layer1_conduction".format(gap_label), "{} WS$_2$ conduction minimum [eV]".format(gap_label_tex), dps, layer1_conduction)
plot_d_vals("{}_conduction_min_partner".format(gap_label), "{} conduction min. + 1 [eV]".format(gap_label_tex), dps, conduction_min_partner)
if do_get_curvature:
layer0_valence_effmass_kx, layer1_valence_effmass_kx, layer0_valence_effmass_ky, layer1_valence_effmass_ky = [], [], [], []
layer0_conduction_effmass_kx, layer1_conduction_effmass_kx, layer0_conduction_effmass_ky, layer1_conduction_effmass_ky = [], [], [], []
layer0_reduced_effmass_kx, layer0_reduced_effmass_ky, layer1_reduced_effmass_kx, layer1_reduced_effmass_ky = [], [], [], []
for d, gaps in gap_data:
layer0_valence_effmass_kx.append(gaps["0_valence_effmass_kx"])
layer1_valence_effmass_kx.append(gaps["1_valence_effmass_kx"])
layer0_valence_effmass_ky.append(gaps["0_valence_effmass_ky"])
layer1_valence_effmass_ky.append(gaps["1_valence_effmass_ky"])
layer0_conduction_effmass_kx.append(gaps["0_conduction_effmass_kx"])
layer1_conduction_effmass_kx.append(gaps["1_conduction_effmass_kx"])
layer0_conduction_effmass_ky.append(gaps["0_conduction_effmass_ky"])
layer1_conduction_effmass_ky.append(gaps["1_conduction_effmass_ky"])
layer0_reduced_effmass_kx.append(gaps["0_reduced_effmass_kx"])
layer0_reduced_effmass_ky.append(gaps["0_reduced_effmass_ky"])
layer1_reduced_effmass_kx.append(gaps["1_reduced_effmass_kx"])
layer1_reduced_effmass_ky.append(gaps["1_reduced_effmass_ky"])
plot_d_vals("{}_layer0_valence_effmass_kx".format(gap_label), "{} MoS$_2$ valence $m^*_x/m_e$".format(gap_label_tex), dps, layer0_valence_effmass_kx)
plot_d_vals("{}_layer1_valence_effmass_kx".format(gap_label), "{} WS$_2$ valence $m^*_x/m_e$".format(gap_label_tex), dps, layer1_valence_effmass_kx)
plot_d_vals("{}_layer0_valence_effmass_ky".format(gap_label), "{} MoS$_2$ valence $m^*_y/m_e$".format(gap_label_tex), dps, layer0_valence_effmass_ky)
plot_d_vals("{}_layer1_valence_effmass_ky".format(gap_label), "{} WS$_2$ valence $m^*_y/m_e$".format(gap_label_tex), dps, layer1_valence_effmass_ky)
plot_d_vals("{}_layer0_conduction_effmass_kx".format(gap_label), "{} MoS$_2$ conduction $m^*_x/m_e$".format(gap_label_tex), dps, layer0_conduction_effmass_kx)
plot_d_vals("{}_layer1_conduction_effmass_kx".format(gap_label), "{} WS$_2$ conduction $m^*_x/m_e$".format(gap_label_tex), dps, layer1_conduction_effmass_kx)
plot_d_vals("{}_layer0_conduction_effmass_ky".format(gap_label), "{} MoS$_2$ conduction $m^*_y/m_e$".format(gap_label_tex), dps, layer0_conduction_effmass_ky)
plot_d_vals("{}_layer1_conduction_effmass_ky".format(gap_label), "{} WS$_2$ conduction $m^*_y/m_e$".format(gap_label_tex), dps, layer1_conduction_effmass_ky)
plot_d_vals("{}_layer0_reduced_effmass_kx".format(gap_label), "{} MoS$_2$ $\\mu^*_x/m_e$".format(gap_label_tex), dps, layer0_reduced_effmass_kx)
plot_d_vals("{}_layer0_reduced_effmass_ky".format(gap_label), "{} MoS$_2$ $\\mu^*_y/m_e$".format(gap_label_tex), dps, layer0_reduced_effmass_ky)
plot_d_vals("{}_layer1_reduced_effmass_kx".format(gap_label), "{} WS$_2$ $\\mu^*_x/m_e$".format(gap_label_tex), dps, layer1_reduced_effmass_kx)
plot_d_vals("{}_layer1_reduced_effmass_ky".format(gap_label), "{} WS$_2$ $\\mu^*_y/m_e$".format(gap_label_tex), dps, layer1_reduced_effmass_ky)
def _main():
parser = argparse.ArgumentParser("Calculation of gaps",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--subdir", type=str, default=None,
help="Subdirectory under work_base where calculation was run")
parser.add_argument("--threshold", type=float, default=0.9,
help="Threshold for deciding if a state is dominated by one layer")
parser.add_argument("--spin_valence", type=str, default=None,
help="Set 'up' or 'down' to choose valence band spin type; closest to E_F is used if not set")
parser.add_argument("--spin_conduction", type=str, default=None,
help="Set 'up' or 'down' to choose conduction band spin type; closest to E_F is used if not set")
parser.add_argument("--use_QE_evs", action='store_true',
help="Use eigenvalues from QE instead of Wannier H(k); if set, spin_valence and spin_conduction act as if not specified.")
parser.add_argument("--ev_width", type=int, default=8,
help="Number of characters per eigenvalue in QE bands.dat")
parser.add_argument('global_prefix', type=str,
help="Calculation name")
args = parser.parse_args()
gconf = global_config()
work = os.path.expandvars(gconf["work_base"])
if args.subdir is not None:
work = os.path.join(work, args.subdir)
prefixes = get_prefixes(work, args.global_prefix)
ds = ds_from_prefixes(prefixes)
ds, prefixes = wrap_cell(ds, prefixes)
dps = sorted_d_group(ds, prefixes)
K = (1/3, 1/3, 0.0)
Gamma = (0.0, 0.0, 0.0)
do_get_curvature_K, do_get_curvature_Gamma = True, False
write_gap_data(work, dps, args.threshold, args.spin_valence, args.spin_conduction, args.use_QE_evs, args.ev_width, K, "K", "$K$", do_get_curvature_K)
write_gap_data(work, dps, args.threshold, args.spin_valence, args.spin_conduction, args.use_QE_evs, args.ev_width, Gamma, "Gamma", "$\\Gamma$", do_get_curvature_Gamma)
if __name__ == "__main__":
_main()
|
|
# Author: Ovidiu Predescu
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import asyncio
import logging
import signal
import unittest
import warnings
from tornado.escape import utf8
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.testing import bind_unused_port, AsyncTestCase, gen_test
from tornado.web import RequestHandler, Application
try:
from twisted.internet.defer import ( # type: ignore
Deferred,
inlineCallbacks,
returnValue,
)
from twisted.internet.protocol import Protocol # type: ignore
from twisted.internet.asyncioreactor import AsyncioSelectorReactor # type: ignore
from twisted.web.client import Agent, readBody # type: ignore
from twisted.web.resource import Resource # type: ignore
from twisted.web.server import Site # type: ignore
have_twisted = True
except ImportError:
have_twisted = False
else:
# Not used directly but needed for `yield deferred` to work.
import tornado.platform.twisted # noqa: F401
skipIfNoTwisted = unittest.skipUnless(have_twisted, "twisted module not present")
def save_signal_handlers():
saved = {}
signals = [signal.SIGINT, signal.SIGTERM]
if hasattr(signal, "SIGCHLD"):
signals.append(signal.SIGCHLD)
for sig in signals:
saved[sig] = signal.getsignal(sig)
if "twisted" in repr(saved):
# This indicates we're not cleaning up after ourselves properly.
raise Exception("twisted signal handlers already installed")
return saved
def restore_signal_handlers(saved):
for sig, handler in saved.items():
signal.signal(sig, handler)
# Test various combinations of twisted and tornado http servers,
# http clients, and event loop interfaces.
@skipIfNoTwisted
class CompatibilityTests(unittest.TestCase):
def setUp(self):
self.saved_signals = save_signal_handlers()
self.io_loop = IOLoop()
self.io_loop.make_current()
self.reactor = AsyncioSelectorReactor()
def tearDown(self):
self.reactor.disconnectAll()
self.io_loop.clear_current()
self.io_loop.close(all_fds=True)
restore_signal_handlers(self.saved_signals)
def start_twisted_server(self):
class HelloResource(Resource):
isLeaf = True
def render_GET(self, request):
return b"Hello from twisted!"
site = Site(HelloResource())
port = self.reactor.listenTCP(0, site, interface="127.0.0.1")
self.twisted_port = port.getHost().port
def start_tornado_server(self):
class HelloHandler(RequestHandler):
def get(self):
self.write("Hello from tornado!")
app = Application([("/", HelloHandler)], log_function=lambda x: None)
server = HTTPServer(app)
sock, self.tornado_port = bind_unused_port()
server.add_sockets([sock])
def run_reactor(self):
# In theory, we can run the event loop through Tornado,
# Twisted, or asyncio interfaces. However, since we're trying
# to avoid installing anything as the global event loop, only
        # the twisted interface gets everything wired up correctly
# without extra hacks. This method is a part of a
# no-longer-used generalization that allowed us to test
# different combinations.
self.stop_loop = self.reactor.stop
self.stop = self.reactor.stop
self.reactor.run()
def tornado_fetch(self, url, runner):
client = AsyncHTTPClient()
fut = asyncio.ensure_future(client.fetch(url))
fut.add_done_callback(lambda f: self.stop_loop())
runner()
return fut.result()
def twisted_fetch(self, url, runner):
# http://twistedmatrix.com/documents/current/web/howto/client.html
chunks = []
client = Agent(self.reactor)
d = client.request(b"GET", utf8(url))
class Accumulator(Protocol):
def __init__(self, finished):
self.finished = finished
def dataReceived(self, data):
chunks.append(data)
def connectionLost(self, reason):
self.finished.callback(None)
def callback(response):
finished = Deferred()
response.deliverBody(Accumulator(finished))
return finished
d.addCallback(callback)
def shutdown(failure):
if hasattr(self, "stop_loop"):
self.stop_loop()
elif failure is not None:
# loop hasn't been initialized yet; try our best to
# get an error message out. (the runner() interaction
# should probably be refactored).
try:
failure.raiseException()
except:
logging.error("exception before starting loop", exc_info=True)
d.addBoth(shutdown)
runner()
self.assertTrue(chunks)
return b"".join(chunks)
def twisted_coroutine_fetch(self, url, runner):
body = [None]
@gen.coroutine
def f():
# This is simpler than the non-coroutine version, but it cheats
# by reading the body in one blob instead of streaming it with
# a Protocol.
client = Agent(self.reactor)
response = yield client.request(b"GET", utf8(url))
with warnings.catch_warnings():
# readBody has a buggy DeprecationWarning in Twisted 15.0:
# https://twistedmatrix.com/trac/changeset/43379
warnings.simplefilter("ignore", category=DeprecationWarning)
body[0] = yield readBody(response)
self.stop_loop()
self.io_loop.add_callback(f)
runner()
return body[0]
def testTwistedServerTornadoClientReactor(self):
self.start_twisted_server()
response = self.tornado_fetch(
"http://127.0.0.1:%d" % self.twisted_port, self.run_reactor
)
self.assertEqual(response.body, b"Hello from twisted!")
def testTornadoServerTwistedClientReactor(self):
self.start_tornado_server()
response = self.twisted_fetch(
"http://127.0.0.1:%d" % self.tornado_port, self.run_reactor
)
self.assertEqual(response, b"Hello from tornado!")
def testTornadoServerTwistedCoroutineClientReactor(self):
self.start_tornado_server()
response = self.twisted_coroutine_fetch(
"http://127.0.0.1:%d" % self.tornado_port, self.run_reactor
)
self.assertEqual(response, b"Hello from tornado!")
@skipIfNoTwisted
class ConvertDeferredTest(AsyncTestCase):
@gen_test
def test_success(self):
@inlineCallbacks
def fn():
if False:
# inlineCallbacks doesn't work with regular functions;
# must have a yield even if it's unreachable.
yield
returnValue(42)
res = yield fn()
self.assertEqual(res, 42)
@gen_test
def test_failure(self):
@inlineCallbacks
def fn():
if False:
yield
1 / 0
with self.assertRaises(ZeroDivisionError):
yield fn()
if __name__ == "__main__":
unittest.main()
|
|
#! /usr/bin/env python
# This smoke test relies on pexpect. You need to have it installed
# before you can run it. It is available here:
# http://www.noah.org/wiki/pexpect
import logging
import os
import pexpect
import unittest
import exceptions
import tempfile
import sys
import re
class TestError(exceptions.Exception):
"""Raised when a test fails """
def __init__(self, result):
self.result = result
class LoggerWriter(object):
def __init__(self):
pass
def write(self, data):
print(data.rstrip())
    def flush(self):
        # no-op: nothing is buffered, but pexpect expects flush() to exist
        pass
def spawn(command):
child = pexpect.spawn(command)
child.logfile_read = LoggerWriter()
return child
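# Decorator factory for the SmokeTest methods below: wrapping a method with
# @sinan("<command>") makes it launch "sinan <command>" inside an Erlang VM
# (erl -s sinan main -extra <command>) via pexpect and pass the spawned child
# process to the method for interaction and verification.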
def sinan(command):
def check_accepts(f):
def new_f(*args, **kwds):
print("Running Command %s in %s" % (command, os.getcwd()))
self = args[0]
ebin = ""
with open(os.path.join(self.sinan_dir, "sinan.config"), "r") as fl:
data = fl.read()
vsn = re.search(r"""{project_vsn, "(.+)"}""", data).group(1)
ebin = os.path.join(self.sinan_dir,
"_build",
"sinan",
"lib",
"sinan-" + vsn, "ebin")
child_cmd = ("erl -noshell -pa %s "
" -s sinan manual_start"
" -s sinan main"
" -extra %s" %
(ebin, command))
            print(child_cmd)
child = spawn(child_cmd)
res = f(self, child, *(args[1:]), **kwds)
print("Finished %s successfully" % command)
return res
new_f.func_name = f.func_name
return new_f
return check_accepts
class AppDesc(object):
def __init__(self,
user_name=None,
email=None,
copyright_holder=None,
project_name=None,
project_version=None,
app_names=None):
self.user_name = user_name
self.email = email
self.copyright_holder = copyright_holder
self.project_name = project_name
self.project_version = project_version
self.app_names = app_names
def run_tests(class_obj):
cases = unittest.defaultTestLoader.loadTestsFromTestCase(class_obj)
result = unittest.TextTestRunner().run(cases)
if len(result.errors) > 0 or len(result.failures) > 0:
raise TestError(result)
class SmokeTest(unittest.TestCase):
def get_project_root(self, cwd):
return os.path.abspath(cwd)
def setUp(self):
self.release_name = None
self.release_version = None
self.smokedir = tempfile.mkdtemp(prefix='smoke_test_')
self.current_dir = os.getcwd()
self.sinan_dir = self.current_dir
sys.path.append(self.current_dir)
os.chdir(self.smokedir)
def tearDown(self):
os.chdir(self.current_dir)
def assert_dirs_exist(self, base, *dirs):
for d in dirs:
check_dir = ""
if type(d) == list:
check_dir = os.path.join(base, *d)
else:
check_dir = os.path.join(base, d)
self.assertTrue(os.path.isdir(check_dir))
def assert_files_exist(self, base, *files):
for f in files:
check_file = ""
if type(f) == list:
check_file = os.path.join(base, *f)
else:
check_file = os.path.join(base, f)
self.assertTrue(os.path.isfile(check_file))
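    # Apply a list of method names in order, feeding each method the result of
    # the previous one; used to chain the gen/build/clean/test/release steps.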
def do_apply(self, fun_list, arg):
res = arg
for n in fun_list:
f = getattr(self, n)
res = f(res)
return res
@sinan("gen")
def run_gen(self, child, appdesc):
child.expect("your name> ")
child.sendline(appdesc.user_name)
child.expect("your email> ")
child.sendline(appdesc.email)
child.expect('copyright holder \("%s"\)> ' % appdesc.user_name)
child.sendline()
child.expect('project name> ')
child.sendline(appdesc.project_name)
child.expect('project version> ')
child.sendline(appdesc.project_version)
child.expect('Please specify the ERTS version \(".*"\)> ')
child.sendline()
child.expect('Is this a single application project \("n"\)> ')
child.sendline()
child.expect("app> ")
child.sendline(appdesc.app_names[0])
for n in appdesc.app_names[1:]:
child.expect('app \(""\)> ')
child.sendline(n)
child.expect('app \(""\)> ')
child.sendline()
child.expect('\("y"\)> ')
child.sendline()
child.expect("Project was created, you should be good to go!")
child.expect(pexpect.EOF)
return appdesc
def verify_gen(self, a):
projdir = os.path.join(os.getcwd(), a.project_name)
self.assert_dirs_exist(projdir,
"config",
"lib")
self.assert_files_exist(projdir,
["config", "sys.config"],
"sinan.config")
for n in a.app_names:
ppath = os.path.join(projdir, "lib", n)
self.assert_dirs_exist(ppath,
"ebin",
"src",
"include",
"doc")
self.assert_files_exist(ppath,
["src", n + "_app.erl"],
["src", n + "_sup.erl"],
["src", n + ".app.src"])
return a
# gen a new project in the test dir
def do_gen(self, appdesc):
return self.do_apply(["run_gen", "verify_gen"], appdesc)
def build_validate(self, child, appdesc):
child.expect(pexpect.EOF)
build_tmp = self.get_build_root_path()
build_dir = os.path.join(*(build_tmp))
self.assertTrue(build_dir)
for n in appdesc.app_names:
app_dir = os.path.join(build_dir, "lib", "%s-0.1.0" % n)
            print(app_dir)
self.assert_dirs_exist(app_dir,
"ebin",
"src",
"include",
"doc")
self.assert_files_exist(app_dir,
["src", n + "_sup.erl"],
["src", n + "_app.erl"],
["ebin", n + "_sup.beam"],
["ebin", n + "_app.beam"])
return appdesc
# build the project
@sinan("build")
def do_build(self, child, appdesc):
return self.build_validate(child, appdesc)
# clean the project
@sinan("clean")
def do_clean(self, child, appdesc):
child.expect(pexpect.EOF)
self.assertTrue(not os.path.isdir(os.path.join(os.getcwd(), "_build")))
return appdesc
# test the project
@sinan("test")
def do_t(self, child, appdesc):
child.expect(pexpect.EOF)
return appdesc
# release
@sinan("release")
def do_release(self, child, appdesc):
child.expect(pexpect.EOF)
version = appdesc.project_version
name = appdesc.project_name
build_tmp = self.get_build_root_path()
build_tmp.append("releases"),
build_tmp.append(version)
version_dir = os.path.join(*build_tmp)
print("Checking version directory at %s " % version_dir)
self.assert_files_exist(version_dir,
"%s.boot" % name,
"%s.rel" % name,
"%s.script" % name,
"sys.config")
return appdesc
# dist (check the tarball)
@sinan("dist")
def do_dist(self, child, appdesc):
child.expect(pexpect.EOF)
build_tmp = self.get_release_root_path()
build_tmp.append("tar")
build_tmp.append("%s-%s.tar.gz" %
(appdesc.project_name, appdesc.project_version))
tar_file = os.path.join(*build_tmp)
        print(tar_file)
self.assertTrue(os.path.isfile(tar_file))
return appdesc
def do_run(self, appdesc):
self.current_app_desc = appdesc
a = self.do_gen(appdesc)
self.project_dir = os.path.join(self.smokedir, a.project_name)
os.chdir(os.path.join(self.project_dir))
self.do_apply(["do_build",
"do_clean",
"do_build",
"do_t",
"do_release",
"do_dist"], a)
def get_build_root_path(self, project_dir=None, release_name=None,
release_version=None):
release_root = self.get_release_root_path(project_dir)
if not release_name and not self.release_name:
release_name = self.current_app_desc.project_name
elif not release_name:
release_name = self.release_name
if not release_version and not self.release_version:
release_version = self.current_app_desc.project_version
elif not release_version:
release_version = self.release_version
release_root.append(release_name)
return release_root
def get_release_root_path(self, project_dir=None):
if not project_dir:
project_dir = self.project_dir
return [project_dir,
"_build"]
|
|
# This file is part of Ansible
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import os.path
import tempfile
import traceback
import zipfile
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum
def _walk_dirs(topdir, loader, base_path=None, local_follow=False, trailing_slash_detector=None, checksum_check=False):
"""
Walk a filesystem tree returning enough information to copy the files.
This is similar to the _walk_dirs function in ``copy.py`` but returns
a dict instead of a tuple for each entry and includes the checksum of
a local file if wanted.
:arg topdir: The directory that the filesystem tree is rooted at
:arg loader: The self._loader object from ActionBase
:kwarg base_path: The initial directory structure to strip off of the
files for the destination directory. If this is None (the default),
the base_path is set to ``top_dir``.
:kwarg local_follow: Whether to follow symlinks on the source. When set
to False, no symlinks are dereferenced. When set to True (the
default), the code will dereference most symlinks. However, symlinks
can still be present if needed to break a circular link.
:kwarg trailing_slash_detector: Function to determine if a path has
a trailing directory separator. Only needed when dealing with paths on
a remote machine (in which case, pass in a function that is aware of the
directory separator conventions on the remote machine).
    :kwarg checksum_check: Whether to get the checksum of each local file and
        add it to the returned dict.
    :returns: dictionary of dictionaries. All of the path elements in the structure are text strings.
        This separates all the files, directories, and symlinks along with
        important information about each::
            {
                'files': [{
                    src: '/absolute/path/to/copy/from',
                    dest: 'relative/path/to/copy/to',
                    checksum: 'b54ba7f5621240d403f06815f7246006ef8c7d43'
                }, ...],
                'directories': [{
                    src: '/absolute/path/to/copy/from',
                    dest: 'relative/path/to/copy/to'
                }, ...],
                'symlinks': [{
                    src: '/symlink/target/path',
                    dest: 'relative/path/to/copy/to'
                }, ...],
            }
The ``symlinks`` field is only populated if ``local_follow`` is set to False
*or* a circular symlink cannot be dereferenced. The ``checksum`` entry is set
to None if checksum_check=False.
"""
# Convert the path segments into byte strings
r_files = {'files': [], 'directories': [], 'symlinks': []}
def _recurse(topdir, rel_offset, parent_dirs, rel_base=u'', checksum_check=False):
"""
        This is a closure (function utilizing variables from its parent
function's scope) so that we only need one copy of all the containers.
Note that this function uses side effects (See the Variables used from
outer scope).
:arg topdir: The directory we are walking for files
:arg rel_offset: Integer defining how many characters to strip off of
the beginning of a path
:arg parent_dirs: Directories that we're copying that this directory is in.
:kwarg rel_base: String to prepend to the path after ``rel_offset`` is
applied to form the relative path.
Variables used from the outer scope
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:r_files: Dictionary of files in the hierarchy. See the return value
for :func:`walk` for the structure of this dictionary.
:local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
"""
for base_path, sub_folders, files in os.walk(topdir):
for filename in files:
filepath = os.path.join(base_path, filename)
dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
if os.path.islink(filepath):
                    # Dereference the symlink
real_file = loader.get_real_file(os.path.realpath(filepath), decrypt=True)
if local_follow and os.path.isfile(real_file):
# Add the file pointed to by the symlink
r_files['files'].append(
{
"src": real_file,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, real_file)
}
)
else:
# Mark this file as a symlink to copy
r_files['symlinks'].append({"src": os.readlink(filepath), "dest": dest_filepath})
else:
# Just a normal file
real_file = loader.get_real_file(filepath, decrypt=True)
r_files['files'].append(
{
"src": real_file,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, real_file)
}
)
for dirname in sub_folders:
dirpath = os.path.join(base_path, dirname)
dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
real_dir = os.path.realpath(dirpath)
dir_stats = os.stat(real_dir)
if os.path.islink(dirpath):
if local_follow:
if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
# Just insert the symlink if the target directory
# exists inside of the copy already
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the dirpath to find all parent directories.
new_parents = set()
parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
for parent in range(len(parent_dir_list), 0, -1):
parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
# Reached the point at which the directory
# tree is already known. Don't add any
# more or we might go to an ancestor that
# isn't being copied.
break
new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
                                # This was a circular symlink, so add it as
                                # a symlink
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the directory pointed to by the symlink
r_files['directories'].append({"src": real_dir, "dest": dest_dirpath})
offset = len(real_dir) + 1
_recurse(real_dir, offset, parent_dirs.union(new_parents),
rel_base=dest_dirpath,
checksum_check=checksum_check)
else:
# Add the symlink to the destination
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Just a normal directory
r_files['directories'].append({"src": dirpath, "dest": dest_dirpath})
# Check if the source ends with a "/" so that we know which directory
# level to work at (similar to rsync)
source_trailing_slash = False
if trailing_slash_detector:
source_trailing_slash = trailing_slash_detector(topdir)
else:
source_trailing_slash = topdir.endswith(os.path.sep)
# Calculate the offset needed to strip the base_path to make relative
# paths
if base_path is None:
base_path = topdir
if not source_trailing_slash:
base_path = os.path.dirname(base_path)
if topdir.startswith(base_path):
offset = len(base_path)
# Make sure we're making the new paths relative
if trailing_slash_detector and not trailing_slash_detector(base_path):
offset += 1
elif not base_path.endswith(os.path.sep):
offset += 1
if os.path.islink(topdir) and not local_follow:
r_files['symlinks'] = {"src": os.readlink(topdir), "dest": os.path.basename(topdir)}
return r_files
dir_stats = os.stat(topdir)
parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
# Actually walk the directory hierarchy
_recurse(topdir, offset, parents, checksum_check=checksum_check)
return r_files
def _get_local_checksum(get_checksum, local_path):
if get_checksum:
return checksum(local_path)
else:
return None
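# Illustrative sketch only (not used by the plugin, names are hypothetical):
# assuming a minimal loader-like object whose get_real_file() simply returns
# the path it is given, _walk_dirs() can be exercised on a local directory to
# inspect the resulting files/directories/symlinks split.
class _ExampleLoader(object):
    @staticmethod
    def get_real_file(path, decrypt=True):
        # Stand-in for the Ansible loader: no vault decryption, just echo the path
        return path


def _example_walk_dirs(topdir):
    tree = _walk_dirs(topdir, _ExampleLoader(), local_follow=False, checksum_check=False)
    for entry in tree['files']:
        print('%s -> %s (checksum=%s)' % (entry['src'], entry['dest'], entry['checksum']))
    return tree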
class ActionModule(ActionBase):
WIN_PATH_SEPARATOR = "\\"
def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp()
f = os.fdopen(fd, 'wb')
content = to_bytes(content)
try:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
f.close()
return content_tempfile
def _create_zip_tempfile(self, files, directories):
tmpdir = tempfile.mkdtemp()
zip_file_path = os.path.join(tmpdir, "win_copy.zip")
zip_file = zipfile.ZipFile(zip_file_path, "w")
        # Encode the file/dir names with base64 so the Windows side can unzip a
        # unicode filename and recover the right name; Windows does not handle
        # unicode names in zip archives very well.
for directory in directories:
directory_path = to_bytes(directory['src'], errors='surrogate_or_strict')
archive_path = to_bytes(directory['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(directory_path, encoded_path, zipfile.ZIP_DEFLATED)
for file in files:
file_path = to_bytes(file['src'], errors='surrogate_or_strict')
archive_path = to_bytes(file['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(file_path, encoded_path, zipfile.ZIP_DEFLATED)
return zip_file_path
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
def _copy_single_file(self, local_file, dest, source_rel, task_vars):
if self._play_context.check_mode:
module_return = dict(changed=True)
return module_return
# copy the file across to the server
tmp_path = self._make_tmp_path()
tmp_src = self._connection._shell.join_path(tmp_path, 'source')
self._transfer_file(local_file, tmp_src)
copy_args = self._task.args.copy()
copy_args.update(
dict(
dest=dest,
src=tmp_src,
original_basename=source_rel,
mode="single"
)
)
copy_args.pop('content', None)
copy_result = self._execute_module(module_name="copy", module_args=copy_args, task_vars=task_vars)
self._remove_tmp_path(tmp_path)
return copy_result
def _copy_zip_file(self, dest, files, directories, task_vars):
# create local zip file containing all the files and directories that
# need to be copied to the server
try:
zip_file = self._create_zip_tempfile(files, directories)
except Exception as e:
module_return = dict(
changed=False,
failed=True,
msg="failed to create tmp zip file: %s" % to_text(e),
exception=traceback.format_exc()
)
return module_return
zip_path = self._loader.get_real_file(zip_file)
if self._play_context.check_mode:
module_return = dict(changed=True)
os.remove(zip_path)
os.removedirs(os.path.dirname(zip_path))
return module_return
        # send zip file to remote, file must end in .zip so the COM Shell.Application object works
tmp_path = self._make_tmp_path()
tmp_src = self._connection._shell.join_path(tmp_path, 'source.zip')
self._transfer_file(zip_path, tmp_src)
# run the explode operation of win_copy on remote
copy_args = self._task.args.copy()
copy_args.update(
dict(
src=tmp_src,
dest=dest,
mode="explode"
)
)
copy_args.pop('content', None)
os.remove(zip_path)
os.removedirs(os.path.dirname(zip_path))
module_return = self._execute_module(module_args=copy_args, task_vars=task_vars)
self._remove_tmp_path(tmp_path)
return module_return
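    # run() drives the whole copy operation: validate the task arguments,
    # materialise 'content' into a temp file if it was supplied, hand off to
    # the win_copy module directly for remote_src, otherwise build the local
    # file/directory/symlink inventory, ask the remote side (mode="query")
    # what actually needs copying, then either copy a single file or ship a
    # zip archive and explode it remotely.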
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
local_follow = boolean(self._task.args.get('local_follow', False), strict=False)
force = boolean(self._task.args.get('force', True), strict=False)
result['src'] = source
result['dest'] = dest
result['failed'] = True
if (source is None and content is None) or dest is None:
result['msg'] = "src (or content) and dest are required"
elif source is not None and content is not None:
result['msg'] = "src and content are mutually exclusive"
elif content is not None and dest is not None and (
dest.endswith(os.path.sep) or dest.endswith(self.WIN_PATH_SEPARATOR)):
result['msg'] = "dest must be a file if content is defined"
else:
del result['failed']
if result.get('failed'):
return result
# If content is defined make a temp file and write the content into it
content_tempfile = None
if content is not None:
try:
# if content comes to us as a dict it should be decoded json.
# We need to encode it back into a string and write it out
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
result['failed'] = True
result['msg'] = "could not write content temp file: %s" % to_native(err)
return result
# all actions should occur on the remote server, run win_copy module
elif remote_src:
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
mode="remote",
dest=dest,
src=source,
force=force
)
)
new_module_args.pop('content', None)
result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
return result
        # _find_needle() may return a directory path without a trailing slash,
        # so record whether the original source had one and re-append it below
else:
trailing_slash = source.endswith(os.path.sep)
try:
# find in expected paths
source = self._find_needle('files', source)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_text(e)
result['exception'] = traceback.format_exc()
return result
if trailing_slash != source.endswith(os.path.sep):
if source[-1] == os.path.sep:
source = source[:-1]
else:
source = source + os.path.sep
# A list of source file tuples (full_path, relative_path) which will try to copy to the destination
source_files = {'files': [], 'directories': [], 'symlinks': []}
# If source is a directory populate our list else source is a file and translate it to a tuple.
if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
result['operation'] = 'folder_copy'
# Get a list of the files we want to replicate on the remote side
source_files = _walk_dirs(source, self._loader, local_follow=local_follow,
trailing_slash_detector=self._connection._shell.path_has_trailing_slash,
checksum_check=force)
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - win_copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = "%s%s" % (dest, self.WIN_PATH_SEPARATOR)
check_dest = dest
# Source is a file, add details to source_files dict
else:
result['operation'] = 'file_copy'
# If the local file does not exist, get_real_file() raises AnsibleFileNotFound
try:
source_full = self._loader.get_real_file(source, decrypt=True)
except AnsibleFileNotFound as e:
result['failed'] = True
result['msg'] = "could not find src=%s, %s" % (source_full, to_text(e))
return result
original_basename = os.path.basename(source)
result['original_basename'] = original_basename
# check if dest ends with / or \ and append source filename to dest
if self._connection._shell.path_has_trailing_slash(dest):
check_dest = dest
filename = original_basename
result['dest'] = self._connection._shell.join_path(dest, filename)
else:
# replace \\ with / so we can use os.path to get the filename or dirname
unix_path = dest.replace(self.WIN_PATH_SEPARATOR, os.path.sep)
filename = os.path.basename(unix_path)
check_dest = os.path.dirname(unix_path)
file_checksum = _get_local_checksum(force, source_full)
source_files['files'].append(
dict(
src=source_full,
dest=filename,
checksum=file_checksum
)
)
result['checksum'] = file_checksum
result['size'] = os.path.getsize(to_bytes(source_full, errors='surrogate_or_strict'))
# find out the files/directories/symlinks that we need to copy to the server
query_args = self._task.args.copy()
query_args.update(
dict(
mode="query",
dest=check_dest,
force=force,
files=source_files['files'],
directories=source_files['directories'],
symlinks=source_files['symlinks']
)
)
        # src is not required for query; it would fail path validation if src contains characters only allowed on unix
query_args.pop('src', None)
query_args.pop('content', None)
query_return = self._execute_module(module_args=query_args, task_vars=task_vars)
if query_return.get('failed') is True:
result.update(query_return)
return result
if len(query_return['files']) == 1 and len(query_return['directories']) == 0:
# we only need to copy 1 file, don't mess around with zips
file_src = query_return['files'][0]['src']
file_dest = query_return['files'][0]['dest']
copy_result = self._copy_single_file(file_src, dest, file_dest, task_vars)
result['changed'] = True
if copy_result.get('failed') is True:
result['failed'] = True
result['msg'] = "failed to copy file %s: %s" % (file_src, copy_result['msg'])
elif len(query_return['files']) > 0 or len(query_return['directories']) > 0:
# either multiple files or directories need to be copied, compress
# to a zip and 'explode' the zip on the server
# TODO: handle symlinks
result.update(self._copy_zip_file(dest, source_files['files'], source_files['directories'], task_vars))
result['changed'] = True
else:
# no operations need to occur
result['failed'] = False
result['changed'] = False
# remove the content temp file if it was created
self._remove_tempfile_if_content_defined(content, content_tempfile)
return result
|
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for multivariate von Mises-Fisher distribution."""
# Dependency imports
from absl.testing import parameterized
import numpy as np
from scipy import special as sp_special
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class VonMisesFisherTest(test_util.VectorDistributionTestHelpers,
test_util.TestCase):
def testReproducibleGraph(self):
vmf = tfp.distributions.VonMisesFisher(
mean_direction=tf.math.l2_normalize([1., 2.]),
concentration=1.2)
seed = test_util.test_seed()
s1 = self.evaluate(vmf.sample(50, seed=seed))
if tf.executing_eagerly():
tf.random.set_seed(seed)
s2 = self.evaluate(vmf.sample(50, seed=seed))
self.assertAllEqual(s1, s2)
def testSampleMeanDir2d(self):
mean_dirs = tf.math.l2_normalize([[1., 1], [-2, 1], [0, -1]], axis=-1)
concentration = [[0], [0.1], [2], [40], [1000]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dirs,
concentration=concentration,
validate_args=True,
allow_nan_stats=False)
self.assertEqual([5, 3], tensorshape_util.as_list(vmf.batch_shape))
self.assertEqual([2], tensorshape_util.as_list(vmf.event_shape))
nsamples = int(2e4)
samples = vmf.sample(nsamples, seed=test_util.test_seed())
self.assertEqual([nsamples, 5, 3, 2],
tensorshape_util.as_list(samples.shape))
sample_mean = self.evaluate(samples).mean(axis=0)
# Assert that positive-concentration distributions have samples with
# the expected mean direction.
sample_dir = (
sample_mean / np.linalg.norm(sample_mean, axis=-1, keepdims=True))
inner_product = self.evaluate(
tf.reduce_sum(sample_dir * vmf.mean_direction, axis=-1))
# All except the 0-concentration distribution should have >0 inner product
# with the mean direction of the distribution.
self.assertAllGreater(inner_product[1:], 0.1)
# Pick out >1 concentration distributions to assert ~1 inner product with
# mean direction.
self.assertAllClose(np.ones_like(inner_product)[2:], inner_product[2:],
atol=1e-3)
# Inner products should be roughly ascending by concentration.
self.assertAllClose(np.round(np.sort(inner_product, axis=0), decimals=3),
np.round(inner_product, decimals=3),
atol=.005)
means = self.evaluate(vmf.mean())
# Mean vector for 0-concentration is precisely (0, 0).
self.assertAllEqual(np.zeros_like(means[0]), means[0])
mean_lengths = np.linalg.norm(means, axis=-1)
# Length of the mean vector is strictly ascending with concentration.
self.assertAllEqual(mean_lengths, np.sort(mean_lengths, axis=0))
self.assertAllClose(np.linalg.norm(sample_mean, axis=-1), mean_lengths,
atol=0.03)
def testSampleMeanDir3d(self):
mean_dir = tf.math.l2_normalize([[1., 2, 3], [-2, -3, -1]], axis=-1)
concentration = [[0], [0.1], [2], [40], [1000]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir,
concentration=concentration,
validate_args=True,
allow_nan_stats=False)
self.assertEqual([5, 2], tensorshape_util.as_list(vmf.batch_shape))
self.assertEqual([3], tensorshape_util.as_list(vmf.event_shape))
nsamples = int(2e4)
samples = vmf.sample(
sample_shape=[nsamples], seed=test_util.test_seed())
self.assertEqual([nsamples, 5, 2, 3],
tensorshape_util.as_list(samples.shape))
sample_mean = self.evaluate(samples).mean(axis=0)
# Assert that positive-concentration distributions have samples with
# the expected mean direction.
sample_dir = (
sample_mean / np.linalg.norm(sample_mean, axis=-1, keepdims=True))
inner_product = self.evaluate(
tf.reduce_sum(sample_dir * vmf.mean_direction, axis=-1))
# All except the 0-concentration distribution should have >0 inner product
# with the mean direction of the distribution.
self.assertAllGreater(inner_product[1:], 0.1)
# Pick out >1 concentration distributions to assert ~1 inner product with
# mean direction.
self.assertAllClose(np.ones_like(inner_product)[2:], inner_product[2:],
atol=1e-3)
# Inner products should be roughly ascending by concentration.
self.assertAllEqual(np.round(np.sort(inner_product, axis=0), decimals=3),
np.round(inner_product, decimals=3))
means = self.evaluate(vmf.mean())
# Mean vector for 0-concentration is precisely (0, 0, 0).
self.assertAllEqual(np.zeros_like(means[0]), means[0])
mean_lengths = np.linalg.norm(means, axis=-1)
# Length of the mean vector is strictly ascending with concentration.
self.assertAllEqual(mean_lengths, np.sort(mean_lengths, axis=0))
self.assertAllClose(np.linalg.norm(sample_mean, axis=-1), mean_lengths,
atol=0.03)
def _verifyPdfWithNumpy(self, vmf, atol=1e-4):
"""Verifies log_prob evaluations with numpy/scipy.
Both uniform random points and sampled points are evaluated.
Args:
vmf: A `tfp.distributions.VonMisesFisher` instance.
atol: Absolute difference tolerable.
"""
dim = tf.compat.dimension_value(vmf.event_shape[-1])
nsamples = 10
# Sample some random points uniformly over the hypersphere using numpy.
sample_shape = [nsamples] + tensorshape_util.as_list(
vmf.batch_shape) + [dim]
uniforms = np.random.randn(*sample_shape)
uniforms /= np.linalg.norm(uniforms, axis=-1, keepdims=True)
uniforms = uniforms.astype(dtype_util.as_numpy_dtype(vmf.dtype))
# Concatenate in some sampled points from the distribution under test.
vmf_samples = vmf.sample(
sample_shape=[nsamples], seed=test_util.test_seed())
samples = tf.concat([uniforms, vmf_samples], axis=0)
log_prob = vmf.log_prob(samples)
samples = tf.debugging.check_numerics(samples, 'samples')
log_prob = tf.debugging.check_numerics(log_prob, 'log_prob')
samples, log_prob, conc, mean_dir = self.evaluate([
samples, log_prob, vmf.concentration, vmf.mean_direction])
log_true_sphere_surface_area = (
np.log(2) + (dim / 2) * np.log(np.pi) - sp_special.gammaln(dim / 2))
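    # Expected vMF log-density: conc * <x, mean_dir> + log C_d(conc), where
    # log C_d(conc) = (d/2 - 1) log(conc) - (d/2) log(2 pi) - log I_{d/2-1}(conc).
    # scipy's `ive` is the exponentially scaled Bessel function
    # I_v(conc) * exp(-|conc|), hence the extra -|conc| term below. At
    # conc == 0 the density is uniform, i.e. 1 / (sphere surface area).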
expected = (
conc * np.sum(samples * mean_dir, axis=-1) +
np.where(conc > 0,
(dim / 2 - 1) * np.log(conc) -
(dim / 2) * np.log(2 * np.pi) -
np.log(sp_special.ive(dim / 2 - 1, conc)) -
np.abs(conc),
-log_true_sphere_surface_area))
self.assertAllClose(expected, log_prob, atol=atol)
def _verifySampleAndPdfConsistency(self, vmf, rtol=0.075):
"""Verifies samples are consistent with the PDF using importance sampling.
In particular, we verify an estimate the surface area of the n-dimensional
hypersphere, and the surface areas of the spherical caps demarcated by
a handful of survival rates.
Args:
vmf: A `VonMisesFisher` distribution instance.
rtol: Relative difference tolerable.
"""
dim = tf.compat.dimension_value(vmf.event_shape[-1])
nsamples = int(6e4)
samples = vmf.sample(
sample_shape=[nsamples], seed=test_util.test_seed())
samples = tf.debugging.check_numerics(samples, 'samples')
log_prob = vmf.log_prob(samples)
log_prob = tf.debugging.check_numerics(log_prob, 'log_prob')
log_importance = -log_prob
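    # Importance sampling identity: for x ~ vMF, E[1 / p(x)] equals the
    # sphere's surface area, so averaging exp(-log_prob) over the samples
    # gives a Monte Carlo estimate of that area (computed below via
    # logsumexp for numerical stability).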
sphere_surface_area_estimate, samples, importance, conc = self.evaluate([
tf.exp(
tf.reduce_logsumexp(log_importance, axis=0) -
tf.math.log(tf.cast(nsamples, dtype=tf.float32))), samples,
tf.exp(log_importance), vmf.concentration
])
true_sphere_surface_area = 2 * (np.pi)**(dim / 2) * self.evaluate(
tf.exp(-tf.math.lgamma(dim / 2)))
# Broadcast to correct size
true_sphere_surface_area += np.zeros_like(sphere_surface_area_estimate)
# Highly concentrated distributions do not get enough coverage to provide
# a reasonable full-sphere surface area estimate. These are covered below
# by CDF-based hypersphere cap surface area estimates.
self.assertAllClose(
true_sphere_surface_area[np.where(conc < 3)],
sphere_surface_area_estimate[np.where(conc < 3)],
rtol=rtol)
    # Assert the surface area of the hyperspherical cap for several CDF levels
    # in [.05, .45] (h must be greater than 0 for the hypersphere cap surface
    # area calculation to hold).
for survival_rate in 0.95, .9, .75, .6:
cdf = (1 - survival_rate)
mean_dir = self.evaluate(vmf.mean_direction)
dotprods = np.sum(samples * mean_dir, -1)
# Empirical estimate of the effective dot-product of the threshold that
# selects for a given CDF level, that is the cosine of the largest
# passable angle, or the minimum cosine for a within-CDF sample.
dotprod_thresh = np.percentile(
dotprods, 100 * survival_rate, axis=0, keepdims=True)
dotprod_above_thresh = np.float32(dotprods > dotprod_thresh)
sphere_cap_surface_area_ests = (
cdf * (importance * dotprod_above_thresh).sum(0) /
dotprod_above_thresh.sum(0))
h = (1 - dotprod_thresh)
self.assertGreaterEqual(h.min(), 0) # h must be >= 0 for the eqn below
true_sphere_cap_surface_area = (
0.5 * true_sphere_surface_area *
self.evaluate(tf.math.betainc((dim - 1) / 2, 0.5, 2 * h - h**2)))
if dim == 3: # For 3-d we have a simpler form we can double-check.
self.assertAllClose(2 * np.pi * h, true_sphere_cap_surface_area)
self.assertAllClose(
true_sphere_cap_surface_area,
sphere_cap_surface_area_ests +
np.zeros_like(true_sphere_cap_surface_area),
rtol=rtol)
def _verifyCovariance(self, vmf):
dim = tf.compat.dimension_value(vmf.event_shape[-1])
nsamples = 10000
samples = vmf.sample(nsamples, seed=test_util.test_seed())
samples = tf.debugging.check_numerics(samples, 'samples')
cov = vmf.covariance()
samples, cov = self.evaluate([samples, cov])
batched_samples = np.reshape(samples, [nsamples, -1, dim])
batch_size = batched_samples.shape[1]
est_cov = np.zeros([batch_size, dim, dim], dtype=cov.dtype)
for bi in range(batched_samples.shape[1]):
est_cov[bi] = np.cov(batched_samples[:, bi], rowvar=False)
self.assertAllClose(
np.reshape(est_cov, cov.shape),
cov,
atol=0.015)
@parameterized.parameters(2, 3, 4, 5, 10, 20)
def testSampleAndPdfConsistency(self, dim):
seed_stream = test_util.test_seed_stream()
mean_direction = tf.random.uniform(
minval=1., maxval=2., shape=[2, dim], seed=seed_stream())
mean_direction = tf.nn.l2_normalize(mean_direction, axis=-1)
concentration = tf.random.uniform(
minval=1e-4, maxval=10., shape=[5, 1], seed=seed_stream())
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_direction, concentration=concentration,
validate_args=True, allow_nan_stats=False)
self._verifySampleAndPdfConsistency(vmf)
self._verifyCovariance(vmf)
self._verifyPdfWithNumpy(vmf)
def VerifyVonMisesFisherUniformZeroKL(self, dim):
seed_stream = test_util.test_seed_stream()
mean_direction = tf.random.uniform(
shape=[5, dim],
minval=1.,
maxval=2.,
seed=seed_stream())
mean_direction = tf.nn.l2_normalize(mean_direction, axis=-1)
# Zero concentration is the same as a uniform distribution on the sphere.
# Check that the KL divergence is zero.
concentration = 0.
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_direction,
concentration=concentration)
su = tfp.distributions.SphericalUniform(dimension=dim)
x = vmf.sample(int(5e4), seed=test_util.test_seed())
kl_sample = tf.reduce_mean(vmf.log_prob(x) - su.log_prob(x), axis=0)
true_kl = tfp.distributions.kl_divergence(vmf, su)
vmf_entropy = vmf.entropy()
su_entropy = su.entropy()
print(self.evaluate([vmf_entropy, su_entropy]))
true_kl_, kl_sample_ = self.evaluate([true_kl, kl_sample])
self.assertAllClose(true_kl_, kl_sample_, atol=5e-8, rtol=1e-1)
self.assertAllClose(true_kl_, np.zeros_like(true_kl_), atol=1e-4)
def VerifyVonMisesFisherUniformKL(self, dim):
seed_stream = test_util.test_seed_stream()
mean_direction = tf.random.uniform(
shape=[4, dim],
minval=1.,
maxval=4.,
seed=seed_stream())
mean_direction = tf.nn.l2_normalize(mean_direction, axis=-1)
concentration = tf.math.log(
tf.random.uniform(
shape=[2, 1],
minval=2.,
maxval=20.,
seed=seed_stream()))
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_direction,
concentration=concentration)
su = tfp.distributions.SphericalUniform(dimension=dim)
x = vmf.sample(int(5e4), seed=test_util.test_seed())
kl_sample = tf.reduce_mean(vmf.log_prob(x) - su.log_prob(x), axis=0)
true_kl = tfp.distributions.kl_divergence(vmf, su)
true_kl_, kl_sample_ = self.evaluate([true_kl, kl_sample])
self.assertAllClose(true_kl_, kl_sample_, atol=0.0, rtol=0.3)
@parameterized.parameters(2, 3, 5, 10, 20)
def testKLVonMisesFisherSphericalUniformDim(self, dim):
self.VerifyVonMisesFisherUniformZeroKL(dim=dim)
self.VerifyVonMisesFisherUniformKL(dim=dim)
def VerifyEntropy(self, dim):
seed_stream = test_util.test_seed_stream()
mean_direction = tf.random.uniform(
shape=[5, dim],
minval=1.,
maxval=2.,
seed=seed_stream())
mean_direction = tf.nn.l2_normalize(mean_direction, axis=-1)
concentration = tf.math.log(
tf.random.uniform(
shape=[2, 1],
minval=1.,
maxval=100.,
seed=seed_stream()))
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_direction,
concentration=concentration,
validate_args=True,
allow_nan_stats=False)
samples = vmf.sample(int(3e4), seed=test_util.test_seed())
sample_entropy = -tf.reduce_mean(vmf.log_prob(samples), axis=0)
true_entropy, sample_entropy = self.evaluate([
vmf.entropy(), sample_entropy])
self.assertAllClose(sample_entropy, true_entropy, rtol=3e-2)
@parameterized.parameters(2, 3, 5, 10, 20)
def testEntropyDim(self, dim):
self.VerifyEntropy(dim=dim)
def testInternalShapeInference(self):
# Regression test for the effect of b/139013403 on vMF sampling.
sample_shape = tf.constant([2])
# There needs to be a 1 dimension in the batch shape to trigger the bug
mean_dir = tf.math.l2_normalize([1., 2, 3, 4], axis=-1)
concentration = [0]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir, concentration=concentration,
validate_args=True, allow_nan_stats=False)
self.evaluate(vmf.sample(sample_shape, seed=test_util.test_seed()))
def testAssertsValidImmutableParams(self):
with self.assertRaisesOpError('`concentration` must be non-negative'):
vmf = tfp.distributions.VonMisesFisher(
mean_direction=tf.math.l2_normalize([1., 2, 3], axis=-1),
concentration=-1.,
validate_args=True,
allow_nan_stats=False)
self.evaluate(vmf.mean())
with self.assertRaisesOpError(
'`mean_direction` may not have scalar event shape'):
vmf = tfp.distributions.VonMisesFisher(
mean_direction=[1.],
concentration=0.,
validate_args=True,
allow_nan_stats=False)
self.evaluate(vmf.mean())
with self.assertRaisesOpError('`mean_direction` must be unit-length'):
vmf = tfp.distributions.VonMisesFisher(
mean_direction=tf.convert_to_tensor([1., 2, 3]),
concentration=1.,
validate_args=True,
allow_nan_stats=False)
self.evaluate(vmf.mean())
def testAssertsValidMutableParams(self):
mean_direction = tf.Variable(tf.math.l2_normalize([1., 2, 3], axis=-1))
concentration = tf.Variable(1.)
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_direction,
concentration=concentration,
validate_args=True,
allow_nan_stats=False)
self.evaluate([mean_direction.initializer, concentration.initializer])
self.evaluate(concentration.assign(-1.))
with self.assertRaisesOpError('`concentration` must be non-negative'):
self.evaluate(vmf.mean())
self.evaluate((concentration.assign(1.),
mean_direction.assign([1., 2., 3.])))
with self.assertRaisesOpError('`mean_direction` must be unit-length'):
self.evaluate(vmf.mean())
mean_direction = tf.Variable([1.])
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_direction,
concentration=concentration,
validate_args=True,
allow_nan_stats=False)
self.evaluate(mean_direction.initializer)
with self.assertRaisesOpError(
'`mean_direction` may not have scalar event shape'):
self.evaluate(vmf.mean())
def testAssertValidSample(self):
mean_dir = tf.math.l2_normalize([[1., 2, 3], [-2, -3, -1]], axis=-1)
concentration = [[0.], [2.]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir,
concentration=concentration,
validate_args=True,
allow_nan_stats=False)
with self.assertRaisesOpError('Samples must be unit length.'):
self.evaluate(vmf.prob([0.5, 0.5, 0.5]))
msg = 'must have innermost dimension matching'
static_shape_assertion = self.assertRaisesRegexp(ValueError, msg)
dynamic_shape_assertion = self.assertRaisesOpError(msg)
x = [[1., 0., 0., 0.]]
with static_shape_assertion:
self.evaluate(vmf.log_prob(x))
x_var = tf.Variable(x, shape=tf.TensorShape(None))
shape_assertion = (static_shape_assertion if tf.executing_eagerly()
else dynamic_shape_assertion)
self.evaluate(x_var.initializer)
with shape_assertion:
self.evaluate(vmf.log_prob(x_var))
def testSupportBijectorOutsideRange(self):
mean_dir = np.array([[1., 2., 3.], [-2., -3., -1.]]).astype(np.float32)
mean_dir /= np.linalg.norm(mean_dir, axis=-1)[:, np.newaxis]
concentration = [[0], [0.1], [2], [40], [1000]]
dist = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir,
concentration=concentration,
validate_args=True)
x = mean_dir
x[0][0] += 0.01
with self.assertRaisesOpError('must sum to `1`'):
self.evaluate(
dist.experimental_default_event_space_bijector().inverse(x[0]))
with self.assertRaisesOpError('must be non-negative'):
self.evaluate(
dist.experimental_default_event_space_bijector().inverse(x[1]))
if __name__ == '__main__':
test_util.main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `traverse` visitor for processing documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.util import tf_inspect
class DocGeneratorVisitor(object):
"""A visitor that generates docs for a python object when __call__ed."""
def __init__(self, root_name=''):
"""Make a visitor.
    As this visitor starts its traversal at a module or class, it will not
    be told the name of that object during traversal. `root_name` is the name
    it should use for that object, effectively prefixing all names with
    "root_name.".
Args:
root_name: The name of the root module/class.
"""
self.set_root_name(root_name)
self._index = {}
self._tree = {}
self._reverse_index = None
self._duplicates = None
self._duplicate_of = None
def set_root_name(self, root_name):
"""Sets the root name for subsequent __call__s."""
self._root_name = root_name or ''
self._prefix = (root_name + '.') if root_name else ''
@property
def index(self):
"""A map from fully qualified names to objects to be documented.
The index is filled when the visitor is passed to `traverse`.
Returns:
The index filled by traversal.
"""
return self._index
@property
def tree(self):
"""A map from fully qualified names to all its child names for traversal.
The full name to member names map is filled when the visitor is passed to
`traverse`.
Returns:
The full name to member name map filled by traversal.
"""
return self._tree
@property
def reverse_index(self):
"""A map from `id(object)` to the preferred fully qualified name.
This map only contains non-primitive objects (no numbers or strings) present
in `index` (for primitive objects, `id()` doesn't quite do the right thing).
It is computed when it, `duplicate_of`, or `duplicates` are first accessed.
Returns:
The `id(object)` to full name map.
"""
self._maybe_find_duplicates()
return self._reverse_index
@property
def duplicate_of(self):
"""A map from duplicate full names to a preferred fully qualified name.
    This map only contains names that are not themselves a preferred name.
It is computed when it, `reverse_index`, or `duplicates` are first accessed.
Returns:
The map from duplicate name to preferred name.
"""
self._maybe_find_duplicates()
return self._duplicate_of
@property
def duplicates(self):
"""A map from preferred full names to a list of all names for this symbol.
This function returns a map from preferred (master) name for a symbol to a
lexicographically sorted list of all aliases for that name (incl. the master
name). Symbols without duplicate names do not appear in this map.
It is computed when it, `reverse_index`, or `duplicate_of` are first
accessed.
Returns:
The map from master name to list of all duplicate names.
"""
self._maybe_find_duplicates()
return self._duplicates
def _add_prefix(self, name):
"""Adds the root name to a name."""
return self._prefix + name if name else self._root_name
def __call__(self, parent_name, parent, children):
"""Visitor interface, see `tensorflow/tools/common:traverse` for details.
This method is called for each symbol found in a traversal using
`tensorflow/tools/common:traverse`. It should not be called directly in
user code.
Args:
parent_name: The fully qualified name of a symbol found during traversal.
parent: The Python object referenced by `parent_name`.
children: A list of `(name, py_object)` pairs enumerating, in alphabetical
order, the children (as determined by `tf_inspect.getmembers`) of
`parent`. `name` is the local name of `py_object` in `parent`.
Raises:
RuntimeError: If this visitor is called with a `parent` that is not a
class or module.
"""
parent_name = self._add_prefix(parent_name)
self._index[parent_name] = parent
self._tree[parent_name] = []
if not (tf_inspect.ismodule(parent) or tf_inspect.isclass(parent)):
raise RuntimeError('Unexpected type in visitor -- %s: %r' % (parent_name,
parent))
for i, (name, child) in enumerate(list(children)):
# Don't document __metaclass__
if name in ['__metaclass__']:
del children[i]
continue
full_name = '.'.join([parent_name, name]) if parent_name else name
self._index[full_name] = child
self._tree[parent_name].append(name)
def _maybe_find_duplicates(self):
"""Compute data structures containing information about duplicates.
Find duplicates in `index` and decide on one to be the "master" name.
Computes a reverse_index mapping each object id to its master name.
Also computes a map `duplicate_of` from aliases to their master name (the
master name itself has no entry in this map), and a map `duplicates` from
master names to a lexicographically sorted list of all aliases for that name
(incl. the master name).
All these are computed and set as fields if they haven't already.
"""
if self._reverse_index is not None:
return
# Maps the id of a symbol to its fully qualified name. For symbols that have
# several aliases, this map contains the first one found.
# We use id(py_object) to get a hashable value for py_object. Note all
# objects in _index are in memory at the same time so this is safe.
reverse_index = {}
# Make a preliminary duplicates map. For all sets of duplicate names, it
# maps the first name found to a list of all duplicate names.
raw_duplicates = {}
for full_name, py_object in six.iteritems(self._index):
# We cannot use the duplicate mechanism for some constants, since e.g.,
# id(c1) == id(c2) with c1=1, c2=1. This is unproblematic since constants
# have no usable docstring and won't be documented automatically.
if (py_object is not None and
not isinstance(py_object, six.integer_types + six.string_types +
(six.binary_type, six.text_type, float, complex, bool))
and py_object is not ()):
object_id = id(py_object)
if object_id in reverse_index:
master_name = reverse_index[object_id]
if master_name in raw_duplicates:
raw_duplicates[master_name].append(full_name)
else:
raw_duplicates[master_name] = [master_name, full_name]
else:
reverse_index[object_id] = full_name
# Decide on master names, rewire duplicates and make a duplicate_of map
# mapping all non-master duplicates to the master name. The master symbol
# does not have an entry in this map.
duplicate_of = {}
# Duplicates maps the main symbols to the set of all duplicates of that
# symbol (incl. itself).
duplicates = {}
for names in raw_duplicates.values():
names = sorted(names)
# Choose the lexicographically first name with the minimum number of
# submodules. This will prefer highest level namespace for any symbol.
master_name = min(names, key=lambda name: name.count('.'))
duplicates[master_name] = names
for name in names:
if name != master_name:
duplicate_of[name] = master_name
# Set the reverse index to the canonical name.
reverse_index[id(self._index[master_name])] = master_name
self._duplicate_of = duplicate_of
self._duplicates = duplicates
self._reverse_index = reverse_index
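# Illustrative sketch only (hypothetical helper, not part of this module):
# the visitor can be driven by hand with a throwaway module instead of going
# through the `traverse` helper.
def _example_visit():
  import types

  fake_module = types.ModuleType('fake_module')
  fake_module.Widget = type('Widget', (object,), {})  # a documented class member

  visitor = DocGeneratorVisitor(root_name='tf')
  visitor('', fake_module, [('Widget', fake_module.Widget)])
  # visitor.index == {'tf': fake_module, 'tf.Widget': fake_module.Widget}
  # visitor.tree == {'tf': ['Widget']}
  return visitor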
|
|
from __future__ import unicode_literals
from datetime import timedelta
from django.conf import settings
from django.contrib.gis.db import models
from django.contrib.postgres.fields import JSONField
from django.core.urlresolvers import reverse
from django.utils.encoding import python_2_unicode_compatible
from model_utils import Choices
from django.contrib.auth.models import Group
from ledger.accounts.models import Organisation
#from ledger.payments.models import Invoice
@python_2_unicode_compatible
class Record(models.Model):
"""This model represents a record that needs to be saved for
future reference. It also records metadata and optional text content to be
indexed for search.
"""
DOC_CATEGORY_CHOICES = Choices(
(1, 'consent', ('Landowner consent')),
(2, 'deed', ('Deed')),
(3, 'assessment', ('Assessment report')),
(4, 'referee_response', ('Referee response')),
(5, 'lodgement', ('Lodgement document')),
(6, 'draft', ('Draft document')),
(7, 'final', ('Final document')),
(8, 'determination', ('Determination document')),
(9, 'completion', ('Completed document')),
)
upload = models.FileField(max_length=512, upload_to='uploads/%Y/%m/%d')
name = models.CharField(max_length=256)
category = models.IntegerField(choices=DOC_CATEGORY_CHOICES, null=True, blank=True)
metadata = JSONField(null=True, blank=True)
text_content = models.TextField(null=True, blank=True, editable=False) # Text for indexing
def __str__(self):
if self.category:
return '{} ({})'.format(self.name, self.get_category_display())
return self.name
@python_2_unicode_compatible
class Vessel(models.Model):
"""This model represents a vessel/craft that will be used
in relation to the application
"""
VESSEL_TYPE_CHOICES = Choices(
(0, 'vessel', ('Vessel')),
(1, 'craft', ('Craft')),
)
vessel_type = models.SmallIntegerField(choices=VESSEL_TYPE_CHOICES, null=True, blank=True)
name = models.CharField(max_length=256)
vessel_id = models.CharField(max_length=256, null=True, blank=True, verbose_name='Vessel identification')
registration = models.ManyToManyField(Record, blank=True)
size = models.PositiveIntegerField(null=True, blank=True, verbose_name='size (m)')
engine = models.PositiveIntegerField(null=True, blank=True, verbose_name='engine (kW)')
passenger_capacity = models.PositiveIntegerField(null=True, blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class ApplicationPurpose(models.Model):
purpose = models.CharField(max_length=256)
def __str__(self):
return self.purpose
@python_2_unicode_compatible
class Application(models.Model):
"""This model represents an application by a customer to P&W for a single
permit, licence/permit, part 5, etc.
"""
APP_TYPE_CHOICES = Choices(
(1, 'permit', ('Permit')),
(2, 'licence', ('Licence/permit')),
(3, 'part5', ('Part 5 - New Application')),
(4, 'emergency', ('Emergency works')),
(5, 'part5cr', ('Part 5 - Amendment Request')),
(6, 'part5amend', ('Part 5 - Amendment Application')),
(7, 'test', ('Test - Application'))
)
APP_STATE_CHOICES = Choices(
(1, 'draft', ('Draft')),
(2, 'with_admin', ('With Admin Officer')),
(3, 'with_referee', ('With Referrals')),
(4, 'with_assessor', ('With Assessor')),
(5, 'with_manager', ('With Manager')),
(6, 'issued', ('Issued')),
(7, 'issued_with_admin', ('Issued (with admin)')),
(8, 'declined', ('Declined')),
(9, 'new', ('New')),
(10, 'approved', ('Approved')),
(11, 'expird', ('Expired')),
(12, 'with_director', ('With Director')),
(13, 'with_exec', ('With Executive')),
(14, 'completed', ('Completed'))
)
APP_LOCATION_CHOICES = Choices(
(0, 'onland', ('On Land')),
(1, 'onwater', ('On Water')),
(2, 'both', ('Both')),
)
applicant = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='applicant')
organisation = models.ForeignKey(Organisation, blank=True, null=True, on_delete=models.PROTECT)
app_type = models.IntegerField(choices=APP_TYPE_CHOICES)
assignee = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='assignee')
state = models.IntegerField(choices=APP_STATE_CHOICES, default=APP_STATE_CHOICES.draft, editable=False)
title = models.CharField(max_length=256)
description = models.TextField(null=True, blank=True)
submit_date = models.DateField()
expire_date = models.DateField(blank=True, null=True)
proposed_commence = models.DateField(null=True, blank=True)
proposed_end = models.DateField(null=True, blank=True)
issue_date = models.DateField(null=True, blank=True)
cost = models.CharField(max_length=256, null=True, blank=True)
project_no = models.CharField(max_length=256, null=True, blank=True)
related_permits = models.TextField(null=True, blank=True)
over_water = models.BooleanField(default=False)
records = models.ManyToManyField(Record, blank=True, related_name='records')
vessels = models.ManyToManyField(Vessel, blank=True)
purpose = models.ForeignKey(ApplicationPurpose, null=True, blank=True)
max_participants = models.IntegerField(null=True, blank=True)
proposed_location = models.SmallIntegerField(choices=APP_LOCATION_CHOICES, null=True, blank=True)
address = models.TextField(null=True, blank=True)
location_route_access = models.ForeignKey(Record, null=True, blank=True, related_name='location_route_access')
jetties = models.TextField(null=True, blank=True)
jetty_dot_approval = models.NullBooleanField(default=None)
jetty_dot_approval_expiry = models.DateField(null=True, blank=True)
drop_off_pick_up = models.TextField(null=True, blank=True)
food = models.NullBooleanField(default=None)
beverage = models.NullBooleanField(default=None)
byo_alcohol = models.NullBooleanField(default=None)
sullage_disposal = models.TextField(null=True, blank=True)
waste_disposal = models.TextField(null=True, blank=True)
refuel_location_method = models.TextField(null=True, blank=True)
berth_location = models.TextField(null=True, blank=True)
anchorage = models.TextField(null=True, blank=True)
operating_details = models.TextField(null=True, blank=True)
cert_survey = models.ForeignKey(Record, blank=True, null=True, related_name='cert_survey')
cert_public_liability_insurance = models.ForeignKey(Record, blank=True, null=True, related_name='cert_public_liability_insurace')
risk_mgmt_plan = models.ForeignKey(Record, blank=True, null=True, related_name='risk_mgmt_plan')
safety_mgmt_procedures = models.ForeignKey(Record, blank=True, null=True, related_name='safety_mgmt_plan')
brochures_itineries_adverts = models.ManyToManyField(Record, blank=True, related_name='brochures_itineries_adverts')
land_owner_consent = models.ManyToManyField(Record, blank=True, related_name='land_owner_consent')
deed = models.ForeignKey(Record, blank=True, null=True, related_name='deed')
submitted_by = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='Submitted_by')
river_lease_require_river_lease = models.NullBooleanField(default=None, null=True, blank=True)
river_lease_scan_of_application = models.ForeignKey(Record, null=True, blank=True, related_name='river_lease_scan_of_application')
river_lease_reserve_licence = models.NullBooleanField(default=None, null=True, blank=True)
river_lease_application_number = models.CharField(max_length=30, null=True, blank=True)
proposed_development_current_use_of_land = models.TextField(null=True, blank=True)
proposed_development_plans = models.ManyToManyField(Record, blank=True, related_name='proposed_development_plans')
proposed_development_description = models.TextField(null=True, blank=True)
document_draft = models.ForeignKey(Record, null=True, blank=True, related_name='document_draft')
document_new_draft = models.ForeignKey(Record, null=True, blank=True, related_name='document_newdraft')
document_new_draft_v3 = models.ForeignKey(Record, null=True, blank=True, related_name='document_newdraftv3')
document_draft_signed = models.ForeignKey(Record, null=True, blank=True, related_name='document_draft_signed')
document_final = models.ForeignKey(Record, null=True, blank=True, related_name='document_final')
document_final_signed = models.ForeignKey(Record, null=True, blank=True, related_name='document_final_signed')
document_determination = models.ForeignKey(Record, null=True, blank=True, related_name='document_determination')
document_completion = models.ForeignKey(Record, null=True, blank=True, related_name='document_completion')
publish_documents = models.DateField(null=True, blank=True)
publish_draft_report = models.DateField(null=True, blank=True)
publish_final_report = models.DateField(null=True, blank=True)
publish_determination_report = models.DateField(null=True, blank=True)
routeid = models.CharField(null=True, blank=True, default=1, max_length=4)
assessment_start_date = models.DateField(null=True, blank=True)
group = models.ForeignKey(Group, null=True, blank=True, related_name='application_group_assignment')
swan_river_trust_board_feedback = models.ForeignKey(Record, null=True, blank=True, related_name='document_swan_river_board_feedback')
document_memo = models.ForeignKey(Record, null=True, blank=True, related_name='document_memo')
document_briefing_note = models.ForeignKey(Record, null=True, blank=True, related_name='document_briefing_note')
document_determination_approved = models.ForeignKey(Record, null=True, blank=True, related_name='document_determination_approved')
approval_id = models.IntegerField(null=True, blank=True)
assessed_by = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='assessed_by')
def __str__(self):
return 'Application {}: {} - {} ({})'.format(
self.pk, self.get_app_type_display(), self.title, self.get_state_display())
def get_absolute_url(self):
        return reverse('application_detail', args=(self.pk,))
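# Illustrative usage sketch (not part of the original models): with
# django-model-utils ``Choices``, the attribute form of a choice resolves to
# its stored integer, so other code can compare states without magic numbers,
# e.g.:
#
#   if application.state == Application.APP_STATE_CHOICES.with_assessor:
#       ...  # hypothetical branch; get_state_display() would give 'With Assessor'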
@python_2_unicode_compatible
class PublicationFeedback(models.Model):
PUB_STATES_CHOICES = Choices(
(1, 'Western Australia', ('Western Australia')),
(2, 'New South Wales', ('New South Wales')),
(3, 'Victoria', ('Victoria')),
(4, 'South Australia', ('South Australia')),
(5, 'Northern Territory', ('Northern Territory')),
(6, 'Queensland', ('Queensland')),
(7, 'Australian Capital Territory', ('Australian Capital Territory')),
(8, 'Tasmania', ('Tasmania')),
)
application = models.ForeignKey(Application, on_delete=models.CASCADE)
name = models.CharField(max_length=256)
address = models.CharField(max_length=256)
suburb = models.CharField(max_length=100)
state = models.IntegerField(choices=PUB_STATES_CHOICES)
postcode = models.CharField(max_length=4)
phone = models.CharField(max_length=20)
email = models.EmailField()
comments = models.TextField(null=True, blank=True)
records = models.ManyToManyField(Record, blank=True, related_name='feedback')
status = models.CharField(max_length=20)
def __str__(self):
return 'PublicationFeedback {} ({})'.format(self.pk, self.application)
@python_2_unicode_compatible
class PublicationNewspaper(models.Model):
"""This model represents Application Published in newspapert
"""
application = models.ForeignKey(Application, on_delete=models.CASCADE)
date = models.DateField(null=True, blank=True)
newspaper = models.CharField(max_length=150)
records = models.ManyToManyField(Record, blank=True, related_name='newspaper')
def __str__(self):
return 'PublicationNewspaper {} ({})'.format(self.pk, self.application)
@python_2_unicode_compatible
class PublicationWebsite(models.Model):
"""This model represents Application Published in Website
"""
application = models.ForeignKey(Application, on_delete=models.CASCADE)
original_document = models.ForeignKey(Record, blank=True, null=True, related_name='original_document')
published_document = models.ForeignKey(Record, blank=True, null=True, related_name='published_document')
def __str__(self):
return 'PublicationWebsite {} ({})'.format(self.pk, self.application)
@python_2_unicode_compatible
class Location(models.Model):
"""This model represents a single spatial location associated with an
application.
"""
application = models.ForeignKey(Application, on_delete=models.CASCADE)
lot = models.CharField(max_length=256, null=True, blank=True)
reserve = models.CharField(max_length=256, null=True, blank=True)
suburb = models.CharField(max_length=256, null=True, blank=True)
intersection = models.CharField(max_length=256, null=True, blank=True)
# TODO: validation related to LGA name (possible FK).
lga = models.CharField(max_length=256, null=True, blank=True)
poly = models.PolygonField(null=True, blank=True)
records = models.ManyToManyField(Record, blank=True)
# TODO: certificate of title fields (ref. screen 30)
title_volume = models.CharField(max_length=256, null=True, blank=True)
folio = models.CharField(max_length=30, null=True, blank=True)
dpd_number = models.CharField(max_length=30, null=True, blank=True)
    location = models.CharField(max_length=256, null=True, blank=True)  # this seems different from the street address, based on the example form.
street_number_name = models.CharField(max_length=256, null=True, blank=True)
def __str__(self):
return 'Location {} ({})'.format(self.pk, self.application)
@python_2_unicode_compatible
class Referral(models.Model):
"""This model represents a referral of an application to a referee
(external or internal) for comment/conditions.
"""
REFERRAL_STATUS_CHOICES = Choices(
(1, 'referred', ('Referred')),
(2, 'responded', ('Responded')),
(3, 'recalled', ('Recalled')),
(4, 'expired', ('Expired')),
)
application = models.ForeignKey(Application, on_delete=models.CASCADE)
referee = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)
details = models.TextField(blank=True, null=True)
sent_date = models.DateField()
period = models.PositiveIntegerField(verbose_name='period (days)')
expire_date = models.DateField(blank=True, null=True, editable=False)
response_date = models.DateField(blank=True, null=True)
feedback = models.TextField(blank=True, null=True)
records = models.ManyToManyField(Record, blank=True)
status = models.IntegerField(choices=REFERRAL_STATUS_CHOICES, default=REFERRAL_STATUS_CHOICES.referred)
class Meta:
unique_together = ('application', 'referee')
def __str__(self):
return 'Referral {} to {} ({})'.format(self.pk, self.referee, self.application)
def save(self, *args, **kwargs):
"""Override save to set the expire_date field.
"""
self.expire_date = self.sent_date + timedelta(days=self.period)
super(Referral, self).save(*args, **kwargs)
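# Example of the save() behaviour above (illustrative values): a Referral with
# sent_date=date(2021, 3, 1) and period=14 is stored with
# expire_date=date(2021, 3, 15), because expire_date is always recomputed as
# sent_date + timedelta(days=period) on every save.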
@python_2_unicode_compatible
class Condition(models.Model):
"""This model represents a condition of approval for an application
(either proposed by a referee or applied by P&W).
"""
CONDITION_STATUS_CHOICES = Choices(
(1, 'proposed', ('Proposed')),
(2, 'applied', ('Applied')),
(3, 'rejected', ('Rejected')),
(4, 'cancelled', ('Cancelled')),
)
CONDITION_RECUR_CHOICES = Choices(
(1, 'weekly', ('Weekly')),
(2, 'monthly', ('Monthly')),
(3, 'annually', ('Annually')),
)
application = models.ForeignKey(Application, on_delete=models.PROTECT)
condition = models.TextField(blank=True, null=True)
referral = models.ForeignKey(Referral, null=True, blank=True, on_delete=models.PROTECT)
status = models.IntegerField(choices=CONDITION_STATUS_CHOICES, default=CONDITION_STATUS_CHOICES.proposed)
records = models.ManyToManyField(Record, blank=True)
due_date = models.DateField(blank=True, null=True)
# Rule: recurrence patterns (if present) begin on the due date.
recur_pattern = models.IntegerField(choices=CONDITION_RECUR_CHOICES, null=True, blank=True)
recur_freq = models.PositiveIntegerField(
null=True, blank=True, verbose_name='recurrence frequency',
help_text='How frequently is the recurrence pattern applied (e.g. every 2 months)')
def __str__(self):
return 'Condition {}: {}'.format(self.pk, self.condition)
@python_2_unicode_compatible
class Compliance(models.Model):
"""This model represents a request for confirmation of fulfilment of the
requirements for a single condition, based upon supplied evidence.
"""
COMPLIANCE_STATUS_CHOICES = Choices(
(1, 'requested', ('Requested')),
(2, 'approved', ('Approved')),
(3, 'returned', ('Returned')),
)
condition = models.ForeignKey(Condition, on_delete=models.PROTECT)
applicant = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='compliance_applicant')
assignee = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='compliance_assignee')
status = models.IntegerField(choices=COMPLIANCE_STATUS_CHOICES, default=COMPLIANCE_STATUS_CHOICES.requested)
submit_date = models.DateField()
compliance = models.TextField(blank=True, null=True, help_text='Information to fulfil requirement of condition.')
comments = models.TextField(blank=True, null=True)
approve_date = models.DateField(blank=True, null=True)
records = models.ManyToManyField(Record, blank=True)
def __str__(self):
return 'Compliance {} ({})'.format(self.pk, self.condition)
class Communication(models.Model):
"""This model represents the communication model
"""
COMM_TYPE = Choices(
(0, 'none', ('None')),
(1, 'phone', ('Phone')),
(2, 'email', ('Email')),
(3, 'mail', ('Mail')),
)
application = models.ForeignKey(Application, on_delete=models.PROTECT)
comms_to = models.CharField(max_length=256, null=True, blank=True)
comms_from = models.CharField(max_length=256, null=True, blank=True)
subject = models.CharField(max_length=256, null=True, blank=True)
    comms_type = models.IntegerField(choices=COMM_TYPE, default=COMM_TYPE.none)
details = models.TextField(blank=True, null=True)
records = models.ManyToManyField(Record, blank=True, related_name='communication_docs')
state = models.IntegerField(blank=True, null=True) # move to foreign key once APP_STATE_CHOICES becomes a model
created = models.DateTimeField(auto_now_add=True)
@python_2_unicode_compatible
class Delegate(models.Model):
"""This model represents the delegation of authority for an EmailUser to
submit applications on behalf of an Organisation, within the Statutory
Development application.
"""
email_user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=False, on_delete=models.PROTECT)
organisation = models.ForeignKey(Organisation, blank=False, on_delete=models.PROTECT)
def __str__(self):
return '{}: {}'. format(self.email_user.email, self.organisation.name)
class Meta:
unique_together = ('email_user', 'organisation')
@python_2_unicode_compatible
class ApplicationInvoice(models.Model):
"""This model represents a reference to an invoice for payment raised against
an application.
"""
application = models.ForeignKey(Application)
invoice_reference = models.CharField(max_length=64)
def __str__(self):
return 'Application {} invoice {}'.format(self.application, self.invoice_reference)
|
|
#!/usr/bin/env python3
import argparse
import csv
import json
import logging
import math
import socket
import subprocess
import sys
import time
import traceback
from datetime import datetime
from collections import namedtuple
import requests
import six.moves.urllib as urllib
from common import (get_marathon_auth_params, set_logging_args,
set_marathon_auth_args, setup_logging, cleanup_json)
from utils import (get_task_ip_and_ports, get_app_port_mappings)
from zdd_exceptions import (
AppCreateException, AppDeleteException, AppScaleException,
InvalidArgException, MarathonEndpointException,
MarathonLbEndpointException, MissingFieldException)
logger = logging.getLogger('zdd')
def query_yes_no(question, default="yes"):
# Thanks stackoverflow:
# https://stackoverflow.com/questions/3041986/python-command-line-yes-no-input
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def marathon_get_request(args, path):
url = args.marathon + path
try:
response = requests.get(url, auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
raise MarathonEndpointException(
"Error while querying marathon", url, traceback.format_exc())
return response
def list_marathon_apps(args):
response = marathon_get_request(args, "/v2/apps")
return cleanup_json(response.json())['apps']
def fetch_marathon_app(args, app_id):
response = marathon_get_request(args, "/v2/apps" + app_id)
return cleanup_json(response.json())['app']
def _get_alias_records(hostname):
"""Return all IPv4 A records for a given hostname
"""
return socket.gethostbyname_ex(hostname)[2]
def _unparse_url_alias(url, addr):
"""Reassemble a url object into a string but with a new address
"""
return urllib.parse.urlunparse((url[0],
addr + ":" + str(url.port),
url[2],
url[3],
url[4],
url[5]))
def get_marathon_lb_urls(args):
"""Return a list of urls for all Aliases of the
marathon_lb url passed in as an argument
"""
url = urllib.parse.urlparse(args.marathon_lb)
addrs = _get_alias_records(url.hostname)
return [_unparse_url_alias(url, addr) for addr in addrs]
def fetch_haproxy_pids(haproxy_url):
try:
response = requests.get(haproxy_url + "/_haproxy_getpids")
response.raise_for_status()
except requests.exceptions.RequestException:
logger.exception("Caught exception when retrieving HAProxy"
" pids from " + haproxy_url)
raise
return response.text.split()
def check_haproxy_reloading(haproxy_url):
"""Return False if haproxy has only one pid, it is not reloading.
Return True if we catch an exception while making a request to
haproxy or if more than one pid is returned
"""
try:
pids = fetch_haproxy_pids(haproxy_url)
except requests.exceptions.RequestException:
# Assume reloading on any error, this should be caught with a timeout
return True
if len(pids) > 1:
logger.info("Waiting for {} pids on {}".format(len(pids), haproxy_url))
return True
return False
def any_marathon_lb_reloading(marathon_lb_urls):
return any([check_haproxy_reloading(url) for url in marathon_lb_urls])
def fetch_haproxy_stats(haproxy_url):
try:
response = requests.get(haproxy_url + "/haproxy?stats;csv")
response.raise_for_status()
except requests.exceptions.RequestException:
logger.exception("Caught exception when retrieving HAProxy"
" stats from " + haproxy_url)
raise
return response.text
def fetch_combined_haproxy_stats(marathon_lb_urls):
raw = ''.join([fetch_haproxy_stats(url) for url in marathon_lb_urls])
return parse_haproxy_stats(raw)
def parse_haproxy_stats(csv_data):
rows = csv_data.splitlines()
headings = rows.pop(0).lstrip('# ').rstrip(',\n').split(',')
csv_reader = csv.reader(rows, delimiter=',', quotechar="'")
Row = namedtuple('Row', headings)
return [Row(*row[0:-1]) for row in csv_reader if row[0][0] != '#']
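# parse_haproxy_stats() assumes the standard HAProxy stats CSV layout: a
# heading row beginning with '# pxname,svname,qcur,...' and data rows ending
# with a trailing comma (hence the row[0:-1] slice). Illustrative result:
#   Row(pxname='mygroup_10000', svname='10_0_6_25_31623', qcur='0', ...)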
def get_deployment_label(app):
return get_deployment_group(app) + "_" + app['labels']['HAPROXY_0_PORT']
def _if_app_listener(app, listener):
return (listener.pxname == get_deployment_label(app) and
listener.svname not in ['BACKEND', 'FRONTEND'])
def fetch_app_listeners(app, marathon_lb_urls):
haproxy_stats = fetch_combined_haproxy_stats(marathon_lb_urls)
return [l for l in haproxy_stats if _if_app_listener(app, l)]
def waiting_for_listeners(new_app, old_app, listeners, haproxy_count):
listener_count = (len(listeners) / haproxy_count)
return listener_count != new_app['instances'] + old_app['instances']
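# Note on waiting_for_listeners(): every running task is expected to register
# one listener per HAProxy instance, so len(listeners) / haproxy_count is the
# per-LB listener count, which should equal the combined instance count of the
# old and new apps once registration has settled.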
def get_deployment_target(app):
if 'HAPROXY_DEPLOYMENT_TARGET_INSTANCES' in app['labels']:
return int(app['labels']['HAPROXY_DEPLOYMENT_TARGET_INSTANCES'])
else:
return app['instances']
def get_new_instance_count(app):
if 'HAPROXY_DEPLOYMENT_NEW_INSTANCES' in app['labels']:
return int(app['labels']['HAPROXY_DEPLOYMENT_NEW_INSTANCES'])
else:
return 0
def waiting_for_up_listeners(app, listeners, haproxy_count):
up_listeners = [l for l in listeners if l.status == 'UP']
up_listener_count = (len(up_listeners) / haproxy_count)
return up_listener_count < get_deployment_target(app)
def select_draining_listeners(listeners):
return [l for l in listeners if l.status == 'MAINT']
def select_drained_listeners(listeners):
draining_listeners = select_draining_listeners(listeners)
return [l for l in draining_listeners if not _has_pending_requests(l)]
def get_svnames_from_task(app, task):
prefix = task['host'].replace('.', '_')
task_ip, _ = get_task_ip_and_ports(app, task)
if task['host'] == task_ip:
for port in task['ports']:
yield('{}_{}'.format(prefix, port))
else:
for port in task['ports']:
yield('{}_{}_{}'.format(prefix, task_ip.replace('.', '_'), port))
def get_svnames_from_tasks(app, tasks):
svnames = []
for task in tasks:
svnames += get_svnames_from_task(app, task)
return svnames
def _has_pending_requests(listener):
return int(listener.qcur or 0) > 0 or int(listener.scur or 0) > 0
def is_hybrid_deployment(args, app):
if (get_new_instance_count(app) != 0 and not args.complete_cur and
not args.complete_prev):
return True
else:
return False
def find_drained_task_ids(app, listeners, haproxy_count):
"""Return app tasks which have all haproxy listeners down and draining
of any pending sessions or connections
"""
tasks = zip(get_svnames_from_tasks(app, app['tasks']), app['tasks'])
drained_listeners = select_drained_listeners(listeners)
drained_task_ids = []
for svname, task in tasks:
task_listeners = [l for l in drained_listeners if l.svname == svname]
if len(task_listeners) == haproxy_count:
drained_task_ids.append(task['id'])
return drained_task_ids
def find_draining_task_ids(app, listeners, haproxy_count):
"""Return app tasks which have all haproxy listeners draining
"""
tasks = zip(get_svnames_from_tasks(app, app['tasks']), app['tasks'])
draining_listeners = select_draining_listeners(listeners)
draining_task_ids = []
for svname, task in tasks:
task_listeners = [l for l in draining_listeners if l.svname == svname]
if len(task_listeners) == haproxy_count:
draining_task_ids.append(task['id'])
return draining_task_ids
def max_wait_not_exceeded(max_wait, timestamp):
return time.time() - timestamp < max_wait
def find_tasks_to_kill(args, new_app, old_app, timestamp):
marathon_lb_urls = get_marathon_lb_urls(args)
haproxy_count = len(marathon_lb_urls)
try:
listeners = fetch_app_listeners(new_app, marathon_lb_urls)
except requests.exceptions.RequestException:
raise MarathonLbEndpointException(
"Error while querying Marathon-LB",
marathon_lb_urls,
traceback.format_exc())
while max_wait_not_exceeded(args.max_wait, timestamp):
time.sleep(args.step_delay)
logger.info("Existing app running {} instances, "
"new app running {} instances"
.format(old_app['instances'], new_app['instances']))
if any_marathon_lb_reloading(marathon_lb_urls):
continue
try:
listeners = fetch_app_listeners(new_app, marathon_lb_urls)
except requests.exceptions.RequestException:
# Restart loop if we hit an exception while loading listeners,
# this may be normal behaviour
continue
logger.info("Found {} app listeners across {} HAProxy instances"
.format(len(listeners), haproxy_count))
if waiting_for_listeners(new_app, old_app, listeners, haproxy_count):
continue
if waiting_for_up_listeners(new_app, listeners, haproxy_count):
continue
if waiting_for_drained_listeners(listeners):
continue
return find_drained_task_ids(old_app, listeners, haproxy_count)
    logger.info('Timed out waiting for tasks to fully drain; finding any'
                ' draining tasks and continuing with deployment...')
return find_draining_task_ids(old_app, listeners, haproxy_count)
def deployment_in_progress(app):
return len(app['deployments']) > 0
def execute_pre_kill_hook(args, old_app, tasks_to_kill, new_app):
if args.pre_kill_hook is not None:
logger.info("Calling pre-kill hook '{}'".format(args.pre_kill_hook))
subprocess.check_call([args.pre_kill_hook,
json.dumps(old_app),
json.dumps(tasks_to_kill),
json.dumps(new_app)])
def swap_zdd_apps(args, new_app, old_app):
func_args = (args, new_app, old_app)
while True:
res = _swap_zdd_apps(func_args[0], func_args[1], func_args[2])
if isinstance(res, bool):
return res
func_args = res
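# swap_zdd_apps() deliberately loops instead of recursing: _swap_zdd_apps()
# returns either a bool (deployment finished or aborted) or a fresh
# (args, new_app, old_app) tuple to retry with, so a long-running deployment
# does not build up an unbounded Python call stack.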
def _swap_zdd_apps(args, new_app, old_app):
old_app = fetch_marathon_app(args, old_app['id'])
new_app = fetch_marathon_app(args, new_app['id'])
if deployment_in_progress(new_app):
time.sleep(args.step_delay)
return args, new_app, old_app
tasks_to_kill = find_tasks_to_kill(args, new_app, old_app, time.time())
if ready_to_delete_old_app(args, new_app, old_app, tasks_to_kill):
return safe_delete_app(args, old_app, new_app)
if len(tasks_to_kill) > 0:
execute_pre_kill_hook(args, old_app, tasks_to_kill, new_app)
logger.info("There are {} draining listeners, "
"about to kill the following tasks:\n - {}"
.format(len(tasks_to_kill),
"\n - ".join(tasks_to_kill)))
if args.force or query_yes_no("Continue?"):
logger.info("Scaling down old app by {} instances"
.format(len(tasks_to_kill)))
kill_marathon_tasks(args, tasks_to_kill)
else:
return False
if is_hybrid_deployment(args, new_app):
if new_app['instances'] < get_new_instance_count(new_app):
scale_new_app_instances(args, new_app, old_app)
else:
if new_app['instances'] < get_deployment_target(new_app):
scale_new_app_instances(args, new_app, old_app)
return (args, new_app, old_app)
def ready_to_delete_old_app(args, new_app, old_app, draining_task_ids):
new_instances = get_new_instance_count(new_app)
if is_hybrid_deployment(args, new_app):
return (int(new_app['instances']) == new_instances and
int(old_app['instances']) == (
get_deployment_target(old_app) - new_instances))
else:
return (int(new_app['instances']) == get_deployment_target(new_app) and
len(draining_task_ids) == int(old_app['instances']))
def waiting_for_drained_listeners(listeners):
return len(select_drained_listeners(listeners)) < 1
def scale_new_app_instances(args, new_app, old_app):
"""Scale the app by 50% of its existing instances until we
meet or surpass instances deployed for old_app.
At which point go right to the new_app deployment target
"""
instances = (math.floor(new_app['instances'] +
(new_app['instances'] + 1) / 2))
if is_hybrid_deployment(args, new_app):
if instances > get_new_instance_count(new_app):
instances = get_new_instance_count(new_app)
else:
if instances >= old_app['instances']:
instances = get_deployment_target(new_app)
logger.info("Scaling new app up to {} instances".format(instances))
return scale_marathon_app_instances(args, new_app, instances)
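# Illustrative growth of the scaling step above (ignoring the hybrid and
# deployment-target clamps): instance counts follow 1 -> 2 -> 3 -> 5 -> 8 ...,
# i.e. each step adds roughly half of the current instance count.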
def safe_delete_app(args, app, new_app):
if is_hybrid_deployment(args, new_app):
logger.info("Not deleting old app, as its hybrid configuration")
return True
else:
logger.info("About to delete old app {}".format(app['id']))
if args.force or query_yes_no("Continue?"):
delete_marathon_app(args, app)
return True
else:
return False
def delete_marathon_app(args, app):
url = args.marathon + '/v2/apps' + app['id']
try:
response = requests.delete(url,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
raise AppDeleteException(
"Error while deleting the app", url, traceback.format_exc())
return response
def kill_marathon_tasks(args, ids):
data = json.dumps({'ids': ids})
url = args.marathon + "/v2/tasks/delete?scale=true"
headers = {'Content-Type': 'application/json'}
try:
response = requests.post(url, headers=headers, data=data,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
# This is App Scale Down, so raising AppScale Exception
raise AppScaleException(
"Error while scaling the app", url, data, traceback.format_exc())
return response
def scale_marathon_app_instances(args, app, instances):
url = args.marathon + "/v2/apps" + app['id']
data = json.dumps({'instances': instances})
headers = {'Content-Type': 'application/json'}
try:
response = requests.put(url, headers=headers, data=data,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
# This is App Scale Up, so raising AppScale Exception
raise AppScaleException(
"Error while scaling the app", url, data, traceback.format_exc())
return response
def deploy_marathon_app(args, app):
url = args.marathon + "/v2/apps"
data = json.dumps(app)
headers = {'Content-Type': 'application/json'}
try:
response = requests.post(url, headers=headers, data=data,
auth=get_marathon_auth_params(args))
response.raise_for_status()
except requests.exceptions.RequestException:
raise AppCreateException(
"Error while creating the app", url, data, traceback.format_exc())
return response
def get_service_port(app):
portMappings = get_app_port_mappings(app)
if len(portMappings) > 0:
servicePort = portMappings[0].get('servicePort')
if servicePort:
return servicePort
portDefinitions = app.get('portDefinitions', [])
if len(portDefinitions) > 0:
        port = portDefinitions[0].get('port')
if port:
return int(port)
ports = app.get('ports', [])
if len(ports) > 0:
return int(ports[0])
raise MissingFieldException("App doesn't contain a service port",
'container.portMappings')
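# get_service_port() falls back through the app-definition shapes Marathon may
# return: container port mappings carrying 'servicePort', then top-level
# 'portDefinitions', then the legacy 'ports' array; MissingFieldException is
# raised only when none of these are present.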
def set_service_port(app, servicePort):
container = app.get('container', {})
portMappings = container.get('docker', {}).get('portMappings', [])
if len(portMappings) > 0:
app['container']['docker']['portMappings'][0]['servicePort'] =\
int(servicePort)
return app
portMappings = container.get('portMappings', [])
if len(portMappings) > 0:
app['container']['portMappings'][0]['servicePort'] =\
int(servicePort)
return app
portDefinitions = app.get('portDefinitions', [])
if len(portDefinitions) > 0:
app['portDefinitions'][0]['port'] = int(servicePort)
return app
app['ports'][0] = int(servicePort)
return app
def validate_app(app):
if app['id'] is None:
raise MissingFieldException("App doesn't contain a valid App ID",
'id')
if 'labels' not in app:
raise MissingFieldException("No labels found. Please define the"
" HAPROXY_DEPLOYMENT_GROUP label",
'label')
if 'HAPROXY_DEPLOYMENT_GROUP' not in app['labels']:
raise MissingFieldException("Please define the "
"HAPROXY_DEPLOYMENT_GROUP label",
'HAPROXY_DEPLOYMENT_GROUP')
if 'HAPROXY_DEPLOYMENT_ALT_PORT' not in app['labels']:
raise MissingFieldException("Please define the "
"HAPROXY_DEPLOYMENT_ALT_PORT label",
'HAPROXY_DEPLOYMENT_ALT_PORT')
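# Minimal app JSON accepted by validate_app() (illustrative; values are
# placeholders only):
#   {
#     "id": "/myapp",
#     "labels": {
#       "HAPROXY_DEPLOYMENT_GROUP": "myapp",
#       "HAPROXY_DEPLOYMENT_ALT_PORT": "10001"
#     }
#   }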
def set_app_ids(app, colour):
app['labels']['HAPROXY_APP_ID'] = app['id']
app['id'] = app['id'] + '-' + colour
if app['id'][0] != '/':
app['id'] = '/' + app['id']
return app
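# Example of set_app_ids() (illustrative): an app with id 'myapp' and colour
# 'blue' ends up with id '/myapp-blue', while the original id is preserved in
# the HAPROXY_APP_ID label.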
def set_service_ports(app, servicePort):
app['labels']['HAPROXY_0_PORT'] = str(get_service_port(app))
return set_service_port(app, servicePort)
def select_next_port(app):
alt_port = int(app['labels']['HAPROXY_DEPLOYMENT_ALT_PORT'])
if 'ports' in app:
if int(app['ports'][0]) == alt_port:
return int(app['labels']['HAPROXY_0_PORT'])
return alt_port
def select_next_colour(app):
if app.get('labels', {}).get('HAPROXY_DEPLOYMENT_COLOUR') == 'blue':
return 'green'
else:
return 'blue'
def sort_deploys(apps):
return sorted(apps, key=lambda a: a.get('labels', {})
.get('HAPROXY_DEPLOYMENT_STARTED_AT', '0'))
def select_last_deploy(apps):
return sort_deploys(apps).pop()
def select_last_two_deploys(apps):
return sort_deploys(apps)[:-3:-1]
def get_deployment_group(app):
return app.get('labels', {}).get('HAPROXY_DEPLOYMENT_GROUP')
def fetch_previous_deploys(args, app):
apps = list_marathon_apps(args)
app_deployment_group = get_deployment_group(app)
return [a for a in apps if get_deployment_group(a) == app_deployment_group]
def prepare_deploy(args, previous_deploys, app):
""" Return a blue or a green version of `app` based on preexisting deploys
"""
if len(previous_deploys) > 0:
last_deploy = select_last_deploy(previous_deploys)
next_colour = select_next_colour(last_deploy)
next_port = select_next_port(last_deploy)
deployment_target_instances = last_deploy['instances']
if args.new_instances > deployment_target_instances:
args.new_instances = deployment_target_instances
if args.new_instances and args.new_instances > 0:
if args.initial_instances > args.new_instances:
app['instances'] = args.new_instances
else:
app['instances'] = args.initial_instances
else:
if args.initial_instances > deployment_target_instances:
app['instances'] = deployment_target_instances
else:
app['instances'] = args.initial_instances
app['labels']['HAPROXY_DEPLOYMENT_NEW_INSTANCES'] = str(
args.new_instances)
else:
next_colour = 'blue'
next_port = get_service_port(app)
deployment_target_instances = app['instances']
app['labels']['HAPROXY_DEPLOYMENT_NEW_INSTANCES'] = "0"
app = set_app_ids(app, next_colour)
app = set_service_ports(app, next_port)
app['labels']['HAPROXY_DEPLOYMENT_TARGET_INSTANCES'] = \
str(deployment_target_instances)
app['labels']['HAPROXY_DEPLOYMENT_COLOUR'] = next_colour
app['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'] = datetime.now().isoformat()
return app
def load_app_json(args):
with open(args.json) as content_file:
return cleanup_json(json.load(content_file))
def safe_resume_deploy(args, previous_deploys):
if args.complete_cur:
logger.info("Converting all instances to current config")
new_app, old_app = select_last_two_deploys(previous_deploys)
logger.info("Current config color is %s" % new_app[
'labels']['HAPROXY_DEPLOYMENT_COLOUR'])
logger.info("Considering %s color as existing app"
% old_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'] +
" and %s color as new app"
% new_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'])
return swap_zdd_apps(args, new_app, old_app)
elif args.complete_prev:
logger.info("Converting all instances to previous config")
old_app, new_app = select_last_two_deploys(previous_deploys)
logger.info("Previous config color is %s" % new_app[
'labels']['HAPROXY_DEPLOYMENT_COLOUR'])
logger.info("Considering %s color as existing app"
% old_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'] +
" and %s color as new app"
% new_app['labels']['HAPROXY_DEPLOYMENT_COLOUR'])
return swap_zdd_apps(args, new_app, old_app)
elif args.resume:
logger.info("Found previous deployment, resuming")
new_app, old_app = select_last_two_deploys(previous_deploys)
return swap_zdd_apps(args, new_app, old_app)
else:
raise Exception("There appears to be an"
" existing deployment in progress")
def do_zdd(args, out=sys.stdout):
app = load_app_json(args)
validate_app(app)
previous_deploys = fetch_previous_deploys(args, app)
if len(previous_deploys) > 1:
# There is a stuck deploy or hybrid deploy
return safe_resume_deploy(args, previous_deploys)
if args.complete_cur or args.complete_prev:
raise InvalidArgException("Cannot use --complete-cur, --complete-prev"
" flags when config is not hybrid")
new_app = prepare_deploy(args, previous_deploys, app)
logger.info('Final app definition:')
out.write(json.dumps(new_app, sort_keys=True, indent=2))
out.write("\n")
if args.dry_run:
return True
if args.force or query_yes_no("Continue with deployment?"):
deploy_marathon_app(args, new_app)
if len(previous_deploys) == 0:
# This was the first deploy, nothing to swap
return True
else:
# This is a standard blue/green deploy, swap new app with old
old_app = select_last_deploy(previous_deploys)
return swap_zdd_apps(args, new_app, old_app)
def get_arg_parser():
parser = argparse.ArgumentParser(
description="Zero-downtime deployment orchestrator for marathon-lb",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--longhelp",
help="Print out configuration details",
action="store_true"
)
parser.add_argument("--marathon", "-m",
help="[required] Marathon endpoint, eg. -m " +
"http://marathon1:8080"
)
parser.add_argument("--marathon-lb", "-l",
help="[required] Marathon-lb stats endpoint, eg. -l " +
"http://marathon-lb.marathon.mesos:9090"
)
parser.add_argument("--json", "-j",
help="[required] App JSON"
)
parser.add_argument("--dry-run", "-d",
help="Perform a dry run",
action="store_true"
)
parser.add_argument("--force", "-f",
help="Perform deployment un-prompted",
action="store_true"
)
parser.add_argument("--step-delay", "-s",
help="Delay (in seconds) between each successive"
" deployment step",
type=int, default=5
)
parser.add_argument("--initial-instances", "-i",
help="Initial number of app instances to launch."
" If this number is greater than total number of"
" existing instances, then this will be overridden"
" by the latter number",
type=int, default=1
)
parser.add_argument("--resume", "-r",
help="Resume from a previous deployment",
action="store_true"
)
parser.add_argument("--max-wait", "-w",
help="Maximum amount of time (in seconds) to wait"
" for HAProxy to drain connections",
type=int, default=300
)
parser.add_argument("--new-instances", "-n",
help="Number of new instances to replace the existing"
" instances. This is for having instances of both blue"
" and green at the same time",
type=int, default=0)
parser.add_argument("--complete-cur", "-c",
help="Change hybrid app entirely to"
" current (new) app's instances", action="store_true")
parser.add_argument("--complete-prev", "-p",
help="Change hybrid app entirely to"
" previous (old) app's instances", action="store_true")
parser.add_argument("--pre-kill-hook",
help="A path to an executable (such as a script) "
"which will be called before killing any tasks marked "
"for draining at each step. The script will be called "
"with 3 arguments (in JSON): the old app definition, "
"the list of tasks which will be killed, "
"and the new app definition. An exit "
"code of 0 indicates the deploy may continue. "
"If the hook returns a non-zero exit code, the deploy "
"will stop, and an operator must intervene."
)
parser = set_logging_args(parser)
parser = set_marathon_auth_args(parser)
return parser
def set_request_retries():
s = requests.Session()
a = requests.adapters.HTTPAdapter(max_retries=3)
s.mount('http://', a)
def process_arguments():
# Process arguments
arg_parser = get_arg_parser()
args = arg_parser.parse_args()
if args.longhelp:
print(__doc__)
sys.exit()
# otherwise make sure that a Marathon URL was specified
else:
if args.marathon is None:
arg_parser.error('argument --marathon/-m is required')
if args.marathon_lb is None:
arg_parser.error('argument --marathon-lb/-l is required')
if args.json is None:
arg_parser.error('argument --json/-j is required')
return args
if __name__ == '__main__':
args = process_arguments()
set_request_retries()
setup_logging(logger, args.syslog_socket, args.log_format, args.log_level)
try:
if do_zdd(args):
sys.exit(0)
else:
sys.exit(1)
except Exception as e:
if hasattr(e, 'zdd_exit_status'):
if hasattr(e, 'error'):
logger.exception(str(e.error))
else:
                logger.exception(traceback.format_exc())
sys.exit(e.zdd_exit_status)
else:
# For Unknown Exceptions
            logger.exception(traceback.format_exc())
sys.exit(2)
|
|
import unittest
import functools
import mock
import numpy
from operator import mul
import chainer
from chainer import cuda
import chainer.functions as F
from chainer.functions.connection import deconvolution_nd
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.testing import parameterize
from chainer.utils import conv
from chainer.utils import type_check
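# The decorator below builds the cross-product of each testing.product() block
# and concatenates the resulting parameter dicts, so TestDeconvolutionND runs
# once per combination of dims, dtypes, bias/outsize flags and memory layout
# listed here.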
@parameterize(*testing.product({
'dims': [(4, 3, 2), (2,)],
'nobias': [False],
'test_outsize': [False],
'c_contiguous': [True],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
}) + testing.product({
'dims': [(3, 2)],
'nobias': [False],
'test_outsize': [False],
'c_contiguous': [True],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
}) + testing.product({
'dims': [(3, 2)],
'nobias': [True, False],
'test_outsize': [True, False],
'c_contiguous': [True, False],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
}))
class TestDeconvolutionND(unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 2
ndim = len(self.dims)
ksize = (3,) * ndim
self.stride = (2,) * ndim
self.pad = (1,) * ndim
W_scale = numpy.sqrt(1. / functools.reduce(mul, ksize, in_channels))
W_shape = (in_channels, out_channels) + ksize
self.W = numpy.random.normal(0, W_scale, W_shape).astype(self.W_dtype)
self.b = numpy.random.uniform(-1, 1, out_channels).astype(self.x_dtype)
outs = tuple(
conv.get_deconv_outsize(d, k, s, p)
for (d, k, s, p) in zip(self.dims, ksize, self.stride, self.pad))
self.outsize = outs if self.test_outsize else None
x_shape = (2, in_channels) + self.dims
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.x_dtype)
gy_shape = (2, out_channels) + outs
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.x_dtype)
self.test_forward_options = {}
self.check_backward_options = {
'eps': 1e-2, 'atol': 1e-4, 'rtol': 1e-3}
if self.x_dtype == numpy.float16:
self.test_forward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_backward_options = {
'eps': 2 ** -3, 'atol': 1e-2, 'rtol': 1e-1}
elif self.W_dtype == numpy.float16:
self.check_backward_options = {
'eps': 2 ** -3, 'atol': 1e-3, 'rtol': 1e-2}
def check_forward_consistency(self, use_cudnn='always'):
x_cpu = chainer.Variable(self.x)
W_cpu = chainer.Variable(self.W)
b_cpu = None if self.nobias else chainer.Variable(self.b)
y_cpu = F.deconvolution_nd(
x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
outsize=self.outsize)
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
W_gpu = chainer.Variable(cuda.to_gpu(self.W))
b_gpu = None if self.nobias else chainer.Variable(cuda.to_gpu(self.b))
with chainer.using_config('use_cudnn', use_cudnn):
y_gpu = F.deconvolution_nd(
x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.pad,
outsize=self.outsize)
self.assertEqual(y_cpu.data.dtype, self.x_dtype)
self.assertEqual(y_gpu.data.dtype, self.x_dtype)
testing.assert_allclose(
y_cpu.data, y_gpu.data.get(), **self.test_forward_options)
@attr.cudnn
def test_forward_consistency_cudnn(self):
self.check_forward_consistency(use_cudnn='always')
@attr.gpu
def test_forward_consistency_im2col(self):
self.check_forward_consistency(use_cudnn='never')
def check_forward_consistency_regression(self, x_data, W_data, b_data,
use_cudnn='always'):
x = chainer.Variable(x_data)
W = chainer.Variable(W_data)
b = None if self.nobias else chainer.Variable(b_data)
with chainer.using_config('use_cudnn', use_cudnn):
y_nd = F.deconvolution_nd(x, W, b, stride=self.stride,
pad=self.pad, outsize=self.outsize)
y_2d = F.deconvolution_2d(x, W, b, stride=self.stride,
pad=self.pad, outsize=self.outsize)
testing.assert_allclose(
y_nd.data, y_2d.data, **self.test_forward_options)
def test_forward_consistency_regression_cpu(self):
# Regression test to deconvolution_nd.
if len(self.dims) == 2:
self.check_forward_consistency_regression(self.x, self.W, self.b)
@attr.cudnn
def test_forward_consistency_regression_cudnn(self):
# Regression test to deconvolution_nd.
if len(self.dims) == 2:
self.check_forward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.W), cuda.to_gpu(self.b),
use_cudnn='always')
@attr.gpu
def test_forward_consistency_regression_im2col(self):
# Regression test to deconvolution_nd.
if len(self.dims) == 2:
self.check_forward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.W), cuda.to_gpu(self.b),
use_cudnn='never')
def check_backward(self, x_data, W_data, b_data, y_grad,
use_cudnn='never'):
if not self.c_contiguous:
xp = cuda.get_array_module(x_data)
x_data = xp.asfortranarray(x_data)
W_data = xp.asfortranarray(W_data)
y_grad = xp.asfortranarray(y_grad)
self.assertFalse(x_data.flags.c_contiguous)
self.assertFalse(W_data.flags.c_contiguous)
self.assertFalse(y_grad.flags.c_contiguous)
if b_data is not None:
b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
b[::2] = b_data
b_data = b[::2]
self.assertFalse(b_data.flags.c_contiguous)
inputs = (x_data, W_data)
if b_data is not None:
inputs = inputs + (b_data,)
ndim = len(self.dims)
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_backward(
deconvolution_nd.DeconvolutionND(
ndim, self.stride, self.pad, self.outsize),
inputs, y_grad, **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.W, self.b, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_cudnn(self):
b = None if self.b is None else cuda.to_gpu(self.b)
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.W), b,
cuda.to_gpu(self.gy), use_cudnn='always')
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
b = None if self.b is None else cuda.to_gpu(self.b)
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.W), b,
cuda.to_gpu(self.gy), use_cudnn='never')
@testing.parameterize(*testing.product({
'dims': [(5, 4, 3), (4, 3), (3,)],
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestDeconvolutionNDCudnnCall(unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 2
ndim = len(self.dims)
ksize = (3,) * ndim
stride = (1,) * ndim
pad = (1,) * ndim
W_scale = numpy.sqrt(1. / functools.reduce(mul, ksize, in_channels))
W_shape = (in_channels, out_channels) + ksize
self.W = cuda.cupy.random.normal(
0, W_scale, W_shape).astype(self.dtype)
outs = tuple(
conv.get_deconv_outsize(d, k, s, p)
for (d, k, s, p) in zip(self.dims, ksize, stride, pad))
x_shape = (2, in_channels) + self.dims
self.x = cuda.cupy.random.uniform(-1, 1, x_shape).astype(self.dtype)
gy_shape = (2, out_channels) + outs
self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expected = chainer.should_use_cudnn('>=auto') and ndim > 1
def forward(self):
x = chainer.Variable(self.x)
W = chainer.Variable(self.W)
return F.deconvolution_nd(x, W, None, stride=1, pad=1)
def test_call_cudnn_forward(self):
name = 'cupy.cudnn.cudnn.convolutionBackwardData_v3'
with chainer.using_config('use_cudnn', self.use_cudnn):
with mock.patch(name) as func:
self.forward()
self.assertEqual(func.called, self.expected)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
with mock.patch('cupy.cudnn.cudnn.convolutionForward') as func:
y.backward()
self.assertEqual(func.called, self.expected)
class TestDeconvolutionNDarraySupplied(unittest.TestCase):
def setUp(self):
N = 2
in_channels = 3
out_channels = 2
dtype = numpy.float32
x_shape = (N, in_channels, 3, 3, 3)
self.x_data = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
W_shape = (in_channels, out_channels, 1, 1, 1)
self.W_data = numpy.random.uniform(-1, 1, W_shape).astype(dtype)
self.b_data = numpy.random.uniform(-1, 1, out_channels).astype(dtype)
def check_array_supplied(self, x_ary, W_ary, b_ary):
y_ary = F.deconvolution_nd(x_ary, W_ary, b_ary)
x_var = chainer.Variable(x_ary)
W_var = chainer.Variable(W_ary)
b_var = chainer.Variable(b_ary)
y_var = F.deconvolution_nd(x_var, W_var, b_var)
testing.assert_allclose(y_ary.data, y_var.data)
def test_array_supplied_cpu(self):
self.check_array_supplied(self.x_data, self.W_data, self.b_data)
@attr.gpu
def test_array_supplied_gpu(self):
self.check_array_supplied(cuda.to_gpu(self.x_data),
cuda.to_gpu(self.W_data),
cuda.to_gpu(self.b_data))
class TestDeconvolutionNDTypeCheck(unittest.TestCase):
def test_number_of_inputs(self):
# Too few inputs
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.connection.deconvolution_nd.DeconvolutionND(1)(x)
        # Too many inputs
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
b = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.connection.deconvolution_nd.DeconvolutionND(1)(x, W, b, x)
def test_data_and_weight(self):
# dtype of data
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.int32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W)
# dtype of weight
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.int32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W)
# ndim of weight
x = numpy.random.uniform(-1, 1, (2, 3, 4, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W)
# shapes of data and weight
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (2, 2, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W)
def test_supplied_outsize(self):
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
outsize = (10,)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W, outsize=outsize)
def test_bias(self):
# dtype
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
b = numpy.random.uniform(-1, 1, (2,)).astype(numpy.int32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W, b=b)
# ndim
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
b = numpy.random.uniform(-1, 1, (2, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W, b=b)
# shape
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
b = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W, b=b)
def test_estimated_outsize(self):
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
stride = 1
pad = 10
with self.assertRaises(AssertionError):
F.deconvolution_nd(x, W, stride=stride, pad=pad)
testing.run_module(__name__, __file__)
|
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Exception-catching middleware that allows interactive debugging.
This middleware catches all unexpected exceptions. A normal
traceback, like produced by
``paste.exceptions.errormiddleware.ErrorMiddleware`` is given, plus
controls to see local variables and evaluate expressions in a local
context.
This can only be used in single-process environments, because
subsequent requests must go back to the same process that the
exception originally occurred in. Threaded or non-concurrent
environments both work.
This shouldn't be used in production in any way. That would just be
silly.
If calling via an XMLHttpRequest, passing the GET variable ``_`` will
make the response more compact (and less Javascripty), since if you use
innerHTML it'll kill your browser. You
can look for the header X-Debug-URL in your 500 responses if you want
to see the full debuggable traceback. Also, this URL is printed to
``wsgi.errors``, so you can open it up in another browser window.
"""
from __future__ import print_function
import sys
import os
import cgi
import traceback
import six
from six.moves import cStringIO as StringIO
import pprint
import itertools
import time
import re
from paste.exceptions import errormiddleware, formatter, collector
from paste import wsgilib
from paste import urlparser
from paste import httpexceptions
from paste import registry
from paste import request
from paste import response
from paste.evalexception import evalcontext
limit = 200
def html_quote(v):
"""
Escape HTML characters, plus translate None to ''
"""
if v is None:
return ''
return cgi.escape(str(v), 1)
def preserve_whitespace(v, quote=True):
"""
Quote a value for HTML, preserving whitespace (translating
    newlines to ``<br>`` and multiple spaces to use ``&nbsp;``).
If ``quote`` is true, then the value will be HTML quoted first.
"""
if quote:
v = html_quote(v)
v = v.replace('\n', '<br>\n')
    v = re.sub(r'()(  +)', _repl_nbsp, v)
v = re.sub(r'(\n)( +)', _repl_nbsp, v)
v = re.sub(r'^()( +)', _repl_nbsp, v)
return '<code>%s</code>' % v
def _repl_nbsp(match):
    if len(match.group(2)) == 1:
        return '&nbsp;'
    return match.group(1) + '&nbsp;' * (len(match.group(2))-1) + ' '
def simplecatcher(application):
"""
A simple middleware that catches errors and turns them into simple
tracebacks.
"""
def simplecatcher_app(environ, start_response):
try:
return application(environ, start_response)
except:
out = StringIO()
traceback.print_exc(file=out)
start_response('500 Server Error',
[('content-type', 'text/html')],
sys.exc_info())
res = out.getvalue()
return ['<h3>Error</h3><pre>%s</pre>'
% html_quote(res)]
return simplecatcher_app
def wsgiapp():
"""
Turns a function or method into a WSGI application.
"""
def decorator(func):
def wsgiapp_wrapper(*args):
# we get 3 args when this is a method, two when it is
# a function :(
if len(args) == 3:
environ = args[1]
start_response = args[2]
args = [args[0]]
else:
environ, start_response = args
args = []
def application(environ, start_response):
form = wsgilib.parse_formvars(environ,
include_get_vars=True)
headers = response.HeaderDict(
{'content-type': 'text/html',
'status': '200 OK'})
form['environ'] = environ
form['headers'] = headers
res = func(*args, **form.mixed())
status = headers.pop('status')
start_response(status, headers.headeritems())
return [res]
app = httpexceptions.make_middleware(application)
app = simplecatcher(app)
return app(environ, start_response)
wsgiapp_wrapper.exposed = True
return wsgiapp_wrapper
return decorator
def get_debug_info(func):
"""
A decorator (meant to be used under ``wsgiapp()``) that resolves
the ``debugcount`` variable to a ``DebugInfo`` object (or gives an
error if it can't be found).
"""
def debug_info_replacement(self, **form):
try:
if 'debugcount' not in form:
raise ValueError('You must provide a debugcount parameter')
debugcount = form.pop('debugcount')
try:
debugcount = int(debugcount)
except ValueError:
raise ValueError('Bad value for debugcount')
if debugcount not in self.debug_infos:
raise ValueError(
'Debug %s no longer found (maybe it has expired?)'
% debugcount)
debug_info = self.debug_infos[debugcount]
return func(self, debug_info=debug_info, **form)
except ValueError as e:
form['headers']['status'] = '500 Server Error'
return '<html>There was an error: %s</html>' % html_quote(e)
return debug_info_replacement
debug_counter = itertools.count(int(time.time()))
def get_debug_count(environ):
"""
Return the unique debug count for the current request
"""
if 'paste.evalexception.debug_count' in environ:
return environ['paste.evalexception.debug_count']
else:
environ['paste.evalexception.debug_count'] = next = six.next(debug_counter)
return next
class EvalException(object):
def __init__(self, application, global_conf=None,
xmlhttp_key=None):
self.application = application
self.debug_infos = {}
if xmlhttp_key is None:
if global_conf is None:
xmlhttp_key = '_'
else:
xmlhttp_key = global_conf.get('xmlhttp_key', '_')
self.xmlhttp_key = xmlhttp_key
def __call__(self, environ, start_response):
assert not environ['wsgi.multiprocess'], (
"The EvalException middleware is not usable in a "
"multi-process environment")
environ['paste.evalexception'] = self
if environ.get('PATH_INFO', '').startswith('/_debug/'):
return self.debug(environ, start_response)
else:
return self.respond(environ, start_response)
def debug(self, environ, start_response):
assert request.path_info_pop(environ) == '_debug'
next_part = request.path_info_pop(environ)
method = getattr(self, next_part, None)
if not method:
exc = httpexceptions.HTTPNotFound(
'%r not found when parsing %r'
% (next_part, wsgilib.construct_url(environ)))
return exc.wsgi_application(environ, start_response)
if not getattr(method, 'exposed', False):
exc = httpexceptions.HTTPForbidden(
'%r not allowed' % next_part)
return exc.wsgi_application(environ, start_response)
return method(environ, start_response)
def media(self, environ, start_response):
"""
Static path where images and other files live
"""
app = urlparser.StaticURLParser(
os.path.join(os.path.dirname(__file__), 'media'))
return app(environ, start_response)
media.exposed = True
def mochikit(self, environ, start_response):
"""
Static path where MochiKit lives
"""
app = urlparser.StaticURLParser(
os.path.join(os.path.dirname(__file__), 'mochikit'))
return app(environ, start_response)
mochikit.exposed = True
def summary(self, environ, start_response):
"""
Returns a JSON-format summary of all the cached
exception reports
"""
start_response('200 OK', [('Content-type', 'text/x-json')])
        items = sorted(self.debug_infos.values(),
                       key=lambda item: item.created)
        data = [item.json() for item in items]
return [repr(data)]
summary.exposed = True
def view(self, environ, start_response):
"""
View old exception reports
"""
id = int(request.path_info_pop(environ))
if id not in self.debug_infos:
start_response(
'500 Server Error',
[('Content-type', 'text/html')])
return [
"Traceback by id %s does not exist (maybe "
"the server has been restarted?)"
% id]
debug_info = self.debug_infos[id]
return debug_info.wsgi_application(environ, start_response)
view.exposed = True
def make_view_url(self, environ, base_path, count):
return base_path + '/_debug/view/%s' % count
#@wsgiapp()
#@get_debug_info
def show_frame(self, tbid, debug_info, **kw):
frame = debug_info.frame(int(tbid))
vars = frame.tb_frame.f_locals
if vars:
registry.restorer.restoration_begin(debug_info.counter)
local_vars = make_table(vars)
registry.restorer.restoration_end()
else:
local_vars = 'No local vars'
return input_form(tbid, debug_info) + local_vars
show_frame = wsgiapp()(get_debug_info(show_frame))
#@wsgiapp()
#@get_debug_info
def exec_input(self, tbid, debug_info, input, **kw):
if not input.strip():
return ''
input = input.rstrip() + '\n'
frame = debug_info.frame(int(tbid))
vars = frame.tb_frame.f_locals
glob_vars = frame.tb_frame.f_globals
context = evalcontext.EvalContext(vars, glob_vars)
registry.restorer.restoration_begin(debug_info.counter)
output = context.exec_expr(input)
registry.restorer.restoration_end()
input_html = formatter.str2html(input)
return ('<code style="color: #060">>>></code> '
'<code>%s</code><br>\n%s'
% (preserve_whitespace(input_html, quote=False),
preserve_whitespace(output)))
exec_input = wsgiapp()(get_debug_info(exec_input))
def respond(self, environ, start_response):
if environ.get('paste.throw_errors'):
return self.application(environ, start_response)
base_path = request.construct_url(environ, with_path_info=False,
with_query_string=False)
environ['paste.throw_errors'] = True
started = []
        def detect_start_response(status, headers, exc_info=None):
            # Record a successful start_response so the error handler below
            # knows whether it may still emit its own 500 response.
            result = start_response(status, headers, exc_info)
            started.append(True)
            return result
try:
__traceback_supplement__ = errormiddleware.Supplement, self, environ
app_iter = self.application(environ, detect_start_response)
try:
return_iter = list(app_iter)
return return_iter
finally:
if hasattr(app_iter, 'close'):
app_iter.close()
except:
exc_info = sys.exc_info()
for expected in environ.get('paste.expected_exceptions', []):
if isinstance(exc_info[1], expected):
raise
# Tell the Registry to save its StackedObjectProxies current state
# for later restoration
registry.restorer.save_registry_state(environ)
count = get_debug_count(environ)
view_uri = self.make_view_url(environ, base_path, count)
if not started:
headers = [('content-type', 'text/html')]
headers.append(('X-Debug-URL', view_uri))
start_response('500 Internal Server Error',
headers,
exc_info)
msg = 'Debug at: %s\n' % view_uri
if six.PY3:
msg = msg.encode('utf8')
environ['wsgi.errors'].write(msg)
exc_data = collector.collect_exception(*exc_info)
debug_info = DebugInfo(count, exc_info, exc_data, base_path,
environ, view_uri)
assert count not in self.debug_infos
self.debug_infos[count] = debug_info
if self.xmlhttp_key:
get_vars = request.parse_querystring(environ)
if dict(get_vars).get(self.xmlhttp_key):
exc_data = collector.collect_exception(*exc_info)
html = formatter.format_html(
exc_data, include_hidden_frames=False,
include_reusable=False, show_extra_data=False)
return [html]
# @@: it would be nice to deal with bad content types here
return debug_info.content()
def exception_handler(self, exc_info, environ):
simple_html_error = False
if self.xmlhttp_key:
get_vars = request.parse_querystring(environ)
if dict(get_vars).get(self.xmlhttp_key):
simple_html_error = True
return errormiddleware.handle_exception(
exc_info, environ['wsgi.errors'],
html=True,
debug_mode=True,
simple_html_error=simple_html_error)
class DebugInfo(object):
def __init__(self, counter, exc_info, exc_data, base_path,
environ, view_uri):
self.counter = counter
self.exc_data = exc_data
self.base_path = base_path
self.environ = environ
self.view_uri = view_uri
self.created = time.time()
self.exc_type, self.exc_value, self.tb = exc_info
__exception_formatter__ = 1
self.frames = []
n = 0
tb = self.tb
        limit = None  # no cap on the number of frames kept for display
        while tb is not None and (limit is None or n < limit):
if tb.tb_frame.f_locals.get('__exception_formatter__'):
# Stop recursion. @@: should make a fake ExceptionFrame
break
self.frames.append(tb)
tb = tb.tb_next
n += 1
def json(self):
"""Return the JSON-able representation of this object"""
return {
'uri': self.view_uri,
'created': time.strftime('%c', time.gmtime(self.created)),
'created_timestamp': self.created,
'exception_type': str(self.exc_type),
'exception': str(self.exc_value),
}
def frame(self, tbid):
for frame in self.frames:
if id(frame) == tbid:
return frame
else:
raise ValueError("No frame by id %s found from %r" % (tbid, self.frames))
def wsgi_application(self, environ, start_response):
start_response('200 OK', [('content-type', 'text/html')])
return self.content()
def content(self):
html = format_eval_html(self.exc_data, self.base_path, self.counter)
head_html = (formatter.error_css + formatter.hide_display_js)
head_html += self.eval_javascript()
repost_button = make_repost_button(self.environ)
page = error_template % {
'repost_button': repost_button or '',
'head_html': head_html,
'body': html}
if six.PY3:
page = page.encode('utf8')
return [page]
def eval_javascript(self):
base_path = self.base_path + '/_debug'
return (
'<script type="text/javascript" src="%s/media/MochiKit.packed.js">'
'</script>\n'
'<script type="text/javascript" src="%s/media/debug.js">'
'</script>\n'
'<script type="text/javascript">\n'
'debug_base = %r;\n'
'debug_count = %r;\n'
'</script>\n'
% (base_path, base_path, base_path, self.counter))
class EvalHTMLFormatter(formatter.HTMLFormatter):
def __init__(self, base_path, counter, **kw):
super(EvalHTMLFormatter, self).__init__(**kw)
self.base_path = base_path
self.counter = counter
def format_source_line(self, filename, frame):
line = formatter.HTMLFormatter.format_source_line(
self, filename, frame)
return (line +
' <a href="#" class="switch_source" '
'tbid="%s" onClick="return showFrame(this)"> '
'<img src="%s/_debug/media/plus.jpg" border=0 width=9 '
'height=9> </a>'
% (frame.tbid, self.base_path))
def make_table(items):
    if isinstance(items, dict):
        items = items.items()
    items = sorted(items)
rows = []
i = 0
for name, value in items:
i += 1
out = StringIO()
try:
pprint.pprint(value, out)
except Exception as e:
print('Error: %s' % e, file=out)
value = html_quote(out.getvalue())
if len(value) > 100:
# @@: This can actually break the HTML :(
# should I truncate before quoting?
orig_value = value
value = value[:100]
value += '<a class="switch_source" style="background-color: #999" href="#" onclick="return expandLong(this)">...</a>'
value += '<span style="display: none">%s</span>' % orig_value[100:]
value = formatter.make_wrappable(value)
if i % 2:
attr = ' class="even"'
else:
attr = ' class="odd"'
rows.append('<tr%s style="vertical-align: top;"><td>'
                    '<b>%s</b></td><td style="overflow: auto">%s</td></tr>'
% (attr, html_quote(name),
preserve_whitespace(value, quote=False)))
return '<table>%s</table>' % (
'\n'.join(rows))
def format_eval_html(exc_data, base_path, counter):
short_formatter = EvalHTMLFormatter(
base_path=base_path,
counter=counter,
include_reusable=False)
short_er = short_formatter.format_collected_data(exc_data)
long_formatter = EvalHTMLFormatter(
base_path=base_path,
counter=counter,
show_hidden_frames=True,
show_extra_data=False,
include_reusable=False)
long_er = long_formatter.format_collected_data(exc_data)
text_er = formatter.format_text(exc_data, show_hidden_frames=True)
if short_formatter.filter_frames(exc_data.frames) != \
long_formatter.filter_frames(exc_data.frames):
# Only display the full traceback when it differs from the
# short version
full_traceback_html = """
<br>
<script type="text/javascript">
show_button('full_traceback', 'full traceback')
</script>
<div id="full_traceback" class="hidden-data">
%s
</div>
""" % long_er
else:
full_traceback_html = ''
return """
%s
%s
<br>
<script type="text/javascript">
show_button('text_version', 'text version')
</script>
<div id="text_version" class="hidden-data">
<textarea style="width: 100%%" rows=10 cols=60>%s</textarea>
</div>
""" % (short_er, full_traceback_html, cgi.escape(text_er))
def make_repost_button(environ):
url = request.construct_url(environ)
if environ['REQUEST_METHOD'] == 'GET':
return ('<button onclick="window.location.href=%r">'
'Re-GET Page</button><br>' % url)
else:
# @@: I'd like to reconstruct this, but I can't because
# the POST body is probably lost at this point, and
# I can't get it back :(
return None
# @@: Use or lose the following code block
"""
fields = []
for name, value in wsgilib.parse_formvars(
environ, include_get_vars=False).items():
if hasattr(value, 'filename'):
# @@: Arg, we'll just submit the body, and leave out
# the filename :(
value = value.value
fields.append(
'<input type="hidden" name="%s" value="%s">'
% (html_quote(name), html_quote(value)))
return '''
<form action="%s" method="POST">
%s
<input type="submit" value="Re-POST Page">
</form>''' % (url, '\n'.join(fields))
"""
def input_form(tbid, debug_info):
return '''
<form action="#" method="POST"
onsubmit="return submitInput($(\'submit_%(tbid)s\'), %(tbid)s)">
<div id="exec-output-%(tbid)s" style="width: 95%%;
padding: 5px; margin: 5px; border: 2px solid #000;
display: none"></div>
<input type="text" name="input" id="debug_input_%(tbid)s"
style="width: 100%%"
autocomplete="off" onkeypress="upArrow(this, event)"><br>
<input type="submit" value="Execute" name="submitbutton"
onclick="return submitInput(this, %(tbid)s)"
id="submit_%(tbid)s"
input-from="debug_input_%(tbid)s"
output-to="exec-output-%(tbid)s">
<input type="submit" value="Expand"
onclick="return expandInput(this)">
</form>
''' % {'tbid': tbid}
error_template = '''
<html>
<head>
<title>Server Error</title>
%(head_html)s
</head>
<body>
<div id="error-area" style="display: none; background-color: #600; color: #fff; border: 2px solid black">
<div id="error-container"></div>
<button onclick="return clearError()">clear this</button>
</div>
%(repost_button)s
%(body)s
</body>
</html>
'''
def make_eval_exception(app, global_conf, xmlhttp_key=None):
"""
Wraps the application in an interactive debugger.
This debugger is a major security hole, and should only be
used during development.
xmlhttp_key is a string that, if present in QUERY_STRING,
indicates that the request is an XMLHttp request, and the
Javascript/interactive debugger should not be returned. (If you
try to put the debugger somewhere with innerHTML, you will often
crash the browser)
"""
if xmlhttp_key is None:
xmlhttp_key = global_conf.get('xmlhttp_key', '_')
return EvalException(app, xmlhttp_key=xmlhttp_key)
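# A minimal usage sketch (illustrative only; ``my_wsgi_app`` is a hypothetical
# downstream application, not defined in this module):
#
#     def my_wsgi_app(environ, start_response):
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return [b'hello']
#
#     app = EvalException(my_wsgi_app)  # development use only, see docstring
#
# Unhandled exceptions raised by the wrapped application are then rendered as
# interactive traceback pages served under the /_debug/ URL prefix.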
|
|
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import os
import re
import sys
from guild import util
log = logging.getLogger("guild")
class InstallError(Exception):
pass
def SearchCommand(spec, operator, *args, **kw):
"""Guild specific pip search implementation.
    This exposes the search fields and operator, which are hard-coded
    in the pip implementation.
Implemented as a function to defer import of upstream
implementation.
"""
from pip._internal.commands.search import SearchCommand
cmd = SearchCommand(*args, **kw)
cmd._spec = spec
cmd._operator = operator
util.bind_method(cmd, "search", _SearchCommand_search)
return cmd
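# Illustrative use of the factory above (hypothetical values; ``spec`` and
# ``operator`` follow PyPI's XML-RPC search interface):
#
#     cmd = SearchCommand({"name": ["example"]}, "or")
#     options, _ = cmd.parse_args([])
#     hits = cmd.search(None, options)
#
# The module-level ``search()`` helper defined later wraps this same sequence.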
def _SearchCommand_search(cmd, _query, options):
from six.moves import xmlrpc_client
from pip._internal.download import PipXmlrpcTransport
index_url = options.index
with cmd._build_session(options) as session:
transport = PipXmlrpcTransport(index_url, session)
pypi = xmlrpc_client.ServerProxy(index_url, transport)
return pypi.search(cmd._spec, cmd._operator)
def install(
reqs,
index_urls=None,
upgrade=False,
pre_releases=False,
no_cache=False,
no_deps=False,
reinstall=False,
target=None,
):
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import InstallationError
_reset_env_for_install()
_ensure_patch_pip_get_entry_points()
cmd = _pip_cmd(InstallCommand)
args = []
if pre_releases:
args.append("--pre")
if not running_under_virtualenv() and not target:
args.append("--user")
if upgrade:
args.append("--upgrade")
if no_cache:
args.append("--no-cache-dir")
if no_deps:
args.append("--no-deps")
if reinstall:
args.append("--force-reinstall")
if index_urls:
args.extend(["--index-url", index_urls[0]])
for url in index_urls[1:]:
args.extend(["--extra-index-url", url])
if target:
args.extend(["--target", target])
args.extend(reqs)
options, cmd_args = cmd.parse_args(args)
try:
return cmd.run(options, cmd_args)
except InstallationError as e:
raise InstallError(str(e))
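# Illustrative call (hypothetical requirement and index URL):
#
#     install(
#         ["example-package>=1.0"],
#         index_urls=["https://pypi.org/simple"],
#         upgrade=True,
#         no_cache=True,
#     )
#
# which builds roughly the same argument list as the command line
#
#     pip install --upgrade --no-cache-dir \
#         --index-url https://pypi.org/simple "example-package>=1.0"
#
# plus --user when running outside a virtualenv and no target is given.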
def _reset_env_for_install():
util.del_env(["PIP_REQ_TRACKER"])
def _pip_cmd(cls, *args, **kw):
cmd = cls(*args, **kw)
cmd.verbosity = False
return cmd
def running_under_virtualenv():
return "VIRTUAL_ENV" in os.environ or "CONDA_PREFIX" in os.environ
def _ensure_patch_pip_get_entry_points():
"""Patch pip's get_entrypoints function.
    Older versions of pip use configparser to load the entry points
    file in a wheel, which imposes its own syntax requirements on entry
    point keys, causing problems for our key naming conventions.
    We replace their `get_entrypoints` with `_pip_get_entrypoints_patch`,
    which is copied from their more recent source.
"""
from pip._internal import wheel
if wheel.get_entrypoints != _pip_get_entrypoints_patch:
wheel.get_entrypoints = _pip_get_entrypoints_patch
def _pip_get_entrypoints_patch(filename):
"""See `_ensure_pip_get_entrypoints_patch` for details."""
from pip._vendor.six import StringIO
from pip._vendor import pkg_resources
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
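    # For reference, an entry_points.txt file typically looks like this
    # (illustrative contents):
    #
    #     [console_scripts]
    #     example-cli = example.cli:main
    #
    # The whitespace stripping below keeps such content parseable even when
    # lines carry stray leading or trailing spaces.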
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
# get the entry points and then the script names
entry_points = pkg_resources.EntryPoint.parse_map(data)
console = entry_points.get('console_scripts', {})
gui = entry_points.get('gui_scripts', {})
def _split_ep(s):
"""get the string representation of EntryPoint, remove space and split
on '='"""
return str(s).replace(" ", "").split("=")
# convert the EntryPoint objects into strings with module:function
console = dict(_split_ep(v) for v in console.values())
gui = dict(_split_ep(v) for v in gui.values())
return console, gui
def get_installed():
from pip._internal.utils.misc import get_installed_distributions
user_only = not running_under_virtualenv()
return get_installed_distributions(local_only=False, user_only=user_only)
def search(spec, operator):
_ensure_search_logger()
cmd = _pip_cmd(SearchCommand, spec, operator)
options, unused_parsed_query = cmd.parse_args([])
return cmd.search(unused_parsed_query, options)
class QuietLogger(logging.Logger):
def __init__(self, parent):
super(QuietLogger, self).__init__(parent.name)
self.parent = parent
self.level = logging.WARNING
def _ensure_search_logger():
try:
from pip._vendor.requests.packages.urllib3 import connectionpool
except ImportError:
pass
else:
if not isinstance(connectionpool.log, QuietLogger):
connectionpool.log = QuietLogger(connectionpool.log)
def uninstall(reqs, dont_prompt=False):
from pip._internal.commands.uninstall import UninstallCommand
cmd = _pip_cmd(UninstallCommand)
for req in reqs:
_uninstall(req, cmd, dont_prompt)
def _uninstall(req, cmd, dont_prompt):
from pip._internal.exceptions import UninstallationError
args = [req]
if dont_prompt:
args.append("--yes")
options, cmd_args = cmd.parse_args(args)
try:
cmd.run(options, cmd_args)
except UninstallationError as e:
if "not installed" not in str(e):
raise
log.warning("%s is not installed, skipping", req)
def download_url(url, download_dir, sha256=None):
"""Download and optionally verify a file.
Returns the downloaded file path.
If sha256 is not specified (default), the file is not verified.
Raises HashMismatch if the file hash does not match the specified
sha256 hash.
If the file was already downloaded, returns its path after
verifying it. If the file cannot be verified, raises HashMismatch
without attempting download again. If the hash is valid but the
download is not, the download must be deleted before trying
again. This behavior is designed to preserve downloads at the cost
of requiring that invalid files be explicitly deleted.
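    Illustrative call (sketch only; the URL and digest are placeholders,
    not real artifacts):
        path = download_url(
            "https://example.com/files/archive-1.0.tar.gz",
            "/tmp/guild-cache",
            sha256="<expected sha256 hex digest>",
        )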
"""
from pip._internal.index import Link
link = Link(url)
downloaded_path = _check_download_path(link, download_dir, sha256)
if not downloaded_path:
orig_path = _pip_download(link, download_dir)
downloaded_path = _ensure_expected_download_path(orig_path, link)
if sha256:
_verify_and_cache_hash(downloaded_path, sha256)
return downloaded_path
def _check_download_path(link, download_dir, expected_hash):
download_path = os.path.join(download_dir, link.filename)
if not os.path.exists(download_path):
return None
log.info("Using cached file %s", download_path)
if not expected_hash:
return download_path
cached_hash = util.try_cached_sha(download_path)
if cached_hash and cached_hash == expected_hash:
return download_path
_verify_and_cache_hash(download_path, expected_hash)
return download_path
class HashMismatch(Exception):
def __init__(self, path, expected, actual):
super(HashMismatch, self).__init__(path, expected, actual)
self.path = path
self.expected = expected
self.actual = actual
def _verify_and_cache_hash(path, expected_hash):
calculated_hash = util.file_sha256(path)
if calculated_hash != expected_hash:
raise HashMismatch(path, expected_hash, calculated_hash)
_cache_sha256(calculated_hash, path)
def _cache_sha256(sha256, download_path):
util.write_cached_sha(sha256, download_path)
def _pip_download(link, download_dir):
# We disable cache control for downloads for two reasons: First,
# we're already caching our downloads as resources, so an
# additional level of caching, even if efficiently managed, is
# probably not worth the cost. Second, the cachecontrol module
# used with pip's download facility is unusable with large files
# as it reads files into memory:
#
# https://github.com/ionrock/cachecontrol/issues/145
#
from pip._internal.commands.download import DownloadCommand
from pip._internal.download import _download_http_url
cmd = _pip_cmd(DownloadCommand)
options, _ = cmd.parse_args(["--no-cache-dir"])
session = cmd._build_session(options)
orig_path, _ = _download_http_url(
link, session, download_dir, hashes=None, progress_bar="on"
)
return orig_path
def _ensure_expected_download_path(downloaded, link):
expected = os.path.join(os.path.dirname(downloaded), link.filename)
if downloaded != expected:
os.rename(downloaded, expected)
return expected
def print_package_info(pkg, verbose=False, show_files=False):
from pip._internal.commands.show import ShowCommand
_ensure_print_package_logger()
cmd = _pip_cmd(ShowCommand)
args = []
if verbose:
args.append("--verbose")
if show_files:
args.append("--files")
args.append(pkg)
return cmd.run(*cmd.parse_args(args))
class PrintPackageLogger(object):
def info(self, msg, args=None):
args = args or []
out = self._normalize_attr_case(msg % args)
sys.stdout.write(out)
sys.stdout.write("\n")
@staticmethod
def _normalize_attr_case(s):
m = re.match("([^:]+:)(.*)", s)
if m:
return m.group(1).lower() + m.group(2)
return s
def _ensure_print_package_logger():
from pip._internal.commands import show
if not isinstance(show.logger, PrintPackageLogger):
show.logger = PrintPackageLogger()
def parse_requirements(path):
from pip._internal.req import req_file
return req_file.parse_requirements(path, session="unused")
def is_requirements(path):
if not util.is_text_file(path):
return False
try:
list(parse_requirements(path))
except Exception:
return False
else:
return True
def lib_dir(
name, wheeldir, user=False, home=None, root=None, isolated=False, prefix=None
):
from pip._internal.locations import distutils_scheme
from pip._internal.wheel import root_is_purelib
scheme = distutils_scheme(
"", user=user, home=home, root=root, isolated=isolated, prefix=prefix
)
if root_is_purelib(name, wheeldir):
return scheme['purelib']
else:
return scheme['platlib']
def freeze():
from pip._internal.operations.freeze import freeze
try:
return list(freeze())
except Exception as e:
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception("reading pip freeze")
else:
log.warning("error reading pip freeze: %s", e)
return None
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxOp and LogSoftmaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class SoftmaxTest(test.TestCase):
def _npSoftmax(self, features, dim=-1, log=False):
    if dim == -1:
dim = len(features.shape) - 1
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
is_fp16 = features.dtype == np.float16
if is_fp16:
      # Do the compute in fp32 and cast the result back to fp16.
features = features.astype(np.float32)
e = np.exp(features - np.reshape(
np.amax(
features, axis=dim), one_only_on_dim))
softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
if log:
res = np.log(softmax)
else:
res = softmax
if is_fp16:
res = res.astype(np.float16)
return res
def _testSoftmax(self, np_features, dim=-1, log=False, use_gpu=False):
# A previous version of the code checked the op name rather than the op type
# to distinguish between log and non-log. Use an arbitrary name to catch
# this bug in future.
name = "arbitrary"
np_softmax = self._npSoftmax(np_features, dim=dim, log=log)
with self.cached_session(use_gpu=use_gpu):
if log:
tf_softmax = nn_ops.log_softmax(np_features, axis=dim, name=name)
else:
tf_softmax = nn_ops.softmax(np_features, axis=dim, name=name)
out = self.evaluate(tf_softmax)
self.assertAllCloseAccordingToType(np_softmax, out)
self.assertShapeEqual(np_softmax, tf_softmax)
if not log:
# Bonus check: the softmaxes should add to one in dimension dim.
sum_along_dim = np.sum(out, axis=dim)
self.assertAllCloseAccordingToType(
np.ones(sum_along_dim.shape), sum_along_dim)
def _testAll(self, features):
self._testSoftmax(features, use_gpu=True)
self._testSoftmax(features, log=True, use_gpu=True)
self._testOverflow(use_gpu=True)
def testNpSoftmax(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
# Batch 0: All exps are 1. The expected result is
# Softmaxes = [0.25, 0.25, 0.25, 0.25]
# LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294]
#
# Batch 1:
# exps = [1., 2.718, 7.389, 20.085]
# sum = 31.192
# Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
# LogSoftmaxes = [-3.44019 , -2.44019 , -1.44019 , -0.44019]
np_sm = self._npSoftmax(np.array(features))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, 0.25],
[0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
np_sm,
rtol=1.e-5,
atol=1.e-5)
np_lsm = self._npSoftmax(np.array(features), log=True)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
np_lsm,
rtol=1.e-5,
atol=1.e-5)
def _testOverflow(self, use_gpu=False):
if use_gpu:
type = np.float32 # pylint: disable=redefined-builtin
else:
type = np.float64 # pylint: disable=redefined-builtin
max = np.finfo(type).max # pylint: disable=redefined-builtin
features = np.array([[1., 1., 1., 1.], [max, 1., 2., 3.]]).astype(type)
with self.cached_session(use_gpu=use_gpu):
tf_log_softmax = nn_ops.log_softmax(features)
out = self.evaluate(tf_log_softmax)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[0, -max, -max, -max]]),
out,
rtol=1.e-5,
atol=1.e-5)
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testFloatGPU(self):
if test.is_gpu_available(cuda_only=True):
rows = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
cols = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
for row, col in zip(rows, cols):
logging.info("Testing softmax float dtype in shape [%d, %d]", row, col)
data = np.random.rand(row, col)
self._testAll(data.astype(np.float32))
def testHalf(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16))
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testHalfGPU(self):
if test.is_gpu_available(cuda_only=True):
rows = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
cols = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
for row, col in zip(rows, cols):
logging.info("Testing softmax half dtype in shape [%d, %d]", row, col)
data = np.random.rand(row, col)
self._testAll(data.astype(np.float16))
def testDouble(self):
self._testSoftmax(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64))
self._testOverflow()
def test1DTensorAsInput(self):
self._testSoftmax(
np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False)
self._testOverflow(use_gpu=False)
def test1DTensorAsInputNoReshape(self):
self._testSoftmax(
np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False)
self._testOverflow(use_gpu=False)
def test3DTensorAsInput(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
use_gpu=False)
self._testOverflow(use_gpu=False)
def test3DTensorAsInputNoReshape(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongFirstDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=0,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongSecondDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=1,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testShapeInference(self):
op = nn_ops.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]])
self.assertEqual([3, 2, 4], op.get_shape())
def testEmptyInput(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=[0, 3])
self.assertEqual(0, array_ops.size(x).eval())
# reshape would raise if logits is empty
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax(x, axis=0).eval()
def testDimTooLarge(self):
with self.cached_session():
# Use placeholder to make sure we get runtime error instead of shape
# inference error.
dim = array_ops.placeholder_with_default(100, shape=[])
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax([1., 2., 3., 4.], axis=dim).eval()
def testInvalidAxis(self):
# Test case for GitHub issue 22793.
with self.cached_session():
ones = array_ops.ones(shape=[2, 3])
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax(ones, axis=2).eval()
def testLargeDims(self):
# Make sure that we properly handle large inputs. See
# https://github.com/tensorflow/tensorflow/issues/4425 for details
for dims in [129, 256]:
ones = np.random.rand(dims, dims).astype(np.float32)
np_softmax = self._npSoftmax(ones)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu) as sess:
x = array_ops.placeholder(dtypes.float32)
y = nn_ops.softmax(x)
tf_softmax = sess.run(y, feed_dict={x: ones})
self.assertAllClose(tf_softmax, np_softmax)
if __name__ == "__main__":
test.main()
|
|
from __future__ import absolute_import
__author__ = 'UshareSoft'
import unittest
import json
import yaml
from mock import patch
from ussclicore.utils import generics_utils
from hammr.utils import bundle_utils
from tests.unit.utils.file_utils import find_relative_path_for
class TestFiles(unittest.TestCase):
@patch("ussclicore.utils.printer.out")
def test_check_bundle_should_failed_when_no_name(self, mock_method):
#Given
jsonPath = find_relative_path_for("tests/integration/data/bundle/bundleWithoutName.json")
#When
bundle = generics_utils.check_json_syntax(jsonPath)
bundle_utils.check_bundle(bundle)
#Then
mock_method.assert_called_with("There is no attribute [name] for a [bundle]", "ERROR")
@patch("ussclicore.utils.printer.out")
def test_check_bundle_should_failed_when_no_version(self, mock_method):
#Given
jsonPath = find_relative_path_for("tests/integration/data/bundle/bundleWithoutVersion.json")
#When
bundle = generics_utils.check_json_syntax(jsonPath)
bundle_utils.check_bundle(bundle)
#Then
mock_method.assert_called_with("no attribute [version] for [bundle]", "ERROR")
@patch("ussclicore.utils.printer.out")
def test_check_bundle_should_failed_when_no_files(self, mock_method):
#Given
jsonPath = find_relative_path_for("tests/integration/data/bundle/bundleWithoutFiles.json")
#When
bundle = generics_utils.check_json_syntax(jsonPath)
bundle_utils.check_bundle(bundle)
#Then
mock_method.assert_called_with("no attribute [files] for [bundle]", "ERROR")
@patch("ussclicore.utils.printer.out")
def test_check_bundle_should_failed_when_files_no_name(self, mock_method):
#Given
jsonPath = find_relative_path_for("tests/integration/data/bundle/bundleFilesWithoutName.json")
#When
bundle = generics_utils.check_json_syntax(jsonPath)
bundle_utils.check_bundle(bundle)
#Then
mock_method.assert_called_with("There is no attribute [name] for a [file]", "ERROR")
@patch("ussclicore.utils.printer.out")
def test_check_bundle_should_failed_when_files_no_source(self, mock_method):
#Given
jsonPath = find_relative_path_for("tests/integration/data/bundle/bundleFilesWithoutSource.json")
#When
bundle = generics_utils.check_json_syntax(jsonPath)
bundle_utils.check_bundle(bundle)
#Then
mock_method.assert_called_with("There is no attribute [source] for a [file]", "ERROR")
@patch("ussclicore.utils.printer.out")
def test_check_bundle_should_failed_when_tag_softwarefile_and_bootorder(self, mock_method):
#Given
jsonPath = find_relative_path_for("tests/integration/data/bundle/bundleFilesTagSoftwareFileKeyBootOrder.json")
#When
bundle = generics_utils.check_json_syntax(jsonPath)
bundle_utils.check_bundle(bundle)
#Then
mock_method.assert_called_with("There is the attribute [bootOrder] or [bootType] for file 'directoryTest' but is not tagged as 'bootscript'", "ERROR")
@patch("ussclicore.utils.printer.out")
def test_check_bundle_should_failed_when_tag_bootscript_and_rights(self, mock_method):
#Given
jsonPath = find_relative_path_for("tests/integration/data/bundle/bundleFilesTagBootScriptKeyRights.json")
#When
bundle = generics_utils.check_json_syntax(jsonPath)
bundle_utils.check_bundle(bundle)
#Then
mock_method.assert_called_with("There is the attribute [ownerGroup], [rights] or [symlink] for file 'cleanup_tmp.sh' but is not tagged as 'softwarefile'", "ERROR")
@patch("ussclicore.utils.printer.out")
def test_check_bundle_should_failed_when_tag_ospkg_in_directory(self, mock_method):
#Given
jsonPath = find_relative_path_for("tests/integration/data/bundle/bundleFilesTagOSPkgInDirectory.json")
#When
bundle = generics_utils.check_json_syntax(jsonPath)
bundle_utils.check_bundle(bundle)
#Then
mock_method.assert_called_with("The file 'iotop-0.6-2.el7.noarch.rpm, with tag 'ospkg' must be in the first level files section", "ERROR")
def test_check_bundle_should_succeed_when_no_restrictionRule(self):
# Given
jsonPath = find_relative_path_for("tests/integration/data/bundle/bundleWithoutRestrictionRule.json")
# When
bundle = generics_utils.check_json_syntax(jsonPath)
bundle_utils.check_bundle(bundle)
# Then
self.assertIsNotNone(bundle)
def test_check_bundle_should_succeed_when_empty_restrictionRule(self):
# Given
jsonPath = find_relative_path_for("tests/integration/data/bundle/bundleWithEmptyRestrictionRule.json")
# When
bundle = generics_utils.check_json_syntax(jsonPath)
bundle_utils.check_bundle(bundle)
# Then
self.assertIsNotNone(bundle)
def test_check_bundle_should_succeed(self):
#Given
jsonPath = find_relative_path_for("tests/integration/data/bundle/bundleFull.json")
#When
bundle = generics_utils.check_json_syntax(jsonPath)
bundle = bundle_utils.check_bundle(bundle)
#Then
self.assertIsNotNone(bundle)
def test_recursivelyAppendToArchive_should_failed_when_two_files_have_same_archive_path(self):
# Given
bundle = { 'name': 'MyBundle', 'version': '1.0' }
parent_dir = ""
check_list = []
archive_files = []
files = {
'name': 'myDirectory',
'source': 'tests/integration/data/aDirectory',
'tag': 'softwarefile',
'destination': '/usr/local/myBundle',
'files': [
{
"name": "file.txt",
"source": "tests/integration/data/aDirectory/file1of3.txt"
},
{
"name": "file.txt",
"source": "tests/integration/data/aDirectory/file2of3.txt"
}
]
}
# When
with self.assertRaises(ValueError) as context_manager:
bundle_utils.recursivelyAppendToArchive(bundle, files, parent_dir, check_list, archive_files)
# Then
self.assertEqual(
context_manager.exception.message,
"Cannot have identical files in the bundles section: bundles/MyBundle/1.0/myDirectory/file.txt from tests/integration/data/aDirectory/file2of3.txt"
)
def test_recursivelyAppendToArchive_should_succeed_when_several_files_have_same_source(self):
# Given
bundle = { 'name': 'MyBundle', 'version': '1.0' }
parent_dir = ""
check_list = []
archive_files = []
files = {
'name': 'myDirectory',
'source': 'tests/integration/data/aDirectory',
'tag': 'softwarefile',
'destination': '/usr/local/myBundle',
'files': [
{
'name': 'file1.txt',
'source': 'tests/integration/data/aDirectory/file1of3.txt'
},
{
'name': 'file2.txt',
'source': 'tests/integration/data/aDirectory/file1of3.txt'
},
{
'name': 'pkg1.rpm',
'source': 'http://myServer.com/pkg1/download',
'install': 'true'
},
{
'name': 'pkg2.rpm',
'source': 'http://myServer.com/pkg2/download',
'install': 'true'
}
]
}
# When
r_check_list, r_archive_files = bundle_utils.recursivelyAppendToArchive(bundle, files, parent_dir, check_list, archive_files)
# Then
self.assertEqual(archive_files, [
['bundles/MyBundle/1.0/myDirectory', 'tests/integration/data/aDirectory'],
['bundles/MyBundle/1.0/myDirectory/file1.txt', 'tests/integration/data/aDirectory/file1of3.txt'],
['bundles/MyBundle/1.0/myDirectory/file2.txt', 'tests/integration/data/aDirectory/file1of3.txt'],
['bundles/MyBundle/1.0/myDirectory/pkg1.rpm','http://myServer.com/pkg1/download'],
['bundles/MyBundle/1.0/myDirectory/pkg2.rpm', 'http://myServer.com/pkg2/download']
])
self.assertEqual(r_archive_files, archive_files)
if __name__ == '__main__':
unittest.main()
|
|
"""
This platform allows you to control an alarm control panel over MQTT.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel.mqtt/
"""
import logging
import re
import voluptuous as vol
from homeassistant.components import mqtt
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.mqtt import (
ATTR_DISCOVERY_HASH, CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN,
CONF_STATE_TOPIC, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability,
MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
from homeassistant.components.mqtt.discovery import (
MQTT_DISCOVERY_NEW, clear_discovery_hash)
from homeassistant.const import (
CONF_CODE, CONF_DEVICE, CONF_NAME, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
_LOGGER = logging.getLogger(__name__)
CONF_PAYLOAD_DISARM = 'payload_disarm'
CONF_PAYLOAD_ARM_HOME = 'payload_arm_home'
CONF_PAYLOAD_ARM_AWAY = 'payload_arm_away'
DEFAULT_ARM_AWAY = 'ARM_AWAY'
DEFAULT_ARM_HOME = 'ARM_HOME'
DEFAULT_DISARM = 'DISARM'
DEFAULT_NAME = 'MQTT Alarm'
DEPENDENCIES = ['mqtt']
PLATFORM_SCHEMA = mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend({
vol.Required(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
vol.Required(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_CODE): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_ARM_AWAY, default=DEFAULT_ARM_AWAY): cv.string,
vol.Optional(CONF_PAYLOAD_ARM_HOME, default=DEFAULT_ARM_HOME): cv.string,
vol.Optional(CONF_PAYLOAD_DISARM, default=DEFAULT_DISARM): cv.string,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
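# Keys accepted by the schema above, shown as the dict a validated config
# reduces to (a sketch; topic names and code are placeholders):
#
#     {
#         'command_topic': 'home/alarm/set',  # required
#         'state_topic': 'home/alarm',        # required
#         'name': 'Front Door Alarm',         # optional, defaults to DEFAULT_NAME
#         'code': '1234',                     # optional, enables code validation
#     }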
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
async_add_entities, discovery_info=None):
"""Set up MQTT alarm control panel through configuration.yaml."""
await _async_setup_entity(config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT alarm control panel dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add an MQTT alarm control panel."""
        discovery_hash = None
        try:
discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(config, async_add_entities, config_entry,
discovery_hash)
except Exception:
if discovery_hash:
clear_discovery_hash(hass, discovery_hash)
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(alarm.DOMAIN, 'mqtt'),
async_discover)
async def _async_setup_entity(config, async_add_entities, config_entry=None,
discovery_hash=None):
"""Set up the MQTT Alarm Control Panel platform."""
async_add_entities([MqttAlarm(config, config_entry, discovery_hash)])
class MqttAlarm(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
MqttEntityDeviceInfo, alarm.AlarmControlPanel):
"""Representation of a MQTT alarm status."""
def __init__(self, config, config_entry, discovery_hash):
"""Init the MQTT Alarm Control Panel."""
self._state = None
self._config = config
self._unique_id = config.get(CONF_UNIQUE_ID)
self._sub_state = None
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_hash,
self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe mqtt events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA(discovery_payload)
self._config = config
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_schedule_update_ha_state()
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
def message_received(topic, payload, qos):
"""Run when new MQTT message has been received."""
if payload not in (STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_AWAY, STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED):
_LOGGER.warning("Received unexpected payload: %s", payload)
return
self._state = payload
self.async_schedule_update_ha_state()
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state,
{'state_topic': {'topic': self._config.get(CONF_STATE_TOPIC),
'msg_callback': message_received,
'qos': self._config.get(CONF_QOS)}})
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._config.get(CONF_NAME)
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def code_format(self):
"""Return one or more digits/characters."""
code = self._config.get(CONF_CODE)
if code is None:
return None
if isinstance(code, str) and re.search('^\\d+$', code):
return alarm.FORMAT_NUMBER
return alarm.FORMAT_TEXT
async def async_alarm_disarm(self, code=None):
"""Send disarm command.
This method is a coroutine.
"""
if not self._validate_code(code, 'disarming'):
return
mqtt.async_publish(
self.hass, self._config.get(CONF_COMMAND_TOPIC),
self._config.get(CONF_PAYLOAD_DISARM),
self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
async def async_alarm_arm_home(self, code=None):
"""Send arm home command.
This method is a coroutine.
"""
if not self._validate_code(code, 'arming home'):
return
mqtt.async_publish(
self.hass, self._config.get(CONF_COMMAND_TOPIC),
self._config.get(CONF_PAYLOAD_ARM_HOME),
self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
async def async_alarm_arm_away(self, code=None):
"""Send arm away command.
This method is a coroutine.
"""
if not self._validate_code(code, 'arming away'):
return
mqtt.async_publish(
self.hass, self._config.get(CONF_COMMAND_TOPIC),
self._config.get(CONF_PAYLOAD_ARM_AWAY),
self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
def _validate_code(self, code, state):
"""Validate given code."""
conf_code = self._config.get(CONF_CODE)
check = conf_code is None or code == conf_code
if not check:
_LOGGER.warning('Wrong code entered for %s', state)
return check
|
|
#!/usr/bin/env python2.7
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit test suite for common.cros.chromite."""
import test_env
import base64
import json
import unittest
from common import cros_chromite
class MockConfigCache(object):
def __init__(self, data):
self.data = data
def Get(self, name, version=None):
return self.data.get((name, version))
class ChromiteConfigTestCase(unittest.TestCase):
CHROMITE_CONFIG = {
'_default': {
'foo': 'bar',
'key': 'value',
'hw_tests': [
'default0',
'default1',
],
},
'_templates': {
'no-hwtest-pre-cq': {
'hw_tests': [
],
},
},
'test': {
'foo': 'baz',
},
'no-hwtest-pre-cq': {
'_template': 'no-hwtest-pre-cq',
'name': 'no-hwtest-pre-cq',
},
'parent': {
'name': 'parent',
'child_configs': [
{
'name': 'alice',
'vm_tests': [
'test',
],
'hw_tests': [
'test',
],
'unittests': True,
},
{'name': 'bob'}
],
},
'parent-template': {
'name': 'parent-template',
'hw_tests': [],
'child_configs': [
{
'_template': 'no-hwtest-pre-cq',
'name': 'joe',
}
],
},
'baremetal-pre-cq': {
'vm_tests': [
'test',
],
'hw_tests': [
'test',
],
'unittests': True,
},
'pre-cq-group': {},
'master-thing': {
'master': True,
},
}
def setUp(self):
self.config = cros_chromite.ChromiteConfig.FromConfigDict(
self.CHROMITE_CONFIG)
self.test = self.config['test']
self.no_hwtest_pre_cq = self.config['no-hwtest-pre-cq']
self.parent = self.config['parent']
self.parent_template = self.config['parent-template']
self.baremetal = self.config['baremetal-pre-cq']
def testChildren(self):
self.assertEqual(len(self.test.children), 0)
self.assertEqual(len(self.parent.children), 2)
self.assertEqual(self.parent.children[0]['name'], 'alice')
self.assertEqual(self.parent.children[1]['name'], 'bob')
def testDefaultFallthrough_UsesLocalWhenAvailable(self):
self.assertEqual(self.test['foo'], 'baz')
def testDefaultFallthrough_UsesDefaultWhenMissing(self):
self.assertEqual(self.test['key'], 'value')
def testDefaultFallthrough_ParentUsesDefaults(self):
self.assertEqual(self.parent['hw_tests'], ['default0', 'default1'])
def testHasTests(self):
self.assertFalse(self.test.HasVmTests())
self.assertTrue(self.test.HasHwTests())
self.assertFalse(self.no_hwtest_pre_cq.HasHwTests())
self.assertFalse(self.test.HasUnitTests())
self.assertTrue(self.baremetal.HasVmTests())
self.assertTrue(self.baremetal.HasHwTests())
self.assertTrue(self.baremetal.HasUnitTests())
def testHasTests_DetectsInChildren(self):
self.assertTrue(self.parent.HasVmTests())
self.assertTrue(self.parent.HasHwTests())
self.assertFalse(self.parent_template.HasHwTests())
self.assertTrue(self.baremetal.HasUnitTests())
def testPreCqDetection(self):
self.assertFalse(self.test.IsPreCqBuilder())
self.assertTrue(self.baremetal.IsPreCqBuilder())
self.assertFalse(self.baremetal.IsGeneralPreCqBuilder())
pre_cq_group = self.config['pre-cq-group']
self.assertTrue(pre_cq_group.IsPreCqBuilder())
self.assertTrue(pre_cq_group.IsGeneralPreCqBuilder())
def testIsMaster(self):
self.assertTrue(self.config['master-thing'].is_master)
def testCategorize(self):
# Type-based: name, build_type => base, suffix, category
expectations = (
)
# Name-based: name => base, suffix, category
expectations = (
# (With Board Type)
('pre-cq-launcher', 'priest', 'pre-cq-launcher', None, 'PRE_CQ_LAUNCHER'),
# The canary board type should override the name-based inferences,
# marking this board as a canary.
('odd-name-paladin', 'canary', 'odd-name-paladin', None, 'CANARY'),
('my-board-asan', None, 'my-board', 'asan', 'ASAN'),
('my-board-pre-cq', None, 'my-board', 'pre-cq', 'PRE_CQ'),
('my-board-chrome-pfq', None, 'my-board', 'chrome-pfq', 'PFQ'),
('my-board-chromium-pfq', None, 'my-board', 'chromium-pfq', 'PFQ'),
('my-board-paladin', None, 'my-board', 'paladin', 'PALADIN'),
('my-board-release', None, 'my-board', 'release', 'CANARY'),
('my-board-release-group', None, 'my-board', 'release-group', 'CANARY'),
('my-board-firmware', None, 'my-board', 'firmware', 'FIRMWARE'),
('my-board-incremental', None, 'my-board', 'incremental', 'INCREMENTAL'),
('my-board-factory', None, 'my-board', 'factory', 'FACTORY'),
('my-board-project-sdk', None, 'my-board-project', 'sdk', 'SDK'),
('my-board-toolchain-major', None,
'my-board', 'toolchain-major', 'TOOLCHAIN'),
('my-board-toolchain-minor', None,
'my-board', 'toolchain-minor', 'TOOLCHAIN'),
('master-toolchain', None,
'master', 'toolchain', 'TOOLCHAIN'),
('llvm-toolchain-group', None,
'llvm', 'toolchain-group', 'TOOLCHAIN'),
)
for name, build_type, exp_base, exp_suffix, exp_cat_attr in expectations:
exp_category = getattr(cros_chromite.ChromiteTarget, exp_cat_attr)
base, suffix, category = cros_chromite.ChromiteTarget.Categorize(
name,
build_type=build_type)
self.assertEqual(
(base, suffix, category), (exp_base, exp_suffix, exp_category))
class ChromitePinManagerTestCase(unittest.TestCase):
def testGetPinnedBranch_PinnedBranchReturnsPinnedValue(self):
pm = cros_chromite.ChromitePinManager(
'test',
pinned={'a': 'b'},
require=True)
self.assertEqual(pm.GetPinnedBranch('a'), 'b')
def testGetPinnedBranch_UnpinnedBranchReturnsBranch(self):
pm = cros_chromite.ChromitePinManager(
'test',
pinned={'a': 'b'},
require=False)
self.assertEqual(pm.GetPinnedBranch('foo'), 'foo')
def testGetPinnedBranch_UnpinnedBranchReturnsErrorWithRequiredPinning(self):
pm = cros_chromite.ChromitePinManager(
'test',
pinned={'a': 'b'},
require=True)
self.assertRaises(cros_chromite.ChromiteError,
pm.GetPinnedBranch, 'foo')
class ChromiteConfigManagerTestCase(unittest.TestCase):
def setUp(self):
self.cache = MockConfigCache({
('test', 'v1'): '{}',
('test', 'v_invalid'): '{NOT JSON}',
})
def testGetConfig_ValidSucceeds(self):
manager = cros_chromite.ChromiteConfigManager(self.cache,
cros_chromite.ChromitePinManager(
'test',
{'test': 'v1'}))
self.assertTrue(isinstance(manager.GetConfig('test'),
cros_chromite.ChromiteConfig))
def testGetConfig_InvalidJsonRaises(self):
manager = cros_chromite.ChromiteConfigManager(self.cache,
cros_chromite.ChromitePinManager(
'test',
{'test': 'v_invalid'}))
self.assertRaises(cros_chromite.ChromiteError, manager.GetConfig, 'test')
def testGetConfig_MissingRaises(self):
manager = cros_chromite.ChromiteConfigManager(self.cache)
self.assertRaises(cros_chromite.ChromiteError, manager.GetConfig, 'foo')
class ChromiteFetcherTestCase(unittest.TestCase):
def setUp(self):
self.fetcher = cros_chromite.ChromiteFetcher(
cros_chromite.ChromitePinManager(
'test',
{'test': 'v1'})
)
@staticmethod
def _configUrlForBranch(branch):
return '%s/+/%s/%s?format=text' % (
cros_chromite.ChromiteFetcher.CHROMITE_GITILES_BASE,
branch,
cros_chromite.ChromiteFetcher.CHROMITE_CONFIG_PATH,
)
def testFetch_Valid(self):
fetched_urls = []
def _MockGetText(url):
fetched_urls.append(url)
return base64.b64encode('content')
self.fetcher._GetText = _MockGetText
data = self.fetcher('test', None)
self.assertEqual(data, ('content', 'v1'))
self.assertEqual(fetched_urls, [self._configUrlForBranch('v1')])
def testFetch_NotBase64(self):
def _MockGetText(_url):
return 'Not Valid Base64'
self.fetcher._GetText = _MockGetText
self.assertRaises(cros_chromite.GitilesError, self.fetcher, 'test', None)
if __name__ == '__main__':
unittest.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _make_type_verifier, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring the schema
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances"""
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as a new session, which has separate SQLConf,
registered temporary views and UDFs, but a shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
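# Illustrative sketch (a hypothetical call, not in the original doctests): for a
# homogeneous list of Rows, the per-row schemas are merged into one StructType, e.g.
#   from pyspark.sql import Row
#   spark._inferSchemaFromList([Row(a=1, b="x"), Row(a=2, b="y")])
#   # -> StructType with fields a: LongType and b: StringType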
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data can be consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
:return corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
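# Worked sketch (values assumed): a pandas frame with a datetime64[ns] column keeps
# nanosecond longs when converted via to_records(); casting each record to the
# corrected dtype makes tolist() yield datetime.datetime objects instead (SPARK-22417):
#   rec = pdf.to_records(index=False)[0]
#   fixed = self._get_numpy_record_dtype(rec)   # e.g. dtype([('ts', '<M8[us]')])
#   rec.astype(fixed).tolist() if fixed is not None else rec.tolist()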
def _convert_from_pandas(self, pdf):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending it to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data during the Pandas to Arrow conversion.
"""
from pyspark.serializers import ArrowSerializer, _create_batch
from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create Arrow record batches
batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)])
for pdf_slice in pdf_slices]
# Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
if isinstance(schema, (list, tuple)):
struct = from_arrow_schema(batches[0].schema)
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
# Create the Spark DataFrame directly from the Arrow data and schema
jrdd = self._sc._serialize_to_jvm(batches, len(batches), ArrowSerializer())
jdf = self._jvm.PythonSQLUtils.arrowPayloadToDataFrame(
jrdd, schema.json(), self._wrapped._jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
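# Hedged usage note: this path is only taken from createDataFrame() below when the
# session config "spark.sql.execution.arrow.enabled" is "true" and pandas/pyarrow are
# available, e.g. (illustrative values):
#   spark.conf.set("spark.sql.execution.arrow.enabled", "true")
#   spark.createDataFrame(pandas.DataFrame({"a": [1, 2, 3]}))
# If the Arrow conversion fails, createDataFrame() warns and falls back to the slower
# row-by-row path.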
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
Each record will also be wrapped into a tuple, which can be converted to a row later.
If schema inference is needed, ``samplingRatio`` determines the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
# If no schema supplied by user then get the names of columns only
if schema is None:
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in data.columns]
if self.conf.get("spark.sql.execution.arrow.enabled", "false").lower() == "true" \
and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema)
except Exception as e:
warnings.warn("Arrow will not be used in createDataFrame: %s" % str(e))
# Fall back to creating the DataFrame without Arrow if an exception was raised
data = self._convert_from_pandas(data)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
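# Hedged usage sketch of the context-manager protocol implemented above (the master
# URL is illustrative): the session is stopped automatically on exit of the with block.
#   with SparkSession.builder.master("local[2]").getOrCreate() as session:
#       session.range(3).collect()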
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
|
#!/usr/bin/python
from __future__ import division
import time
import os
import sys
import signal
from PyMata.pymata import PyMata as pm
import socket
import OSC
import json
board = None
config = None
pathname = None
filename = None
led_pin = 13 # overwritten by config.json
midi_min = 0 # overwritten by config.json
midi_max = 127 # overwritten by config.json
"""
Trigger the 32U4_reset function (equivalent to pressing 32U4 RESET button on the Yun)
"""
def reset_yun():
def writeFile(value, file):
with open(file, "w") as f:
f.write(value)
writeFile("18", "/sys/class/gpio/export") # make GPIO18 available
writeFile("high", "/sys/class/gpio/gpio18/direction") # set pin 18 as output
writeFile("1", "/sys/class/gpio/gpio18/value") # Set pin 18 high
writeFile("0", "/sys/class/gpio/gpio18/value") # Set pin 18 low
writeFile("18", "/sys/class/gpio/unexport") # close out GPIO18
"""
Handle Ctrl + C interrupts to stop the program
"""
def signal_handler(sig, frame):
global board
global config
global pathname
global filename
print("Exiting ...")
if config is not None:
with open(os.path.join(pathname, filename), 'w') as config_file:
config = json.dump(config, config_file, sort_keys=True, indent=4)
if board is not None:
board.close()
sys.exit(0)
"""
Map the input range to a desired output range. Similar to the Arduino map() function.
"""
def map_value(x, in_min, in_max, out_min, out_max):
return out_min + ((x - in_min) / (in_max - in_min)) * (out_max - out_min)
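# Worked example (values assumed): mapping a MIDI velocity of 64 from the 0-127 input
# range onto a 1000-2000 output range gives
#   map_value(64, 0, 127, 1000, 2000) == 1000 + (64 / 127) * 1000 ~= 1503.9
# Thanks to the __future__ division import above, this stays a float even on Python 2.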
"""
Generic handler for debugging purposes
"""
def debug_handler(addr, tags, data, client_address):
global board
global config
txt = "OSCMessage '%s' from %s: " % (addr, client_address)
txt += str(data)
print(txt)
"""
Handle incoming MIDI note messages. Note messages drive the matching servo between its home and maximum positions, scaled by note velocity.
"""
def note_handler(addr, tags, data, client_address):
global board
global config
txt = "OSCMessage '%s' from %s: " % (addr, client_address)
txt += str(data)
print(txt)
chan = data[0]
note = data[1]
velocity = data[2]
# Tell the servo where to go upon receipt of corresponding midi note value
if board is not None:
for c in config["servo"]:
if note == c["note"]:
servo_pos = map_value(velocity, midi_min, midi_max, c["pos"]["home"], c["pos"]["max"])
if c["reverse_servo_direction"] == True: # reverse the direction of the input
servo_pos = map_value(servo_pos, c["pos"]["abs_min"], c["pos"]["abs_max"], c["pos"]["abs_max"],
c["pos"]["abs_min"])
board.analog_write(c["pwm_pin"], int(servo_pos)) # move servo
"""
Handle incoming control change (CC) messages. CC messages adjust the home and maximum positions of servos and the speed of steppers.
"""
def cc_handler(addr, tags, data, client_address):
global board
global config
txt = "OSCMessage '%s' from %s: " % (addr, client_address)
txt += str(data)
print(txt)
chan = data[0]
ccNum = data[1]
ccVal = data[2]
# Cycle through all servo/stepper control change messages
if board is not None:
for c in config["servo"]: # Cycle through all defined servos
if ccNum == c["cc"]["home"]: # Check if CC val matches identifing servo minimum value
if c["reverse_home_direction"] == True:
ccVal = map_value(ccVal, midi_min, midi_max, midi_max,
midi_min) # reverse the direction of the input
servo_pos = map_value(ccVal, midi_min, midi_max, c["pos"]["abs_min"], c["pos"]["abs_max"])
c["pos"]["home"] = int(servo_pos)
if c["reverse_servo_direction"] == True:
servo_pos = map_value(servo_pos, c["pos"]["abs_min"], c["pos"]["abs_max"], c["pos"]["abs_max"],
c["pos"]["abs_min"]) # reverse the direction of the input
board.analog_write(c["pwm_pin"], int(servo_pos))
elif ccNum == c["cc"]["max"]: # Check if CC val matches identifing servo maximum value
if c["reverse_max_direction"] == True:
ccVal = map_value(ccVal, midi_min, midi_max, midi_max, midi_min)
servo_pos = map_value(ccVal, midi_min, midi_max, c["pos"]["home"], c["pos"]["abs_max"])
c["pos"]["max"] = int(servo_pos)
if c["reverse_servo_direction"] == True:
servo_pos = map_value(servo_pos, c["pos"]["abs_min"], c["pos"]["abs_max"], c["pos"]["abs_max"],
c["pos"]["abs_min"]) # reverse the direction of the input
board.analog_write(c["pwm_pin"], int(servo_pos))
if ccNum == config["stepper"]["cc"]["speed"]: # Check if CC val matches identifing stepper value
stepper_speed = map_value(ccVal, midi_min, midi_max, config["stepper"]["move"]["min_speed"],
config["stepper"]["move"]["max_speed"])
board.timerthree_set_frequency(int(stepper_speed))
board.timerthree_pwm(step_pin, duty_cycle)
"""
return IP address so code can be reused between arduinos
"""
def getIPAddress():
so = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
so.connect((config["router_ip"], 0)) # Connect to the router's IP address
address = so.getsockname()[0]
return address
"""
return IP address of 2nd port so code can be reused between arduinos
"""
def getIPAddress2(address):
"""
Takes in a str of the IP address.
If the original address is the wifi port, return the address increased by 1; this is the ethernet port.
If the original address is the ethernet port, return the address decreased by 1; this is the wifi port.
"""
endNum = int(address.split('.')[-1]) # grab the last octet of the ip address
if endNum % 2 == 0:
endNumNew = endNum + 1
else:
endNumNew = endNum - 1
addressNew = '192.168.1.' + str(endNumNew)
return addressNew
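# Worked example (addresses assumed, per the DHCP scheme described in main below):
#   getIPAddress2('192.168.1.100') -> '192.168.1.101'   # wifi -> ethernet
#   getIPAddress2('192.168.1.101') -> '192.168.1.100'   # ethernet -> wifi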
"""
Main
"""
if __name__ == "__main__":
# Read the JSON configuration file
pathname = os.path.dirname(os.path.realpath(__file__))
filename = "config.json"
print("Reading JSON file ...")
with open(os.path.join(pathname, filename)) as config_file:
config = json.load(config_file)
# handle Ctrl + C messages
signal.signal(signal.SIGINT, signal_handler)
# Connect to the Atmega32u4
print("Initializing board ...")
try:
board = pm("/dev/ttyATH0") # connect to the Atmega32u4 on ATH0
except Exception as inst:
print(type(inst))
print(inst.args)
print(inst)
sys.exit(0) # Exit the script upon failure
# Set the pwm pins as servo control pins
for c in config["servo"]:
board.servo_config(c["pwm_pin"])
# Initialize board settings
midi_min = config["midi_min"]
midi_max = config["midi_max"]
direction_pin = config["stepper"]["direction_pin"]
stepper_direction = config["stepper"]["move"]["direction"]
step_pin = config["stepper"]["step_pin"]
duty_cycle = 511 # 50% duty cycle (range 0 - 1023)
stepper_min_speed = config["stepper"]["move"]["min_speed"]
stepper_max_speed = config["stepper"]["move"]["max_speed"]
led_pin = config["led_pin"]
board.set_pin_mode(direction_pin, board.OUTPUT, board.DIGITAL)
board.digital_write(direction_pin, stepper_direction)
board.timerthree_initialize()
board.timerthree_set_frequency(0)
board.timerthree_pwm(step_pin, duty_cycle)
print("Initializing server ...")
# find board IP
address1 = getIPAddress()
address2 = getIPAddress2(address1)
# WLAN ports are DHCP reserved as even numbers starting at 100, corresponding LAN ports are WLAN + 1
if int(address1[-3:]) % 2 == 0:
addressWLAN = address1
addressLAN = address2
else:
addressWLAN = address2
addressLAN = address1
# Port to use is specified in startup script. Use WLAN unless LAN is given as additional argument
if len(sys.argv) > 1 and sys.argv[1] == 'LAN':
s = OSC.OSCServer((addressLAN, config["port"])) # port 2346
else:
s = OSC.OSCServer((addressWLAN, config["port"])) # port 2346
#s.addMsgHandler('/test', note_handler) # call handler() for OSC messages received with the /test address
s.addMsgHandler('/note', note_handler)
s.addMsgHandler('/cc', cc_handler)
s.addMsgHandler('list', debug_handler)
board.set_pin_mode(led_pin, board.OUTPUT, board.DIGITAL)
board.digital_write(led_pin, 1) # Turn on the builtin LED
# Serve forever
print("Serving ...")
s.timed_out = False
while not s.timed_out:
s.handle_request()
# s.serve_forever()
|
|
# openstack_dashboard.local.nci.crypto
#
# Copyright (c) 2015, NCI, Australian National University.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import binascii
import datetime
import hashlib
import hmac
import logging
import os
import os.path
import paramiko.rsakey
#import pdb ## DEBUG
import six
import subprocess
import time
import uuid
import urllib
import urlparse
from StringIO import StringIO
try:
from cryptography import x509
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.hazmat.primitives.serialization import BestAvailableEncryption, Encoding, load_pem_private_key, NoEncryption, PrivateFormat
from cryptography.x509.oid import NameOID
USE_NEW_CRYPTO_LIB = True
except ImportError:
from OpenSSL import crypto
from Crypto.PublicKey import RSA as pycrypto_RSA
USE_NEW_CRYPTO_LIB = False
from django.conf import settings
from openstack_dashboard import api
from .constants import *
from .exceptions import CryptoError
LOG = logging.getLogger(__name__)
TEMP_URL_KEY_METADATA_HDR = "X-Account-Meta-Temp-URL-Key"
class CryptoStashItem(object):
def __init__(self, impl, stash, metadata):
self._impl = impl
self._stash = stash
if metadata is None:
# Avoid overwriting an existing object in case we happen to get a
# duplicate UUID (should be very rare). Swift API doesn't have an
# atomic "create unique" function so there is a race condition here
# but risk should be low.
container = nci_private_container_name(self._request)
ref = "{0}/{1}".format(self._stash._base_ref, uuid.uuid4())
if api.swift.swift_object_exists(self._request, container, ref):
ref = "{0}/{1}".format(self._stash._base_ref, uuid.uuid4())
if api.swift.swift_object_exists(self._request, container, ref):
raise CryptoError("Unable to generate unique stash item reference")
else:
ref = metadata.get("ref")
if not ref:
raise CryptoError("Incomplete metadata for crypto stash item")
self._ref = ref
@property
def _request(self):
return self._stash._request
@property
def ref(self):
"""Returns the stash reference for this item."""
assert self._ref
return self._ref
@property
def public_ref(self):
"""Returns the full public URL stash reference for this item."""
endpoint = urlparse.urlsplit(api.base.url_for(self._request, "object-store"))
path = "/".join([
endpoint.path,
urllib.quote(nci_private_container_name(self._request)),
urllib.quote(self.ref),
])
return urlparse.urlunsplit(list(endpoint[:2]) + [path, "", ""])
def metadata(self):
"""Returns a dictionary of the item's metadata for storage."""
return {
"version": 1,
"ref": self.ref,
}
def generate_temp_url(self):
"""Generates a signed temporary URL for this item."""
secret = swift_get_temp_url_key(self._request)
if not secret:
raise CryptoError("Temporary URL key not configured in object storage")
# The signature needs to include the full path to the object as
# requested by the client.
public_url = urlparse.urlsplit(self.public_ref)
sig_path = public_url.path
if sig_path.startswith("/swift/"):
# Ceph uses a URI prefix to distinguish between S3 and Swift API
# calls so we need to remove this otherwise the calculated
# signature will be wrong.
# https://github.com/ceph/ceph/blob/v0.80.7/src/rgw/rgw_swift.cc#L578
sig_path = sig_path[6:]
expires = int(time.time()) + 3600
data = "\n".join(["GET", str(expires), sig_path])
LOG.debug("Temporary URL data for signature: {0}".format(repr(data)))
sig = hmac.new(secret.encode(), data.encode(), hashlib.sha1).hexdigest()
params = urllib.urlencode({
"temp_url_sig": sig,
"temp_url_expires": expires,
})
return urlparse.urlunsplit(list(public_url[:3]) + [params, ""])
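# Minimal standalone sketch of the Swift temp URL scheme used above (all values are
# assumed/illustrative): the signature is an HMAC-SHA1 over "GET\n<expires>\n<path>".
#   import hmac, hashlib, time
#   expires = int(time.time()) + 3600
#   path = "/v1/AUTH_tenant/container/stash/some-uuid"   # hypothetical object path
#   body = "\n".join(["GET", str(expires), path])
#   sig = hmac.new(b"temp-url-key", body.encode(), hashlib.sha1).hexdigest()
#   signed = "https://swift.example.org" + path + \
#       "?temp_url_sig=%s&temp_url_expires=%d" % (sig, expires)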
def cloud_config_dict(self):
"""Dictionary for referencing item in user-data for a VM instance."""
return {
"url": self.generate_temp_url(),
}
class CryptoStashItemWithPwd(CryptoStashItem):
def __init__(self, impl, stash, metadata):
super(CryptoStashItemWithPwd, self).__init__(impl, stash, metadata)
@property
def password(self):
s1key = self._stash._s1key
assert len(s1key) >= hashlib.sha256().digest_size
# Second stage of HKDF simplified since we only need one round to
# reach a key length equal to the digest size.
h = hmac.new(s1key, digestmod=hashlib.sha256)
h.update(self.ref)
h.update(six.int2byte(1))
k = h.digest()
return base64.b64encode(k)
def cloud_config_dict(self):
d = super(CryptoStashItemWithPwd, self).cloud_config_dict()
d["pw"] = self.password
return d
class PrivateKey(CryptoStashItemWithPwd):
def __init__(self, impl, stash, metadata):
super(PrivateKey, self).__init__(impl, stash, metadata)
self._cache = {}
def export(self):
"""Exports the private key in encrypted PEM format."""
pw = self.password
try:
if USE_NEW_CRYPTO_LIB:
return self._impl.private_bytes(Encoding.PEM,
PrivateFormat.PKCS8,
BestAvailableEncryption(pw))
else:
return crypto.dump_privatekey(crypto.FILETYPE_PEM,
self._impl,
"aes-256-cbc",
pw)
except Exception as e:
LOG.exception("Error exporting private key (ref {0}): {1}".format(self.ref, e))
raise CryptoError("Failed to export private key with ref: {0}".format(self.ref))
def fingerprint(self):
"""Returns the fingerprint of the PKCS#8 DER key."""
try:
if USE_NEW_CRYPTO_LIB:
der = self._impl.private_bytes(Encoding.DER,
PrivateFormat.PKCS8,
NoEncryption())
else:
pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, self._impl)
# Convert from PEM encoding to PKCS#8 DER.
# There isn't a way to do this via the PyOpenSSL API so we
# have to use PyCrypto instead.
der = pycrypto_RSA.importKey(pem).exportKey('DER', pkcs=8)
except Exception as e:
LOG.exception("Error generating key fingerprint (ref {0}): {1}".format(self.ref, e))
raise CryptoError("Failed to get fingerprint for key with ref: {0}".format(self.ref))
fp = hashlib.sha1(der).digest()
return ":".join([binascii.hexlify(x) for x in fp])
@property
def _ssh_key(self):
if "ssh_key" not in self._cache:
if USE_NEW_CRYPTO_LIB:
pem = self._impl.private_bytes(Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
NoEncryption())
else:
pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, self._impl)
if "BEGIN RSA PRIVATE KEY" not in pem:
# Convert from PKCS#8 into "traditional" RSA format.
# There isn't a way to do this via the PyOpenSSL API so we
# have to use PyCrypto instead.
pem = pycrypto_RSA.importKey(pem).exportKey('PEM', pkcs=1)
self._cache["ssh_key"] = paramiko.rsakey.RSAKey(file_obj=StringIO(pem))
return self._cache["ssh_key"]
def ssh_publickey(self):
"""Exports the public key component in OpenSSH format."""
try:
return "{0} {1} {2}".format(
self._ssh_key.get_name(),
self._ssh_key.get_base64(),
self._request.user.project_name,
)
except Exception as e:
LOG.exception("Error exporting public SSH key (ref {0}): {1}".format(self.ref, e))
raise CryptoError("Failed to export public SSH key with ref: {0}".format(self.ref))
def ssh_fingerprint(self):
"""Returns the SSH fingerprint of the key."""
try:
fp = self._ssh_key.get_fingerprint()
except Exception as e:
LOG.exception("Error generating SSH key fingerprint (ref {0}): {1}".format(self.ref, e))
raise CryptoError("Failed to get SSH fingerprint for key with ref: {0}".format(self.ref))
return ":".join([binascii.hexlify(x) for x in fp])
class Certificate(CryptoStashItem):
def __init__(self, impl, stash, metadata):
super(Certificate, self).__init__(impl, stash, metadata)
def export(self):
"""Exports the certificate in PEM format."""
try:
if USE_NEW_CRYPTO_LIB:
return self._impl.public_bytes(Encoding.PEM)
else:
return crypto.dump_certificate(crypto.FILETYPE_PEM, self._impl)
except Exception as e:
LOG.exception("Error exporting certificate (ref {0}): {1}".format(self.ref, e))
raise CryptoError("Failed to export certificate with ref: {0}".format(self.ref))
def fingerprint(self):
"""Returns the fingerprint of the certificate."""
try:
if USE_NEW_CRYPTO_LIB:
fp = self._impl.fingerprint(hashes.SHA1())
return ":".join([binascii.hexlify(x) for x in fp])
else:
return self._impl.digest("sha1").lower()
except Exception as e:
LOG.exception("Error generating certificate fingerprint (ref {0}): {1}".format(self.ref, e))
raise CryptoError("Failed to get fingerprint for certificate with ref: {0}".format(self.ref))
def verify_key_pair(self, key):
"""Verifies that the certificate is paired with the given private key."""
assert isinstance(key, PrivateKey)
test_data = base64.b64decode("Ag5Ns98mgdLxiq3pyuNecMCXGUcYopmPNyc6GsJ6wd0=")
try:
if USE_NEW_CRYPTO_LIB:
pad = padding.PSS(padding.MGF1(hashes.SHA256()),
padding.PSS.MAX_LENGTH)
signer = key._impl.signer(pad, hashes.SHA256())
signer.update(test_data)
sig = signer.finalize()
verifier = self._impl.public_key().verifier(sig,
pad,
hashes.SHA256())
verifier.update(test_data)
try:
verifier.verify()
except InvalidSignature:
return False
else:
sig = crypto.sign(key._impl, test_data, "sha256")
try:
crypto.verify(self._impl, sig, test_data, "sha256")
except:
return False
except Exception as e:
LOG.exception("Error verifying certificate/key pair (cert {0}; key {1}): {2}".format(self.ref, key.ref, e))
raise CryptoError("Failed to verify certificate \"{0}\" and key \"{1}\"".format(self.ref, key.ref))
return True
# TODO: Use Barbican for storage instead of Swift. However, the following
# blueprint will need to be implemented first so that we can retrieve
# items via cloud-init in the VM without needing a full user token.
# https://blueprints.launchpad.net/nova/+spec/instance-users
class CryptoStash(object):
def __init__(self, request, params=None):
self._request = request
self._base_ref = "stash"
self._params = {}
self._s1key_cache = None
if params is not None:
self.init_params(params)
@property
def _s1key(self):
if self._s1key_cache is None:
if "salt" not in self.params:
raise CryptoError("Crypto stash parameters incomplete")
try:
salt = base64.b64decode(self.params.get("salt"))
if len(salt) < 32:
raise ValueError("Salt is too short")
except Exception as e:
LOG.exception("Error decoding crypto stash salt: {0}".format(e))
raise CryptoError("Crypto stash internal fault")
if hasattr(settings, "NCI_CRYPTO_STASH_SECRET_PATH"):
path = settings.NCI_CRYPTO_STASH_SECRET_PATH
else:
path = "/etc/openstack-dashboard"
if not os.path.isdir(path):
path = settings.LOCAL_PATH
path = os.path.join(path, ".crypto_stash")
try:
with open(path) as fh:
master = fh.readline().strip()
if not master:
raise ValueError("Master secret is empty")
master = base64.b64decode(master)
if len(master) < 32:
raise ValueError("Master secret is too short")
except Exception as e:
LOG.exception("Error loading crypto stash master secret: {0}".format(e))
raise CryptoError("Crypto stash internal fault")
# This is the first stage of HKDF:
# https://tools.ietf.org/html/rfc5869
# NB: It's assumed that the master key was generated from a
# cryptographically strong random source.
h = hmac.new(salt, digestmod=hashlib.sha256)
h.update(master)
self._s1key_cache = h.digest()
return self._s1key_cache
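# Minimal sketch of the derivation, assuming a 32-byte random master secret and salt
# (RFC 5869 HKDF with SHA-256); the expand stage lives in CryptoStashItemWithPwd.password:
#   prk = hmac.new(salt, master, hashlib.sha256).digest()                     # extract
#   okm = hmac.new(prk, item_ref + six.int2byte(1), hashlib.sha256).digest()  # expand
#   password = base64.b64encode(okm)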
def init_params(self, params=None):
"""Creates new or loads existing stash parameters."""
if params is not None:
if not isinstance(params, dict):
raise CryptoError("Invalid crypto stash parameters type")
elif params.get("version", 0) != 1:
raise CryptoError("Unsupported crypto stash format")
self._params = params
else:
self._params = {
"version": 1,
"salt": base64.b64encode(os.urandom(32)),
}
self._s1key_cache = None
@property
def initialised(self):
return bool(self._params)
@property
def params(self):
"""Returns current stash parameters."""
if not self._params:
raise CryptoError("Crypto stash parameters not set")
return self._params
def _save_to_stash(self, item_cls, key_impl):
item = item_cls(key_impl, self, None)
container = nci_private_container_name(self._request)
api.swift.swift_api(self._request).put_object(container,
item.ref,
item.export(),
content_type="text/plain")
return item
def create_private_key(self):
"""Generates a new private key and saves it in the stash."""
try:
if USE_NEW_CRYPTO_LIB:
key_impl = rsa.generate_private_key(65537,
3072,
default_backend())
else:
key_impl = crypto.PKey()
key_impl.generate_key(crypto.TYPE_RSA, 3072)
except Exception as e:
LOG.exception("Error generating new RSA key: {0}".format(e))
raise CryptoError("Failed to generate new private key")
return self._save_to_stash(PrivateKey, key_impl)
def import_private_key(self, upload):
"""Imports an unencrypted private key into the stash."""
if (upload.size < 0) or (upload.size > 262144):
raise CryptoError("Uploaded file too large - expected a private key")
try:
if USE_NEW_CRYPTO_LIB:
key_impl = load_pem_private_key(upload.read(),
None,
default_backend())
key_size = key_impl.key_size
else:
key_impl = crypto.load_privatekey(crypto.FILETYPE_PEM,
upload.read())
key_size = key_impl.bits()
except Exception as e:
LOG.exception("Error importing RSA key: {0}".format(e))
raise CryptoError("Import failed - expected a PEM encoded unencrypted private key")
if key_size < 3072:
raise CryptoError("Import failed - key must be 3072 bits or larger")
return self._save_to_stash(PrivateKey, key_impl)
def load_private_key(self, metadata):
"""Loads an existing private key from the stash."""
if not isinstance(metadata, dict):
raise CryptoError("Metadata missing or invalid type when loading private key")
key = PrivateKey(None, self, metadata)
swift_obj = api.swift.swift_get_object(self._request,
nci_private_container_name(self._request),
key.ref,
resp_chunk_size=None)
pw = key.password
try:
if USE_NEW_CRYPTO_LIB:
LOG.debug("Using new cryptography library")
key._impl = load_pem_private_key(swift_obj.data,
pw,
default_backend())
else:
LOG.debug("Using old cryptography library")
key._impl = crypto.load_privatekey(crypto.FILETYPE_PEM,
swift_obj.data,
pw)
except Exception as e:
LOG.exception("Error loading RSA key: {0}".format(e))
raise CryptoError("Failed to load private key with ref: {0}".format(key.ref))
return key
def create_x509_cert(self, key, subject_cn, valid_days):
"""Returns a new self-signed X.509 certificate in PEM format."""
assert isinstance(key, PrivateKey)
now = datetime.datetime.utcnow()
nvb = now + datetime.timedelta(days=-1)
nva = now + datetime.timedelta(days=valid_days)
try:
if USE_NEW_CRYPTO_LIB:
builder = x509.CertificateBuilder()
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.not_valid_before(nvb)
builder = builder.not_valid_after(nva)
pub_key_impl = key._impl.public_key()
builder = builder.public_key(pub_key_impl)
cn = x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME,
subject_cn if isinstance(subject_cn, six.text_type) else six.u(subject_cn)),
])
builder = builder.subject_name(cn)
builder = builder.issuer_name(cn)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=0),
True)
builder = builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(pub_key_impl),
False)
builder = builder.add_extension(
x509.AuthorityKeyIdentifier.from_issuer_public_key(pub_key_impl),
False)
cert_impl = builder.sign(key._impl,
hashes.SHA256(),
default_backend())
else:
cert_impl = crypto.X509()
cert_impl.set_version(2)
cert_impl.set_serial_number(int(uuid.uuid4()))
cert_impl.set_notBefore(nvb.strftime("%Y%m%d%H%M%SZ"))
cert_impl.set_notAfter(nva.strftime("%Y%m%d%H%M%SZ"))
cert_impl.set_pubkey(key._impl)
subject = cert_impl.get_subject()
subject.CN = subject_cn
cert_impl.set_issuer(subject)
cert_impl.add_extensions([
crypto.X509Extension(b"basicConstraints",
True,
b"CA:TRUE, pathlen:0"),
crypto.X509Extension(b"subjectKeyIdentifier",
False,
b"hash",
subject=cert_impl),
])
# This has to be done after the above since it can't extract
# the subject key from the certificate until it's assigned.
cert_impl.add_extensions([
crypto.X509Extension(b"authorityKeyIdentifier",
False,
b"keyid:always",
issuer=cert_impl),
])
cert_impl.sign(key._impl, "sha256")
except Exception as e:
LOG.exception("Error creating X.509 certificate: {0}".format(e))
raise CryptoError("Failed to create X.509 certificate")
return self._save_to_stash(Certificate, cert_impl)
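# Hedged usage sketch (the subject name and lifetime are illustrative):
#   stash = CryptoStash(request)
#   stash.init_params()
#   key = stash.create_private_key()
#   cert = stash.create_x509_cert(key, "vm.example.org", 365)
#   assert cert.verify_key_pair(key)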
def import_x509_cert(self, upload):
"""Imports a certificate into the stash."""
if (upload.size < 0) or (upload.size > 262144):
raise CryptoError("Uploaded file too large - expected an X.509 certificate")
try:
if USE_NEW_CRYPTO_LIB:
cert_impl = x509.load_pem_x509_certificate(upload.read(),
default_backend())
else:
cert_impl = crypto.load_certificate(crypto.FILETYPE_PEM,
upload.read())
except Exception as e:
LOG.exception("Error importing X.509 certificate: {0}".format(e))
raise CryptoError("Import failed - expected a PEM encoded X.509 certificate")
return self._save_to_stash(Certificate, cert_impl)
def load_x509_cert(self, metadata):
"""Loads an existing certificate from the stash."""
if not isinstance(metadata, dict):
raise CryptoError("Metadata missing or invalid type when loading certificate")
cert = Certificate(None, self, metadata)
swift_obj = api.swift.swift_get_object(self._request,
nci_private_container_name(self._request),
cert.ref,
resp_chunk_size=None)
try:
if USE_NEW_CRYPTO_LIB:
cert._impl = x509.load_pem_x509_certificate(swift_obj.data,
default_backend())
else:
cert._impl = crypto.load_certificate(crypto.FILETYPE_PEM,
swift_obj.data)
except Exception as e:
LOG.exception("Error loading X.509 certificate: {0}".format(e))
raise CryptoError("Failed to load X.509 certificate with ref: {0}".format(cert.ref))
return cert
def delete(self, obj):
"""Deletes the given item from the stash."""
assert isinstance(obj, CryptoStashItem)
container = nci_private_container_name(self._request)
api.swift.swift_delete_object(self._request,
container,
obj.ref)
def swift_create_temp_url_key(request):
"""Assigns a secret key for generating temporary Swift URLs."""
try:
secret = base64.b64encode(os.urandom(32))
except Exception as e:
LOG.exception("Error generating temp URL key: {0}".format(e))
raise CryptoError("Failed to generate temporary URL key")
headers = { TEMP_URL_KEY_METADATA_HDR: secret }
api.swift.swift_api(request).post_account(headers)
# Workaround for Ceph bug #10668 which doesn't include the key in the
# returned metadata even though a value is assigned.
# http://tracker.ceph.com/issues/10668
# https://github.com/ceph/ceph/commit/80570e7b6c000f45d81ac3d05240b1f5c85ce125
metadata = api.swift.swift_api(request).head_account()
if TEMP_URL_KEY_METADATA_HDR.lower() not in metadata:
container = nci_private_container_name(request)
api.swift.swift_api(request).put_object(container,
"temp-url-key",
secret,
content_type="text/plain")
def swift_get_temp_url_key(request):
"""Retrieves the secret key for generating temporary Swift URLs."""
secret = None
container = nci_private_container_name(request)
metadata = api.swift.swift_api(request).head_account()
if TEMP_URL_KEY_METADATA_HDR.lower() in metadata:
secret = metadata[TEMP_URL_KEY_METADATA_HDR.lower()]
try:
if api.swift.swift_object_exists(request, container, "temp-url-key"):
api.swift.swift_delete_object(request,
container,
"temp-url-key")
except:
pass
else:
# See above notes on Ceph workaround.
if api.swift.swift_object_exists(request, container, "temp-url-key"):
swift_obj = api.swift.swift_get_object(request,
container,
"temp-url-key",
resp_chunk_size=None)
secret = swift_obj.data
return secret
# vim:ts=4 et sw=4 sts=4:
|
|
#!/usr/bin/env python
"""
sentry.utils.runner
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from logan.runner import run_app, configure_app
import base64
import os
import sys
import pkg_resources
import warnings
from functools import partial
USE_GEVENT = os.environ.get('USE_GEVENT') == '1'
SKIP_BACKEND_VALIDATION = os.environ.get('SKIP_BACKEND_VALIDATION') == '1'
KEY_LENGTH = 40
CONFIG_TEMPLATE = """
# This file is just Python, with a touch of Django which means
# you can inherit and tweak settings to your hearts content.
from sentry.conf.server import *
import os.path
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
'ENGINE': 'sentry.db.postgres',
'NAME': 'sentry',
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
# General #
###########
# The administrative email for this installation.
# Note: This will be reported back to getsentry.com as the point of contact. See
# the beacon documentation for more information. This **must** be a string.
# SENTRY_ADMIN_EMAIL = 'your.name@example.com'
SENTRY_ADMIN_EMAIL = ''
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = True
#########
# Redis #
#########
# Generic Redis configuration used as defaults for various things including:
# Buffers, Quotas, TSDB
SENTRY_REDIS_OPTIONS = {
'hosts': {
0: {
'host': '127.0.0.1',
'port': 6379,
}
}
}
#########
# Cache #
#########
# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.
# If you wish to use memcached, install the dependencies and adjust the config
# as shown:
#
# pip install python-memcached
#
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': ['127.0.0.1:11211'],
# }
# }
# A primary cache is required for things such as processing events
SENTRY_CACHE = 'sentry.cache.redis.RedisCache'
#########
# Queue #
#########
# See https://docs.getsentry.com/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
CELERY_ALWAYS_EAGER = False
BROKER_URL = 'redis://localhost:6379'
###############
# Rate Limits #
###############
# Rate limits apply to notification handlers and are enforced per-project
# automatically.
SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter'
##################
# Update Buffers #
##################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'
##########
# Quotas #
##########
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota'
########
# TSDB #
########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB'
################
# File storage #
################
# Any Django storage backend is compatible with Sentry. For more solutions see
# the django-storages package: https://django-storages.readthedocs.org/en/latest/
SENTRY_FILESTORE = 'django.core.files.storage.FileSystemStorage'
SENTRY_FILESTORE_OPTIONS = {
'location': '/tmp/sentry-files',
}
##############
# Web Server #
##############
# You MUST configure the absolute URI root for Sentry:
SENTRY_URL_PREFIX = 'http://sentry.example.com' # No trailing slash!
# If you're using a reverse proxy, you should enable the X-Forwarded-Proto
# header and uncomment the following settings
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SESSION_COOKIE_SECURE = True
# If you're not hosting at the root of your web server, and not using uWSGI,
# you need to uncomment and set it to the path where Sentry is hosted.
# FORCE_SCRIPT_NAME = '/sentry'
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
# 'workers': 3, # the number of gunicorn workers
# 'secure_scheme_headers': {'X-FORWARDED-PROTO': 'https'},
}
###############
# Mail Server #
###############
# For more information check Django's documentation:
# https://docs.djangoproject.com/en/1.6/topics/email/
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_USE_TLS = False
# The email address to send on behalf of
SERVER_EMAIL = 'root@localhost'
# If you're using mailgun for inbound mail, set your API key and configure a
# route to forward to /api/hooks/mailgun/inbound/
MAILGUN_API_KEY = ''
########
# etc. #
########
# If this file ever becomes compromised, it's important to regenerate your SECRET_KEY
# Changing this value will result in all current sessions being invalidated
SECRET_KEY = %(default_key)r
"""
def generate_settings():
"""
This command is run when ``default_path`` doesn't exist, or ``init`` is
run and returns a string representing the default data to put into their
settings file.
"""
output = CONFIG_TEMPLATE % dict(
default_key=base64.b64encode(os.urandom(KEY_LENGTH)),
)
return output
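# Illustrative note: the %-substitution above renders a line such as
#   SECRET_KEY = '...40 random bytes, base64-encoded...'
# into the generated config, which logan writes to the default config path
# (~/.sentry/sentry.conf.py, see configure()/main() below) when the settings
# file is first initialised.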
def install_plugin_apps(settings):
# entry_points={
# 'sentry.apps': [
# 'phabricator = sentry_phabricator'
# ],
# },
installed_apps = list(settings.INSTALLED_APPS)
for ep in pkg_resources.iter_entry_points('sentry.apps'):
installed_apps.append(ep.module_name)
settings.INSTALLED_APPS = tuple(installed_apps)
def register_plugins(settings):
from sentry.plugins import register
# entry_points={
# 'sentry.plugins': [
# 'phabricator = sentry_phabricator.plugins:PhabricatorPlugin'
# ],
# },
for ep in pkg_resources.iter_entry_points('sentry.plugins'):
try:
plugin = ep.load()
except Exception:
import sys
import traceback
sys.stderr.write("Failed to load plugin %r:\n%s\n" % (ep.name, traceback.format_exc()))
else:
register(plugin)
def initialize_receivers():
# force signal registration
import sentry.receivers # NOQA
def initialize_gevent():
from gevent import monkey
monkey.patch_all()
try:
import psycopg2 # NOQA
except ImportError:
pass
else:
from sentry.utils.gevent import make_psycopg_green
make_psycopg_green()
def get_asset_version(settings):
path = os.path.join(settings.STATIC_ROOT, 'version')
try:
with open(path) as fp:
return fp.read().strip()
except IOError:
from time import time
return int(time())
def initialize_app(config, skip_backend_validation=False):
settings = config['settings']
fix_south(settings)
apply_legacy_settings(settings)
install_plugin_apps(settings)
# Setups commonly don't configure themselves correctly for production environments,
# so let's try to provide a bit more guidance
if settings.CELERY_ALWAYS_EAGER and not settings.DEBUG:
warnings.warn('Sentry is configured to run asynchronous tasks in-process. '
'This is not recommended within production environments. '
'See https://docs.getsentry.com/on-premise/server/queue/ for more information.')
if settings.SENTRY_SINGLE_ORGANIZATION:
settings.SENTRY_FEATURES['organizations:create'] = False
settings.SUDO_COOKIE_SECURE = getattr(settings, 'SESSION_COOKIE_SECURE', False)
settings.SUDO_COOKIE_DOMAIN = getattr(settings, 'SESSION_COOKIE_DOMAIN', None)
settings.CACHES['default']['VERSION'] = settings.CACHE_VERSION
settings.ASSET_VERSION = get_asset_version(settings)
settings.STATIC_URL = settings.STATIC_URL.format(
version=settings.ASSET_VERSION,
)
if USE_GEVENT:
from django.db import connections
connections['default'].allow_thread_sharing = True
register_plugins(settings)
initialize_receivers()
if not (skip_backend_validation or SKIP_BACKEND_VALIDATION):
validate_backends()
from django.utils import timezone
from sentry.app import env
env.data['config'] = config.get('config_path')
env.data['start_date'] = timezone.now()
def validate_backends():
from sentry import app
app.buffer.validate()
app.nodestore.validate()
app.quotas.validate()
app.search.validate()
app.ratelimiter.validate()
app.tsdb.validate()
def fix_south(settings):
# South needs an adapter defined conditionally
if settings.DATABASES['default']['ENGINE'] != 'sentry.db.postgres':
return
settings.SOUTH_DATABASE_ADAPTERS = {
'default': 'south.db.postgresql_psycopg2'
}
def show_big_error(message):
sys.stderr.write('\n')
sys.stderr.write('\033[91m!! %s !!\033[0m\n' % ('!' * min(len(message), 80),))
sys.stderr.write('\033[91m!! %s !!\033[0m\n' % message)
sys.stderr.write('\033[91m!! %s !!\033[0m\n' % ('!' * min(len(message), 80),))
sys.stderr.write('\n')
def apply_legacy_settings(settings):
# SENTRY_USE_QUEUE used to determine if Celery was eager or not
if hasattr(settings, 'SENTRY_USE_QUEUE'):
warnings.warn('SENTRY_USE_QUEUE is deprecated. Please use CELERY_ALWAYS_EAGER instead. '
'See https://docs.getsentry.com/on-premise/server/queue/ for more information.', DeprecationWarning)
settings.CELERY_ALWAYS_EAGER = (not settings.SENTRY_USE_QUEUE)
if not settings.SENTRY_ADMIN_EMAIL:
show_big_error('SENTRY_ADMIN_EMAIL is not configured')
elif not isinstance(settings.SENTRY_ADMIN_EMAIL, basestring):
show_big_error('SENTRY_ADMIN_EMAIL must be a string')
if settings.SENTRY_URL_PREFIX in ('', 'http://sentry.example.com') and not settings.DEBUG:
# Maybe also point to a piece of documentation for more information?
# This directly coincides with users getting the awkward
# `ALLOWED_HOSTS` exception.
show_big_error('SENTRY_URL_PREFIX is not configured')
# Set `ALLOWED_HOSTS` to the catch-all so it works
settings.ALLOWED_HOSTS = ['*']
if settings.TIME_ZONE != 'UTC':
# non-UTC timezones are not supported
show_big_error('TIME_ZONE should be set to UTC')
# Set ALLOWED_HOSTS if it's not already available
if not settings.ALLOWED_HOSTS:
from urlparse import urlparse
urlbits = urlparse(settings.SENTRY_URL_PREFIX)
if urlbits.hostname:
settings.ALLOWED_HOSTS = (urlbits.hostname,)
if hasattr(settings, 'SENTRY_ALLOW_REGISTRATION'):
warnings.warn('SENTRY_ALLOW_REGISTRATION is deprecated. Use SENTRY_FEATURES instead.', DeprecationWarning)
settings.SENTRY_FEATURES['auth:register'] = settings.SENTRY_ALLOW_REGISTRATION
def skip_migration_if_applied(settings, app_name, table_name,
name='0001_initial'):
from south.migration import Migrations
from sentry.utils.db import table_exists
import types
if app_name not in settings.INSTALLED_APPS:
return
migration = Migrations(app_name)[name]
def skip_if_table_exists(original):
def wrapped(self):
# TODO: look into why we're having to return some ridiculous
# lambda
if table_exists(table_name):
return lambda x=None: None
return original()
wrapped.__name__ = original.__name__
return wrapped
migration.forwards = types.MethodType(
skip_if_table_exists(migration.forwards), migration)
def on_configure(config):
"""
Executes after settings are fully installed and configured.
At this point we can force the import of various things such as models,
as all of the settings should be correctly configured.
"""
settings = config['settings']
skip_migration_if_applied(
settings, 'kombu.contrib.django', 'djkombu_queue')
skip_migration_if_applied(
settings, 'social_auth', 'social_auth_association')
def configure(config_path=None, skip_backend_validation=False):
configure_app(
project='sentry',
config_path=config_path,
default_config_path='~/.sentry/sentry.conf.py',
default_settings='sentry.conf.server',
settings_initializer=generate_settings,
settings_envvar='SENTRY_CONF',
initializer=partial(
initialize_app, skip_backend_validation=skip_backend_validation),
on_configure=on_configure,
)
def main():
if USE_GEVENT:
sys.stderr.write("Configuring Sentry with gevent bindings\n")
initialize_gevent()
run_app(
project='sentry',
default_config_path='~/.sentry/sentry.conf.py',
default_settings='sentry.conf.server',
settings_initializer=generate_settings,
settings_envvar='SENTRY_CONF',
initializer=initialize_app,
)
if __name__ == '__main__':
main()
|
|
""" Cisco_IOS_XR_config_cfgmgr_exec_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR config\-cfgmgr\-exec package operational data.
This module contains definitions
for the following management objects\:
cfg\-hist\-gl\: Configuration History Global path information
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class HistRecordEnum(Enum):
"""
HistRecordEnum
Possible types of history
.. data:: cfghist_bag_record_all = 0
All history
.. data:: cfghist_bag_record_alarm = 1
Alarm history
.. data:: cfghist_bag_record_cfs_check = 2
CfgCheck history
.. data:: cfghist_bag_record_commit = 3
Commit history
.. data:: cfghist_bag_record_oir = 4
OIR history
.. data:: cfghist_bag_record_shutdown = 5
Shutdown history
.. data:: cfghist_bag_record_startup = 6
Bootup history
.. data:: cfghist_bag_record_backup = 7
Backup history
.. data:: cfghist_bag_record_rebase = 8
Rebase history
.. data:: cfghist_bag_record_last = 9
Last history
"""
cfghist_bag_record_all = 0
cfghist_bag_record_alarm = 1
cfghist_bag_record_cfs_check = 2
cfghist_bag_record_commit = 3
cfghist_bag_record_oir = 4
cfghist_bag_record_shutdown = 5
cfghist_bag_record_startup = 6
cfghist_bag_record_backup = 7
cfghist_bag_record_rebase = 8
cfghist_bag_record_last = 9
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['HistRecordEnum']
class CfgHistGl(object):
"""
Configuration History Global path information
.. attribute:: record_type
History summary information for a specific type of history
**type**\: list of :py:class:`RecordType <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.CfgHistGl.RecordType>`
"""
_prefix = 'config-cfgmgr-exec-oper'
_revision = '2015-11-09'
def __init__(self):
self.record_type = YList()
self.record_type.parent = self
self.record_type.name = 'record_type'
class RecordType(object):
"""
History summary information for a specific type
of history
.. attribute:: record_type <key>
Record type
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: record
History summary information for a specific type of history
**type**\: list of :py:class:`Record <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.CfgHistGl.RecordType.Record>`
"""
_prefix = 'config-cfgmgr-exec-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.record_type = None
self.record = YList()
self.record.parent = self
self.record.name = 'record'
class Record(object):
"""
History summary information for a specific type
of history
.. attribute:: record <key>
Record
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: info
Content of the history
**type**\: :py:class:`Info <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.CfgHistGl.RecordType.Record.Info>`
.. attribute:: record_type
Record type
**type**\: :py:class:`HistRecordEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.HistRecordEnum>`
.. attribute:: timestamp
Time stamp for the history
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'config-cfgmgr-exec-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.record = None
self.info = CfgHistGl.RecordType.Record.Info()
self.info.parent = self
self.record_type = None
self.timestamp = None
class Info(object):
"""
Content of the history
.. attribute:: a
B
**type**\: int
**range:** 0..4294967295
.. attribute:: alarm_info
alarm info
**type**\: :py:class:`AlarmInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.CfgHistGl.RecordType.Record.Info.AlarmInfo>`
.. attribute:: backup_info
backup info
**type**\: :py:class:`BackupInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.CfgHistGl.RecordType.Record.Info.BackupInfo>`
.. attribute:: cfscheck_info
cfscheck info
**type**\: :py:class:`CfscheckInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.CfgHistGl.RecordType.Record.Info.CfscheckInfo>`
.. attribute:: commit_info
commit info
**type**\: :py:class:`CommitInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.CfgHistGl.RecordType.Record.Info.CommitInfo>`
.. attribute:: oir_info
oir info
**type**\: :py:class:`OirInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.CfgHistGl.RecordType.Record.Info.OirInfo>`
.. attribute:: shutdown_info
shutdown info
**type**\: :py:class:`ShutdownInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.CfgHistGl.RecordType.Record.Info.ShutdownInfo>`
.. attribute:: startup_info
startup info
**type**\: :py:class:`StartupInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.CfgHistGl.RecordType.Record.Info.StartupInfo>`
.. attribute:: type
type
**type**\: :py:class:`HistRecordEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper.HistRecordEnum>`
"""
_prefix = 'config-cfgmgr-exec-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.a = None
self.alarm_info = CfgHistGl.RecordType.Record.Info.AlarmInfo()
self.alarm_info.parent = self
self.backup_info = CfgHistGl.RecordType.Record.Info.BackupInfo()
self.backup_info.parent = self
self.cfscheck_info = CfgHistGl.RecordType.Record.Info.CfscheckInfo()
self.cfscheck_info.parent = self
self.commit_info = CfgHistGl.RecordType.Record.Info.CommitInfo()
self.commit_info.parent = self
self.oir_info = CfgHistGl.RecordType.Record.Info.OirInfo()
self.oir_info.parent = self
self.shutdown_info = CfgHistGl.RecordType.Record.Info.ShutdownInfo()
self.shutdown_info.parent = self
self.startup_info = CfgHistGl.RecordType.Record.Info.StartupInfo()
self.startup_info.parent = self
self.type = None
class AlarmInfo(object):
"""
alarm info
.. attribute:: state
State
**type**\: str
.. attribute:: where
Where
**type**\: str
"""
_prefix = 'config-cfgmgr-exec-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.state = None
self.where = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-config-cfgmgr-exec-oper:alarm-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.state is not None:
return True
if self.where is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['CfgHistGl.RecordType.Record.Info.AlarmInfo']['meta_info']
class CfscheckInfo(object):
"""
cfscheck info
.. attribute:: line
Line
**type**\: str
.. attribute:: user_id
UserId
**type**\: str
"""
_prefix = 'config-cfgmgr-exec-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.line = None
self.user_id = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-config-cfgmgr-exec-oper:cfscheck-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.line is not None:
return True
if self.user_id is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['CfgHistGl.RecordType.Record.Info.CfscheckInfo']['meta_info']
class CommitInfo(object):
"""
commit info
.. attribute:: client_name
Client name
**type**\: str
.. attribute:: comment
Comment
**type**\: str
.. attribute:: commit_id
CommitId
**type**\: str
.. attribute:: label
Label
**type**\: str
.. attribute:: line
Line
**type**\: str
.. attribute:: user_id
UserId
**type**\: str
"""
_prefix = 'config-cfgmgr-exec-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.client_name = None
self.comment = None
self.commit_id = None
self.label = None
self.line = None
self.user_id = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-config-cfgmgr-exec-oper:commit-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.client_name is not None:
return True
if self.comment is not None:
return True
if self.commit_id is not None:
return True
if self.label is not None:
return True
if self.line is not None:
return True
if self.user_id is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['CfgHistGl.RecordType.Record.Info.CommitInfo']['meta_info']
class OirInfo(object):
"""
oir info
.. attribute:: config_name
Config Name
**type**\: str
.. attribute:: config_type
Config Type
**type**\: str
.. attribute:: operation
Operation
**type**\: str
"""
_prefix = 'config-cfgmgr-exec-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.config_name = None
self.config_type = None
self.operation = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-config-cfgmgr-exec-oper:oir-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.config_name is not None:
return True
if self.config_type is not None:
return True
if self.operation is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['CfgHistGl.RecordType.Record.Info.OirInfo']['meta_info']
class ShutdownInfo(object):
"""
shutdown info
.. attribute:: comment
Comment
**type**\: str
"""
_prefix = 'config-cfgmgr-exec-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.comment = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-config-cfgmgr-exec-oper:shutdown-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.comment is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['CfgHistGl.RecordType.Record.Info.ShutdownInfo']['meta_info']
class StartupInfo(object):
"""
startup info
.. attribute:: boot_path
Boot Path
**type**\: str
.. attribute:: how_booted
How Booted
**type**\: str
"""
_prefix = 'config-cfgmgr-exec-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.boot_path = None
self.how_booted = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-config-cfgmgr-exec-oper:startup-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.boot_path is not None:
return True
if self.how_booted is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['CfgHistGl.RecordType.Record.Info.StartupInfo']['meta_info']
class BackupInfo(object):
"""
backup info
.. attribute:: comment
Comment
**type**\: str
"""
_prefix = 'config-cfgmgr-exec-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.comment = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-config-cfgmgr-exec-oper:backup-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.comment is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['CfgHistGl.RecordType.Record.Info.BackupInfo']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-config-cfgmgr-exec-oper:info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.a is not None:
return True
if self.alarm_info is not None and self.alarm_info._has_data():
return True
if self.backup_info is not None and self.backup_info._has_data():
return True
if self.cfscheck_info is not None and self.cfscheck_info._has_data():
return True
if self.commit_info is not None and self.commit_info._has_data():
return True
if self.oir_info is not None and self.oir_info._has_data():
return True
if self.shutdown_info is not None and self.shutdown_info._has_data():
return True
if self.startup_info is not None and self.startup_info._has_data():
return True
if self.type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['CfgHistGl.RecordType.Record.Info']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.record is None:
raise YPYModelError('Key property record is None')
return self.parent._common_path +'/Cisco-IOS-XR-config-cfgmgr-exec-oper:record[Cisco-IOS-XR-config-cfgmgr-exec-oper:record = ' + str(self.record) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.record is not None:
return True
if self.info is not None and self.info._has_data():
return True
if self.record_type is not None:
return True
if self.timestamp is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['CfgHistGl.RecordType.Record']['meta_info']
@property
def _common_path(self):
if self.record_type is None:
raise YPYModelError('Key property record_type is None')
return '/Cisco-IOS-XR-config-cfgmgr-exec-oper:cfg-hist-gl/Cisco-IOS-XR-config-cfgmgr-exec-oper:record-type[Cisco-IOS-XR-config-cfgmgr-exec-oper:record-type = ' + str(self.record_type) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.record_type is not None:
return True
if self.record is not None:
for child_ref in self.record:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['CfgHistGl.RecordType']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-config-cfgmgr-exec-oper:cfg-hist-gl'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.record_type is not None:
for child_ref in self.record_type:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_config_cfgmgr_exec_oper as meta
return meta._meta_table['CfgHistGl']['meta_info']
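# Illustrative usage sketch (not part of the generated bindings): in practice these
# objects are populated by a YDK service provider when reading operational data from
# a device, but they can also be built by hand using only the classes above, e.g.:
#
#     hist = CfgHistGl()
#     commits = CfgHistGl.RecordType()
#     commits.record_type = 'commit'
#     rec = CfgHistGl.RecordType.Record()
#     rec.record = 1
#     rec.record_type = HistRecordEnum.cfghist_bag_record_commit
#     commits.record.append(rec)
#     hist.record_type.append(commits)
#     assert hist._has_data()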
|
|
##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.python.clsprop import classproperty
import txweb2.dav.test.util
from txweb2 import http_headers, responsecode
from txweb2.dav.util import allDataFromStream
from txweb2.test.test_server import SimpleRequest
from twisted.internet.defer import inlineCallbacks, succeed
from txdav.caldav.datastore.scheduling.ischedule.localservers import (
ServersDB, Server
)
from txdav.common.datastore.podding.resource import ConduitResource
from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
import json
from txdav.common.datastore.podding.conduit import PoddingConduit
class ConduitPOST (CommonCommonTests, txweb2.dav.test.util.TestCase):
class FakeConduit(PoddingConduit):
def recv_fake(self, txn, j):
return succeed({
"back2u": j["echo"],
"more": "bits",
})
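# The tests below rely on the conduit dispatching the JSON "action" value to a
# matching recv_* method, so "fake" is handled by recv_fake above while an unknown
# action such as "bogus" is rejected with BAD_REQUEST. The recv_* naming convention
# is inferred from the behaviour exercised here rather than restated from the
# PoddingConduit implementation.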
@inlineCallbacks
def setUp(self):
yield super(ConduitPOST, self).setUp()
serversDB = ServersDB()
self.thisServer = Server("A", "http://127.0.0.1", "A", True)
serversDB.addServer(self.thisServer)
yield self.buildStoreAndDirectory(serversDB=serversDB)
self.site.resource.putChild("conduit", ConduitResource(self.site.resource, self.storeUnderTest()))
yield self.populate()
@inlineCallbacks
def populate(self):
yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
self.notifierFactory.reset()
@classproperty(cache=False)
def requirements(cls): #@NoSelf
return {
"user01": {
"calendar_1": {
},
"inbox": {
},
},
"user02": {
"calendar_1": {
},
"inbox": {
},
},
"user03": {
"calendar_1": {
},
"inbox": {
},
},
}
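# The requirements mapping above is consumed by populate() via populateCalendarsFrom()
# and seeds an empty "calendar_1" and "inbox" collection for each of user01, user02
# and user03 before every test.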
@inlineCallbacks
def test_receive_no_secret(self):
"""
Cross-pod request fails when there is no shared secret header present.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("text/plain",)
}),
content="""Hello, World!
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.FORBIDDEN)
@inlineCallbacks
def test_receive_wrong_mime(self):
"""
Cross-pod request fails when Content-Type header is wrong.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("text/plain",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""Hello, World!
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_invalid_json(self):
"""
Cross-pod request fails when request data is not JSON.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""Hello, World!
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_bad_json(self):
"""
Cross-pod request fails when JSON data does not have an "action".
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"foo":"bar"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_ping(self):
"""
Cross-pod request works with the "ping" action.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"action":"ping"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.OK)
data = (yield allDataFromStream(response.stream))
j = json.loads(data)
self.assertTrue("result" in j)
self.assertEqual(j["result"], "ok")
@inlineCallbacks
def test_receive_fake_conduit_no_action(self):
"""
Cross-pod request fails when conduit does not support the action.
"""
store = self.storeUnderTest()
self.patch(store, "conduit", self.FakeConduit(store))
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"action":"bogus",
"echo":"bravo"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_fake_conduit(self):
"""
Cross-pod request works when conduit does support the action.
"""
store = self.storeUnderTest()
self.patch(store, "conduit", self.FakeConduit(store))
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"action":"fake",
"echo":"bravo"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.OK)
data = (yield allDataFromStream(response.stream))
j = json.loads(data)
self.assertTrue("result" in j)
self.assertEqual(j["result"], "ok")
self.assertTrue("value" in j)
self.assertEqual(j["value"], {"back2u": "bravo", "more": "bits"})
|
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import unittest
from azure.common import (
AzureHttpError,
AzureException,
)
from tests.testcase import (
StorageTestCase,
record,
TestMode,
)
from azure.storage.queue import (
QueueService,
)
from tests.test_encryption_helper import (
KeyWrapper,
KeyResolver,
RSAKeyWrapper,
)
from os import urandom
from json import (
loads,
dumps,
)
from azure.storage._encryption import (
_WrappedContentKey,
_EncryptionAgent,
_EncryptionData,
)
from base64 import(
b64encode,
b64decode,
)
from azure.storage._error import(
_ERROR_VALUE_NONE,
_ERROR_OBJECT_INVALID,
_ERROR_DECRYPTION_FAILURE,
_ERROR_DATA_NOT_ENCRYPTED,
_ERROR_ENCRYPTION_REQUIRED,
)
from azure.storage._constants import __version__
from azure.storage.queue.models import QueueMessageFormat
from cryptography.hazmat import backends
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from cryptography.hazmat.primitives.padding import PKCS7
from cryptography.hazmat.primitives.ciphers import Cipher
from azure.storage._common_conversion import _decode_base64_to_bytes
#------------------------------------------------------------------------------
TEST_QUEUE_PREFIX = 'encryptionqueue'
#------------------------------------------------------------------------------
class StorageQueueEncryptionTest(StorageTestCase):
def setUp(self):
super(StorageQueueEncryptionTest, self).setUp()
self.qs = self._create_storage_service(QueueService, self.settings)
self.test_queues = []
def tearDown(self):
if not self.is_playback():
for queue_name in self.test_queues:
try:
self.qs.delete_queue(queue_name)
except:
pass
return super(StorageQueueEncryptionTest, self).tearDown()
#--Helpers-----------------------------------------------------------------
def _get_queue_reference(self, prefix=TEST_QUEUE_PREFIX):
queue_name = self.get_resource_name(prefix)
self.test_queues.append(queue_name)
return queue_name
def _create_queue(self, prefix=TEST_QUEUE_PREFIX):
queue_name = self._get_queue_reference(prefix)
self.qs.create_queue(queue_name)
return queue_name
#--------------------------------------------------------------------------
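# The key-encryption-key objects used by these tests are expected to expose
# wrap_key, get_key_wrap_algorithm and get_kid when encrypting, and unwrap_key
# plus get_kid when decrypting; the test_missing_attribute_* and
# test_invalid_value_* cases below check that omitting or clearing any of these
# attributes is rejected.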
@record
def test_get_messages_encrypted_kek(self):
# Arrange
self.qs.key_encryption_key = KeyWrapper('key1')
queue_name = self._create_queue()
self.qs.put_message(queue_name, u'encrypted_message_2')
# Act
li = self.qs.get_messages(queue_name)
# Assert
self.assertEqual(li[0].content, u'encrypted_message_2')
@record
def test_get_messages_encrypted_resolver(self):
# Arrange
self.qs.key_encryption_key = KeyWrapper('key1')
queue_name = self._create_queue()
self.qs.put_message(queue_name, u'encrypted_message_2')
key_resolver = KeyResolver()
key_resolver.put_key(self.qs.key_encryption_key)
self.qs.key_resolver_function = key_resolver.resolve_key
self.qs.key_encryption_key = None #Ensure that the resolver is used
# Act
li = self.qs.get_messages(queue_name)
# Assert
self.assertEqual(li[0].content, u'encrypted_message_2')
@record
def test_peek_messages_encrypted_kek(self):
# Arrange
self.qs.key_encryption_key = KeyWrapper('key1')
queue_name = self._create_queue()
self.qs.put_message(queue_name, u'encrypted_message_3')
# Act
li = self.qs.peek_messages(queue_name)
# Assert
self.assertEqual(li[0].content, u'encrypted_message_3')
@record
def test_peek_messages_encrypted_resolver(self):
# Arrange
self.qs.key_encryption_key = KeyWrapper('key1')
queue_name = self._create_queue()
self.qs.put_message(queue_name, u'encrypted_message_4')
key_resolver = KeyResolver()
key_resolver.put_key(self.qs.key_encryption_key)
self.qs.key_resolver_function = key_resolver.resolve_key
self.qs.key_encryption_key = None #Ensure that the resolver is used
# Act
li = self.qs.peek_messages(queue_name)
# Assert
self.assertEqual(li[0].content, u'encrypted_message_4')
def test_peek_messages_encrypted_kek_RSA(self):
# We can only generate random RSA keys, so this must be run live or
# the playback test will fail due to a change in kek values.
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
self.qs.key_encryption_key = RSAKeyWrapper('key2')
queue_name = self._create_queue()
self.qs.put_message(queue_name, u'encrypted_message_3')
# Act
li = self.qs.peek_messages(queue_name)
# Assert
self.assertEqual(li[0].content, u'encrypted_message_3')
@record
def test_update_encrypted_message(self):
# Arrange
queue_name = self._create_queue()
self.qs.key_encryption_key = KeyWrapper('key1')
self.qs.put_message(queue_name, u'Update Me')
list_result1 = self.qs.get_messages(queue_name)
# Act
message = self.qs.update_message(queue_name,
list_result1[0].id,
list_result1[0].pop_receipt,
0,
content = u'Updated',)
list_result2 = self.qs.get_messages(queue_name)
# Assert
message = list_result2[0]
self.assertEqual(u'Updated', message.content)
@record
def test_update_encrypted_binary_message(self):
# Arrange
queue_name = self._create_queue()
self.qs.key_encryption_key = KeyWrapper('key1')
self.qs.encode_function = QueueMessageFormat.binary_base64encode
self.qs.decode_function = QueueMessageFormat.binary_base64decode
binary_message = self.get_random_bytes(100)
self.qs.put_message(queue_name, binary_message)
list_result1 = self.qs.get_messages(queue_name)
# Act
binary_message = self.get_random_bytes(100)
self.qs.update_message(queue_name,
list_result1[0].id,
list_result1[0].pop_receipt,
0,
content = binary_message,)
list_result2 = self.qs.get_messages(queue_name)
# Assert
message = list_result2[0]
self.assertEqual(binary_message, message.content)
@record
def test_update_encrypted_raw_text_message(self):
# Arrange
queue_name = self._create_queue()
self.qs.key_encryption_key = KeyWrapper('key1')
self.qs.encode_function = QueueMessageFormat.noencode
self.qs.decode_function = QueueMessageFormat.nodecode
raw_text = u'Update Me'
self.qs.put_message(queue_name, raw_text)
list_result1 = self.qs.get_messages(queue_name)
# Act
raw_text = u'Updated'
self.qs.update_message(queue_name,
list_result1[0].id,
list_result1[0].pop_receipt,
0,
content = raw_text,)
list_result2 = self.qs.get_messages(queue_name)
# Assert
message = list_result2[0]
self.assertEqual(raw_text, message.content)
@record
def test_update_encrypted_json_message(self):
# Arrange
queue_name = self._create_queue()
self.qs.key_encryption_key = KeyWrapper('key1')
self.qs.encode_function = QueueMessageFormat.noencode
self.qs.decode_function = QueueMessageFormat.nodecode
message_dict = {'val1': 1, 'val2':'2'}
json_text = dumps(message_dict)
self.qs.put_message(queue_name, json_text)
list_result1 = self.qs.get_messages(queue_name)
# Act
message_dict['val1'] = 0
message_dict['val2'] = 'updated'
json_text = dumps(message_dict)
self.qs.update_message(queue_name,
list_result1[0].id,
list_result1[0].pop_receipt,
0,
content = json_text,)
list_result2 = self.qs.get_messages(queue_name)
# Assert
message = list_result2[0]
self.assertEqual(message_dict, loads(message.content))
@record
def test_invalid_value_kek_wrap(self):
# Arrange
queue_name = self._create_queue()
self.qs.key_encryption_key = KeyWrapper('key1')
self.qs.key_encryption_key.get_kid = None
try:
self.qs.put_message(queue_name, u'message')
self.fail()
except AttributeError as e:
self.assertEqual(str(e), _ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
self.qs.key_encryption_key = KeyWrapper('key1')
self.qs.key_encryption_key.get_kid = None
with self.assertRaises(AttributeError):
self.qs.put_message(queue_name, u'message')
self.qs.key_encryption_key = KeyWrapper('key1')
self.qs.key_encryption_key.wrap_key = None
with self.assertRaises(AttributeError):
self.qs.put_message(queue_name, u'message')
@record
def test_missing_attribute_kek_wrap(self):
# Arrange
queue_name = self._create_queue()
valid_key = KeyWrapper('key1')
# Act
invalid_key_1 = lambda: None #functions are objects, so this effectively creates an empty object
invalid_key_1.get_key_wrap_algorithm = valid_key.get_key_wrap_algorithm
invalid_key_1.get_kid = valid_key.get_kid
#No attribute wrap_key
self.qs.key_encryption_key = invalid_key_1
with self.assertRaises(AttributeError):
self.qs.put_message(queue_name, u'message')
invalid_key_2 = lambda: None #functions are objects, so this effectively creates an empty object
invalid_key_2.wrap_key = valid_key.wrap_key
invalid_key_2.get_kid = valid_key.get_kid
#No attribute get_key_wrap_algorithm
self.qs.key_encryption_key = invalid_key_2
with self.assertRaises(AttributeError):
self.qs.put_message(queue_name, u'message')
invalid_key_3 = lambda: None #functions are objects, so this effectively creates an empty object
invalid_key_3.get_key_wrap_algorithm = valid_key.get_key_wrap_algorithm
invalid_key_3.wrap_key = valid_key.wrap_key
#No attribute get_kid
self.qs.key_encryption_key = invalid_key_3
with self.assertRaises(AttributeError):
self.qs.put_message(queue_name, u'message')
@record
def test_invalid_value_kek_unwrap(self):
# Arrange
queue_name = self._create_queue()
self.qs.key_encryption_key = KeyWrapper('key1')
self.qs.put_message(queue_name, u'message')
# Act
self.qs.key_encryption_key.unwrap_key = None
with self.assertRaises(AzureException):
self.qs.peek_messages(queue_name)
self.qs.key_encryption_key.get_kid = None
with self.assertRaises(AzureException):
self.qs.peek_messages(queue_name)
@record
def test_missing_attribute_kek_unwrap(self):
# Arrange
queue_name = self._create_queue()
self.qs.key_encryption_key = KeyWrapper('key1')
self.qs.put_message(queue_name, u'message')
# Act
valid_key = KeyWrapper('key1')
invalid_key_1 = lambda: None #functions are objects, so this effectively creates an empty object
invalid_key_1.unwrap_key = valid_key.unwrap_key
#No attribute get_kid
self.qs.key_encryption_key = invalid_key_1
try:
self.qs.peek_messages(queue_name)
self.fail()
except AzureException as e:
self.assertEqual(str(e),_ERROR_DECRYPTION_FAILURE)
invalid_key_2 = lambda: None #functions are objects, so this effectively creates an empty object
invalid_key_2.get_kid = valid_key.get_kid
#No attribute unwrap_key
self.qs.key_encryption_key = invalid_key_2
with self.assertRaises(AzureException):
self.qs.peek_messages(queue_name)
@record
def test_validate_encryption(self):
# Arrange
queue_name = self._create_queue()
kek = KeyWrapper('key1')
self.qs.key_encryption_key = kek
self.qs.put_message(queue_name, u'message')
# Act
self.qs.key_encryption_key = None # Message will not be decrypted
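# With decryption disabled, the raw message body is the encryption envelope: a JSON
# object whose 'EncryptionData' holds the 'WrappedContentKey', 'EncryptionAgent' and
# 'ContentEncryptionIV', alongside the base64-encoded 'EncryptedMessageContents'.
# The code below unwraps the content key and decrypts the payload manually with
# AES-CBC plus PKCS7 unpadding to prove the stored message really was encrypted.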
li = self.qs.peek_messages(queue_name)
message = li[0].content
message = loads(message)
encryption_data = message['EncryptionData']
wrapped_content_key = encryption_data['WrappedContentKey']
wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
b64decode(wrapped_content_key['EncryptedKey'].encode(encoding='utf-8')),
wrapped_content_key['KeyId'])
encryption_agent = encryption_data['EncryptionAgent']
encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
encryption_agent['Protocol'])
encryption_data = _EncryptionData(b64decode(encryption_data['ContentEncryptionIV'].encode(encoding='utf-8')),
encryption_agent,
wrapped_content_key,
{'EncryptionLibrary':__version__})
message = message['EncryptedMessageContents']
content_encryption_key = kek.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
encryption_data.wrapped_content_key.algorithm)
#Create decryption cipher
backend = backends.default_backend()
algorithm = AES(content_encryption_key)
mode = CBC(encryption_data.content_encryption_IV)
cipher = Cipher(algorithm, mode, backend)
#decode and decrypt data
decrypted_data = _decode_base64_to_bytes(message)
decryptor = cipher.decryptor()
decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
#unpad data
unpadder = PKCS7(128).unpadder()
decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
decrypted_data = decrypted_data.decode(encoding='utf-8')
# Assert
self.assertEqual(decrypted_data, u'message')
@record
def test_put_with_strict_mode(self):
# Arrange
queue_name = self._create_queue()
kek = KeyWrapper('key1')
self.qs.key_encryption_key = kek
self.qs.require_encryption = True
self.qs.put_message(queue_name, u'message')
self.qs.key_encryption_key = None
# Assert
try:
self.qs.put_message(queue_name, u'message')
self.fail()
except ValueError as e:
self.assertEqual(str(e), _ERROR_ENCRYPTION_REQUIRED)
@record
def test_get_with_strict_mode(self):
# Arrange
queue_name = self._create_queue()
self.qs.put_message(queue_name, u'message')
self.qs.require_encryption = True
self.qs.key_encryption_key = KeyWrapper('key1')
try:
self.qs.get_messages(queue_name)
self.fail()
except ValueError as e:
self.assertEqual(str(e), _ERROR_DATA_NOT_ENCRYPTED)
@record
def test_encryption_add_encrypted_64k_message(self):
# Arrange
queue_name = self._create_queue()
message = u'a'*1024*64
# Act
self.qs.put_message(queue_name, message)
# Assert
self.qs.key_encryption_key = KeyWrapper('key1')
with self.assertRaises(AzureHttpError):
self.qs.put_message(queue_name, message)
@record
def test_encryption_nonmatching_kid(self):
# Arrange
queue_name = self._create_queue()
self.qs.key_encryption_key = KeyWrapper('key1')
self.qs.put_message(queue_name, u'message')
# Act
self.qs.key_encryption_key.kid = 'Invalid'
# Assert
try:
self.qs.get_messages(queue_name)
self.fail()
except AzureException as e:
self.assertEqual(str(e), _ERROR_DECRYPTION_FAILURE)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/python
from azuremodules import *
import argparse
import sys
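# Verifies that a provisioned Azure Linux image is configured as expected for the
# given distro (sudoers targetpw, grub kernel options, NetworkManager, sysconfig
# network files, udev rules and package repositories). The distro name is passed
# in upper case via the required -d/--distro argument, e.g. -d UBUNTU.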
#for error checking
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--distro', help='Please mention which distro you are testing', required=True, type = str)
args = parser.parse_args()
distro = args.distro
def verify_default_targetpw(distro):
RunLog.info("Checking Defaults targetpw is commented or not..")
sudoers_out = Run("cat /etc/sudoers")
if "Defaults targetpw" in sudoers_out:
if "#Defaults targetpw" in sudoers_out:
print(distro+"_TEST_SUDOERS_VERIFICATION_SUCCESS")
RunLog.info("Defaults targetpw is commented")
return True
else:
RunLog.error("Defaults targetpw is present in /etc sudoers but it is not commented.")
print(distro+"_TEST_SUDOERS_VERIFICATION_FAIL")
return False
else:
RunLog.info("Defaults targetpw is not present in /etc/sudoers")
print(distro+"_TEST_SUDOERS_VERIFICATION_SUCCESS")
return True
def verify_grub(distro):
import os.path
RunLog.info("Checking console=ttyS0 rootdelay=300..")
if distro == "UBUNTU":
grub_out = Run("cat /etc/default/grub")
if distro == "SUSE":
if os.path.exists("/boot/grub2/grub.cfg"):
grub_out = Run("cat /boot/grub2/grub.cfg")
elif os.path.exists("/boot/grub/grub.conf"):
grub_out = Run("cat /boot/grub/grub.conf")
else:
RunLog.error("Unable to locate grub file")
print(distro+"_TEST_GRUB_VERIFICATION_FAIL")
return False
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT" or distro == "SLES" or distro == "FEDORA":
if os.path.isfile("/boot/grub2/grub.cfg"):
RunLog.info("Getting Contents of /boot/grub2/grub.cfg")
grub_out = Run("cat /boot/grub2/grub.cfg")
elif os.path.isfile("/boot/grub/menu.lst"):
RunLog.info("Getting Contents of /boot/grub/menu.lst")
grub_out = Run("cat /boot/grub/menu.lst")
else:
RunLog.error("Unable to locate grub file")
print(distro+"_TEST_GRUB_VERIFICATION_FAIL")
return False
if distro == "COREOS":
#On CoreOS we don't have access to the boot partition, so check the kernel command line from dmesg instead
grub_out = Run("dmesg")
if "console=ttyS0" in grub_out and "rootdelay=300" in grub_out and "libata.atapi_enabled=0" not in grub_out and "reserve=0x1f0,0x8" not in grub_out:
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT":
# check numa=off in grub for CentOS 6.x and Oracle Linux 6.x
version_release = Run("cat /etc/system-release | grep -Eo '[0-9].?[0-9]?' | head -1 | tr -d '\n'")
if float(version_release) < 6.6:
if "numa=off" in grub_out:
print(distro+"_TEST_GRUB_VERIFICATION_SUCCESS")
else:
RunLog.error("numa=off not present in etc/default/grub")
print(distro+"_TEST_GRUB_VERIFICATION_FAIL")
else:
print(distro+"_TEST_GRUB_VERIFICATION_SUCCESS")
else:
print(distro+"_TEST_GRUB_VERIFICATION_SUCCESS")
return True
else:
print(distro+"_TEST_GRUB_VERIFICATION_FAIL")
if "console=ttyS0" not in grub_out:
RunLog.error("console=ttyS0 not present")
if "rootdelay=300" not in grub_out:
RunLog.error("rootdelay=300 not present")
if "libata.atapi_enabled=0" in grub_out:
RunLog.error("libata.atapi_enabled=0 is present")
if "reserve=0x1f0,0x8" in grub_out:
RunLog.error("reserve=0x1f0,0x8 is present")
return False
def verify_network_manager(distro):
RunLog.info("Verifying that network manager is not installed")
n_out = Run ("rpm -q NetworkManager")
if "is not installed" in n_out:
RunLog.info("Network Manager is not installed")
print(distro+"_TEST_NETWORK_MANAGER_NOT_INSTALLED")
return True
else:
# NetworkManager package no longer conflicts with the waagent on CentOS 7.0+ and Oracle Linux 7.0+
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT":
version_release = Run("cat /etc/system-release | grep -Eo '[0-9].?[0-9]?' | head -1 | tr -d '\n'")
if float(version_release) < 7.0:
RunLog.error("Network Manager is installed")
print(distro+"_TEST_NETWORK_MANAGER_INSTALLED")
return False
else:
RunLog.info("Network Manager is installed but not confict with waagent.")
print(distro+"_TEST_NETWORK_MANAGER_NOT_INSTALLED")
return True
else:
RunLog.error("Network Manager is installed")
print(distro+"_TEST_NETWORK_MANAGER_INSTALLED")
return False
def verify_network_file_in_sysconfig(distro):
import os.path
RunLog.info("Checking if network file exists in /etc/sysconfig")
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT" or distro == "FEDORA":
if os.path.isfile("/etc/sysconfig/network"):
RunLog.info("File Exists.")
n_out = Run("cat /etc/sysconfig/network")
if "networking=yes".upper() in n_out.upper():
RunLog.info("NETWORKING=yes present in network file")
print(distro+"_TEST_NETWORK_FILE_SUCCESS")
return True
else:
RunLog.error("NETWORKING=yes not present in network file")
print(distro+"_TEST_NETWORK_FILE_ERROR")
return False
else:
RunLog.error("File not present")
print(distro+"_TEST_NETWORK_FILE_ERROR")
return False
def verify_ifcfg_eth0(distro):
RunLog.info("Verifying contents of ifcfg-eth0 file")
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT" or distro == "FEDORA":
i_out = Run("cat /etc/sysconfig/network-scripts/ifcfg-eth0")
i_out = i_out.replace('"','')
#if "DEVICE=eth0" in i_out and "ONBOOT=yes" in i_out and "BOOTPROTO=dhcp" in i_out and "DHCP=yes" in i_out:
if "DEVICE=eth0" in i_out and "ONBOOT=yes" in i_out and "BOOTPROTO=dhcp" in i_out :
RunLog.info("all required parameters exists.")
print(distro+"_TEST_IFCFG_ETH0_FILE_SUCCESS")
return True
else:
if "DEVICE=eth0" not in i_out:
RunLog.error("DEVICE=eth0 not present in ifcfg-eth0")
if "ONBOOT=yes" not in i_out:
RunLog.error("ONBOOT=yes not present in ifcfg-eth0")
if "BOOTPROTO=dhcp" not in i_out:
RunLog.error("BOOTPROTO=dhcp not present in ifcfg-eth0")
#if "DHCP=yes" not in i_out:
# RunLog.error("DHCP=yes not present in ifcfg-eth0")
print(distro+"_TEST_IFCFG_ETH0_FILE_ERROR")
return False
def verify_udev_rules(distro):
import os.path
RunLog.info("Verifying if udev rules are moved to /var/lib/waagent/")
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT" or distro == "FEDORA":
if not os.path.isfile("/lib/udev/rules.d/75-persistent-net-generator.rules") and not os.path.isfile("/etc/udev/rules.d/70-persistent-net.rules"):
RunLog.info("rules are moved.")
print(distro+"_TEST_UDEV_RULES_SUCCESS")
return True
else:
if os.path.isfile("/lib/udev/rules.d/75-persistent-net-generator.rules"):
RunLog.error("/lib/udev/rules.d/75-persistent-net-generator.rules file present")
if os.path.isfile("/etc/udev/rules.d/70-persistent-net.rules"):
RunLog.error("/etc/udev/rules.d/70-persistent-net.rules file present")
print(distro+"_TEST_UDEV_RULES_ERROR")
return False
if distro == "COREOS":
if not os.path.isfile("/usr/lib64/udev/rules.d/75-persistent-net-generator.rules") and not os.path.isfile("/usr/lib64/udev/rules.d/70-persistent-net.rules"):
RunLog.info("rules are moved.")
print(distro+"_TEST_UDEV_RULES_SUCCESS")
return True
else:
if os.path.isfile("/usr/lib64/udev/rules.d/75-persistent-net-generator.rules"):
RunLog.error("/usr/lib64/udev/rules.d/75-persistent-net-generator.rules file present")
if os.path.isfile("/usr/lib64/udev/rules.d/70-persistent-net.rules"):
RunLog.error("/usr/lib64/udev/rules.d/70-persistent-net.rules file present")
print(distro+"_TEST_UDEV_RULES_ERROR")
return False
if distro == "UBUNTU":
RunLog.info("DISTRO PROVIDED : "+distro)
#Test 1 : verify that hv-kvp-daemon-init is installed or not, it's optional not strict.
RunLog.info("Checking if hv-kvp-daemon-init is installed or not..")
#kvp_install_status = Run("dpkg -s hv-kvp-daemon-init")
kvp_install_status = Run("pgrep -lf hv_kvp_daemon")
matchCount = 0
if "hv_kvp_daemon" in kvp_install_status:
matchCount = matchCount + 1
if matchCount == 1:
print(distro+"_TEST_KVP_INSTALLED")
else:
print(distro+"_TEST_KVP_NOT_INSTALLED")
#Test 2 : Make sure that repositories are installed.
RunLog.info("Checking if repositories are installed or not..")
repository_out = Run("apt-get update")
if "security.ubuntu.com" in repository_out and "azure.archive.ubuntu.com" in repository_out and "Hit" in repository_out:
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
print(distro+"_TEST_REPOSITORIES_ERROR")
#Test 3 : Make sure to have console=ttyS0 rootdelay=300 in /etc/default/grub.
result = verify_grub(distro)
#Test 4 : Make sure that default targetpw is commented in /etc/sudoers file.
result = verify_default_targetpw(distro)
if distro == "DEBIAN":
RunLog.info("DISTRO PROVIDED : "+distro)
#Test 1 : verify that hv-kvp-daemon-init is installed or not, it's optional not strict.
RunLog.info("Checking if hv-kvp-daemon-init is installed or not..")
kvp_install_status = Run("pgrep -lf hv_kvp_daemon")
matchCount = 0
if "hv_kvp_daemon" in kvp_install_status:
matchCount = matchCount + 1
if matchCount == 1:
print(distro+"_TEST_KVP_INSTALLED")
else:
print(distro+"_TEST_KVP_NOT_INSTALLED")
#Test 2 : Make sure that repositories are installed.
RunLog.info("Checking if repositories are installed or not..")
repository_out = Run("apt-get update")
if ( "deb.debian.org" in repository_out or "debian-archive.trafficmanager.net" in repository_out ) and "Hit" in repository_out:
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
print(distro+"_TEST_REPOSITORIES_ERROR")
#Test 3 : Make sure that default targetpw is commented in /etc/sudoers file.
result = verify_default_targetpw(distro)
if distro == "SUSE":
#Make sure that distro contains Cloud specific repositories
RunLog.info("Verifying Cloud specific repositories")
Oss_repo_count = Run("zypper lr | grep -vi debug | grep -vi non | grep Oss | wc -l | tr -d '\n'")
Update_repo_count = Run("zypper lr | grep -vi debug | grep -vi non | grep Update | wc -l | tr -d '\n'")
Oss_repo_enable_refresh = Run("zypper lr | grep -vi debug | grep -vi non | grep Oss | grep -o Yes | wc -l | tr -d '\n'")
Update_repo_enable_refresh = Run("zypper lr | grep -vi debug | grep -vi non | grep Update | grep -o Yes | wc -l | tr -d '\n'")
if int(Oss_repo_count) > 0 and int(Update_repo_count) > 0:
RunLog.info("All expected repositories are present")
if int(Oss_repo_enable_refresh) >= 2 and int(Update_repo_enable_refresh) >= 2:
RunLog.info("All expected repositories are enabled and refreshed")
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
RunLog.error("One or more expected repositories are not enabled/refreshed.")
print(distro+"_TEST_REPOSITORIES_ERROR")
else:
RunLog.error("One or more expected repositories are not present")
print(distro+"_TEST_REPOSITORIES_ERROR")
#Verify Grub
result = verify_grub(distro)
#Test : Make sure that default targetpw is commented in /etc/sudoers file.
result = verify_default_targetpw(distro)
if distro == "CENTOS":
#Test 1 : Make sure Network Manager is not installed
result = verify_network_manager(distro)
result = verify_network_file_in_sysconfig(distro)
result = verify_ifcfg_eth0(distro)
result = verify_udev_rules(distro)
#Verify repositories
r_out = Run("yum repolist")
if "base" in r_out and "updates" in r_out:
RunLog.info("Expected repositories are present")
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
if "base" not in r_out:
RunLog.error("Base repository not present")
if "updates" not in r_out:
RunLog.error("Updates repository not present")
print(distro+"_TEST_REPOSITORIES_ERROR")
#Verify etc/yum.conf
y_out = Run("cat /etc/yum.conf")
# check http_caching=packages in yum.conf for CentOS 6.x
version_release = Run("cat /etc/system-release | grep -Eo '[0-9].?[0-9]?' | head -1 | tr -d '\n'")
if float(version_release) < 6.6:
if "http_caching=packages" in y_out:
RunLog.info("http_caching=packages present in /etc/yum.conf")
print(distro+"_TEST_YUM_CONF_SUCCESS")
else:
RunLog.error("http_caching=packages not present in /etc/yum.conf")
print(distro+"_TEST_YUM_CONF_ERROR")
else:
print(distro+"_TEST_YUM_CONF_SUCCESS")
result = verify_grub(distro)
if distro == "REDHAT" or distro == "FEDORA":
#Test 1 : Make sure Network Manager is not installed
result = verify_default_targetpw(distro)
result = verify_network_manager(distro)
result = verify_network_file_in_sysconfig(distro)
result = verify_ifcfg_eth0(distro)
result = verify_udev_rules(distro)
#Verify repositories
r_out = Run("yum repolist")
if "base" in r_out and "updates" in r_out:
RunLog.info("Expected repositories are present")
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
if "base" not in r_out:
RunLog.error("Base repository not present")
if "updates" not in r_out:
RunLog.error("Updates repository not present")
print(distro+"_TEST_REPOSITORIES_ERROR")
if distro == "REDHAT":
ra_out = Run("yum repolist all | grep 'rhui-rhel-' | wc -l")
if int(ra_out) > 5:
RunLog.info("yum repolist all status: Success, repo count = %s", ra_out)
print(distro+"_TEST_RHUIREPOSITORIES_AVAILABLE")
else:
RunLog.error("yum repolist all status: Fail, repo count = %s", ra_out)
print(distro+"_TEST_RHUIREPOSITORIES_ERROR")
#Verify etc/yum.conf
y_out = Run("cat /etc/yum.conf")
version_release = Run("cat /etc/system-release | grep -Eo '[0-9].?[0-9]?' | head -1 | tr -d '\n'")
if float(version_release) < 6.6:
if "http_caching=packages" in y_out:
RunLog.info("http_caching=packages present in /etc/yum.conf")
print(distro+"_TEST_YUM_CONF_SUCCESS")
else:
RunLog.error("http_caching=packages not present in /etc/yum.conf")
print(distro+"_TEST_YUM_CONF_ERROR")
else:
print(distro+"_TEST_YUM_CONF_SUCCESS")
result = verify_grub(distro)
if distro == "ORACLELINUX":
#Test 1 : Make sure Network Manager is not installed
result = verify_network_manager(distro)
result = verify_network_file_in_sysconfig(distro)
result = verify_ifcfg_eth0(distro)
result = verify_udev_rules(distro)
#Verify repositories
r_out = Run("yum repolist")
if "latest" in r_out:
RunLog.info("Expected latest repositories are present")
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
RunLog.error("Expected latest repository not present")
print(distro+"_TEST_REPOSITORIES_ERROR")
# no need to verify yum.conf since http_caching is not required for Oracle Linux.
result = verify_grub(distro)
if distro == "SLES":
#Verify Repositories..
r_out = Run("zypper lr")
if "Pool" in r_out and "Updates" in r_out:
RunLog.info("All expected repositories are present")
RunLog.info("All expected repositories are enabled and refreshed")
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
RunLog.error("One or more expected repositories are not present")
print(distro+"_TEST_REPOSITORIES_ERROR")
#Verify Grub
result = verify_grub(distro)
#Verify sudoers file
result = verify_default_targetpw(distro)
#Verify: it is recommended to set DHCLIENT_SET_HOSTNAME="no" (instead of "yes") in /etc/sysconfig/network/dhcp or its equivalent
RunLog.info('Checking recommended setting if DHCLIENT_SET_HOSTNAME="no" present in /etc/sysconfig/network/dhcp')
d_out = Run("cat /etc/sysconfig/network/dhcp")
if 'DHCLIENT_SET_HOSTNAME="no"' in d_out:
RunLog.info('DHCLIENT_SET_HOSTNAME="no" present in /etc/sysconfig/network/dhcp')
else:
RunLog.info("DHCLIENT_SET_HOSTNAME='no' not present in /etc/sysconfig/network/dhcp, it's not strict.")
if distro == "COREOS":
#"rootdelay=300" has issues with CoreOS which causes extra long boot time
#result = verify_grub(distro)
result = verify_udev_rules(distro)
|
|
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
import unittest
import subprocess
tool_directory = '../../tools/LTE/scripts'
class perf_goleveldb(unittest.TestCase):
def test_FAB_3790_VaryNumParallelTxPerChain(self):
'''
In this Performance test, we observe the performance (time to
complete a set number of Ledger operations) of the Ledger component,
with goleveldb as the state database. We vary the number of parallel
transactions per chain and observe the performance.
Passing criteria: Underlying LTE test completed successfully with
exit code 0
'''
logfile = open("output_VaryNumParallelTxPerChain.log", "w")
returncode = subprocess.call(
"./runbenchmarks.sh -f parameters_daily_CI.sh varyNumParallelTxPerChain",
shell=True, stderr=subprocess.STDOUT, stdout=logfile,
cwd=tool_directory)
logfile.close()
self.assertEqual(returncode, 0, msg="VaryNumParallelTxPerChain "
"performance test failed. \nPlease check the logfile "
+logfile.name+" for more details.")
def test_FAB_3795_VaryNumChains(self):
'''
In this Performance test, we observe the performance (time to
complete a set number of Ledger operations) of the Ledger component,
with goleveldb as the state database. We vary the number of chains
(ledgers).
Passing criteria: Underlying LTE test completed successfully with
exit code 0
'''
logfile = open("output_VaryNumChains.log", "w")
returncode = subprocess.call(
"./runbenchmarks.sh -f parameters_daily_CI.sh varyNumChains",
shell=True, stderr=subprocess.STDOUT, stdout=logfile,
cwd=tool_directory)
logfile.close()
self.assertEqual(returncode, 0, msg="VaryNumChains performance test"
" failed. \nPlease check the logfile "+logfile.name+" for more "
"details.")
def test_FAB_3798_VaryNumParallelTxWithSingleChain(self):
'''
In this Performance test, we observe the performance (time to
complete a set number of Ledger operations) of the Ledger component,
with goleveldb as the state database. We vary the number of parallel
transactions on a single chain.
Passing criteria: Underlying LTE test completed successfully with
exit code 0
'''
logfile = open("output_VaryNumParallelTxWithSingleChain.log", "w")
returncode = subprocess.call(
"./runbenchmarks.sh -f parameters_daily_CI.sh varyNumParallelTxWithSingleChain",
shell=True, stderr=subprocess.STDOUT, stdout=logfile,
cwd=tool_directory)
logfile.close()
self.assertEqual(returncode, 0, msg="VaryNumParallelTxWithSingleChain "
"performance test failed. \nPlease check the logfile "
+logfile.name+" for more details.")
def test_FAB_3799_VaryNumChainsWithNoParallelism(self):
'''
In this Performance test, we observe the performance (time to
complete a set number of Ledger operations) of the Ledger component,
with goleveldb as the state database. We vary the number of chains
without any parallelism within a single chain.
Passing criteria: Underlying LTE test completed successfully with
exit code 0
'''
logfile = open("output_VaryNumChainsWithNoParallelism.log", "w")
returncode = subprocess.call(
"./runbenchmarks.sh -f parameters_daily_CI.sh varyNumChainsWithNoParallelism",
shell=True, stderr=subprocess.STDOUT, stdout=logfile,
cwd=tool_directory)
logfile.close()
self.assertEqual(returncode, 0, msg="varyNumChainsWithNoParallelism "
"performance test failed. \nPlease check the logfile "
+logfile.name+" for more details.")
def test_FAB_3801_VaryKVSize(self):
'''
In this Performance test, we observe the performance (time to
complete a set number of Ledger operations) of the Ledger component,
with goleveldb as the state database. We vary the size of the key-value pairs.
Passing criteria: Underlying LTE test completed successfully with
exit code 0
'''
logfile = open("output_VaryKVSize.log", "w")
returncode = subprocess.call(
"./runbenchmarks.sh -f parameters_daily_CI.sh varyKVSize",
shell=True, stderr=subprocess.STDOUT, stdout=logfile,
cwd=tool_directory)
logfile.close()
self.assertEqual(returncode, 0, msg="varyKVSize performance test"
" failed. \nPlease check the logfile "+logfile.name+" for more "
"details.")
def test_FAB_3802_VaryBatchSize(self):
'''
In this Performance test, we observe the performance (time to
complete a set number of Ledger operations) of the Ledger component,
with goleveldb as the state database. We vary the value of the batch
size.
Passing criteria: Underlying LTE test completed successfully with
exit code 0
'''
logfile = open("output_VaryBatchSize.log", "w")
returncode = subprocess.call(
"./runbenchmarks.sh -f parameters_daily_CI.sh varyBatchSize",
shell=True, stderr=subprocess.STDOUT, stdout=logfile,
cwd=tool_directory)
logfile.close()
self.assertEqual(returncode, 0, msg="varyBatchSize performance test"
" failed. \nPlease check the logfile "+logfile.name+" for more "
"details.")
def test_FAB_3800_VaryNumKeysInEachTx(self):
'''
In this Performance test, we observe the performance (time to
complete a set number of Ledger operations) of the Ledger component,
with goleveldb as the state database. We vary the number of keys in
each transaction.
Passing criteria: Underlying LTE test completed successfully with
exit code 0
'''
logfile = open("output_VaryNumKeysInEachTx.log", "w")
returncode = subprocess.call(
"./runbenchmarks.sh -f parameters_daily_CI.sh varyNumKeysInEachTx",
shell=True, stderr=subprocess.STDOUT, stdout=logfile,
cwd=tool_directory)
logfile.close()
self.assertEqual(returncode, 0, msg="varyNumKeysInEachTx performance "
"test failed. \nPlease check the logfile "+logfile.name
+" for more details.")
def test_FAB_3803_VaryNumTxs(self):
'''
In this Performance test, we observe the performance (time to
complete a set number of Ledger operations) of the Ledger component,
with goleveldb as the state database. We vary the number of
transactions carried out.
Passing criteria: Underlying LTE test completed successfully with
exit code 0
'''
logfile = open("output_VaryNumTxs.log", "w")
returncode = subprocess.call(
"./runbenchmarks.sh -f parameters_daily_CI.sh varyNumTxs",
shell=True, stderr=subprocess.STDOUT, stdout=logfile,
cwd=tool_directory)
logfile.close()
self.assertEqual(returncode, 0, msg="varyNumTxs performance test"
" failed. \nPlease check the logfile "+logfile.name+" for more "
"details.")
class perf_couchdb(unittest.TestCase):
@unittest.skip("WIP, skipping")
def test_FAB_3870_VaryNumParallelTxPerChain(self):
'''
In this Performance test, we observe the performance (operations
per second) of the Ledger component, with CouchDB as the state
database, as we vary the number of parallel transactions per chain.
'''
self.assertTrue(True)
@unittest.skip("WIP, skipping")
def test_FAB_3871_VaryNumChain(self):
'''
In this Performance test, we observe the performance (operations
per second) of the Ledger component, with CouchDB as the state
database, as we vary the number of chains (ledgers).
'''
self.assertTrue(True)
@unittest.skip("WIP, skipping")
def test_FAB_3872_VaryNumParallelTxWithSingleChain(self):
'''
In this Performance test, we observe the performance (operations
per second) of the Ledger component, with CouchDB as the state
        database, as we vary the number of parallel transactions on a single chain.
'''
self.assertTrue(True)
@unittest.skip("WIP, skipping")
def test_FAB_3873_VaryNumChainWithNoParallelism(self):
'''
In this Performance test, we observe the performance (operations
per second) of the Ledger component, with CouchDB as the state
        database, as we vary the number of chains without any parallelism
        within a single chain.
'''
self.assertTrue(True)
@unittest.skip("WIP, skipping")
def test_FAB_3874_VaryKVSize(self):
'''
In this Performance test, we observe the performance (operations
per second) of the Ledger component, with CouchDB as the state
        database, as we vary the size of the key-value pairs.
'''
self.assertTrue(True)
@unittest.skip("WIP, skipping")
def test_FAB_3875_VaryBatchSize(self):
'''
In this Performance test, we observe the performance (operations
per second) of the Ledger component, with CouchDB as the state
database, as we vary the value of the batch size.
'''
self.assertTrue(True)
@unittest.skip("WIP, skipping")
def test_FAB_3876_VaryNumKeysInEachTX(self):
'''
In this Performance test, we observe the performance (operations
per second) of the Ledger component, with CouchDB as the state
database, as we vary the number of keys in each transaction.
'''
self.assertTrue(True)
@unittest.skip("WIP, skipping")
def test_FAB_3877_VaryNumTxs(self):
'''
In this Performance test, we observe the performance (operations
per second) of the Ledger component, with CouchDB as the state
database, as we vary the number of transactions carried out.
'''
self.assertTrue(True)
|
|
# -*- test-case-name: twisted.words.test.test_jabberxmlstream -*-
#
# Copyright (c) 2001-2006 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
XMPP XML Streams
Building blocks for setting up XML Streams, including helper classes for
doing authentication on either client or server side, and working with XML
Stanzas.
"""
from zope.interface import directlyProvides, implements
from twisted.internet import defer
from twisted.internet.error import ConnectionLost
from twisted.python import failure
from twisted.words.protocols.jabber import error, ijabber
from twisted.words.xish import domish, xmlstream
from twisted.words.xish.xmlstream import STREAM_CONNECTED_EVENT
from twisted.words.xish.xmlstream import STREAM_START_EVENT
from twisted.words.xish.xmlstream import STREAM_END_EVENT
from twisted.words.xish.xmlstream import STREAM_ERROR_EVENT
try:
from twisted.internet import ssl
except ImportError:
ssl = None
if ssl and not ssl.supported:
ssl = None
STREAM_AUTHD_EVENT = intern("//event/stream/authd")
INIT_FAILED_EVENT = intern("//event/xmpp/initfailed")
NS_STREAMS = 'http://etherx.jabber.org/streams'
NS_XMPP_TLS = 'urn:ietf:params:xml:ns:xmpp-tls'
Reset = object()
def hashPassword(sid, password):
"""
Create a SHA1-digest string of a session identifier and password.
"""
    from hashlib import sha1
    return sha1("%s%s" % (sid, password)).hexdigest()
class Authenticator:
"""
Base class for business logic of initializing an XmlStream
Subclass this object to enable an XmlStream to initialize and authenticate
to different types of stream hosts (such as clients, components, etc.).
Rules:
1. The Authenticator MUST dispatch a L{STREAM_AUTHD_EVENT} when the
stream has been completely initialized.
2. The Authenticator SHOULD reset all state information when
L{associateWithStream} is called.
3. The Authenticator SHOULD override L{streamStarted}, and start
initialization there.
@type xmlstream: L{XmlStream}
@ivar xmlstream: The XmlStream that needs authentication
@note: the term authenticator is historical. Authenticators perform
all steps required to prepare the stream for the exchange
of XML stanzas.
"""
def __init__(self):
self.xmlstream = None
def connectionMade(self):
"""
Called by the XmlStream when the underlying socket connection is
in place.
This allows the Authenticator to send an initial root element, if it's
connecting, or wait for an inbound root from the peer if it's accepting
the connection.
Subclasses can use self.xmlstream.send() to send any initial data to
the peer.
"""
def streamStarted(self):
"""
Called by the XmlStream when the stream has started.
A stream is considered to have started when the root element has been
received and, if applicable, the feature set has been received.
"""
def associateWithStream(self, xmlstream):
"""
Called by the XmlStreamFactory when a connection has been made
to the requested peer, and an XmlStream object has been
instantiated.
The default implementation just saves a handle to the new
XmlStream.
@type xmlstream: L{XmlStream}
@param xmlstream: The XmlStream that will be passing events to this
Authenticator.
"""
self.xmlstream = xmlstream
class ConnectAuthenticator(Authenticator):
"""
Authenticator for initiating entities.
"""
namespace = None
def __init__(self, otherHost):
self.otherHost = otherHost
def connectionMade(self):
self.xmlstream.namespace = self.namespace
self.xmlstream.otherHost = self.otherHost
self.xmlstream.sendHeader()
def initializeStream(self):
"""
Perform stream initialization procedures.
An L{XmlStream} holds a list of initializer objects in its
C{initializers} attribute. This method calls these initializers in
order and dispatches the C{STREAM_AUTHD_EVENT} event when the list has
been successfully processed. Otherwise it dispatches the
C{INIT_FAILED_EVENT} event with the failure.
Initializers may return the special L{Reset} object to halt the
initialization processing. It signals that the current initializer was
successfully processed, but that the XML Stream has been reset. An
example is the TLSInitiatingInitializer.
"""
def remove_first(result):
self.xmlstream.initializers.pop(0)
return result
def do_next(result):
"""
Take the first initializer and process it.
On success, the initializer is removed from the list and
            the next initializer will be tried.
"""
if result is Reset:
return None
try:
init = self.xmlstream.initializers[0]
except IndexError:
self.xmlstream.dispatch(self.xmlstream, STREAM_AUTHD_EVENT)
return None
else:
d = defer.maybeDeferred(init.initialize)
d.addCallback(remove_first)
d.addCallback(do_next)
return d
d = defer.succeed(None)
d.addCallback(do_next)
d.addErrback(self.xmlstream.dispatch, INIT_FAILED_EVENT)
def streamStarted(self):
self.initializeStream()
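# Illustrative sketch, not part of the original module: the contract expected
# from entries in XmlStream.initializers as processed by initializeStream
# above. The class name below is made up for illustration.
#
#     class NoopInitializer(object):
#         def __init__(self, xs):
#             self.xmlstream = xs
#
#         def initialize(self):
#             # Return a value or a Deferred; returning Reset tells
#             # initializeStream that the stream was reset (as the
#             # TLSInitiatingInitializer below does after starttls).
#             return defer.succeed(None)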
class FeatureNotAdvertized(Exception):
"""
Exception indicating a stream feature was not advertized, while required by
the initiating entity.
"""
class BaseFeatureInitiatingInitializer(object):
"""
Base class for initializers with a stream feature.
This assumes the associated XmlStream represents the initiating entity
of the connection.
@cvar feature: tuple of (uri, name) of the stream feature root element.
@type feature: tuple of (L{str}, L{str})
@ivar required: whether the stream feature is required to be advertized
by the receiving entity.
@type required: L{bool}
"""
implements(ijabber.IInitiatingInitializer)
feature = None
required = False
def __init__(self, xs):
self.xmlstream = xs
def initialize(self):
"""
Initiate the initialization.
Checks if the receiving entity advertizes the stream feature. If it
does, the initialization is started. If it is not advertized, and the
C{required} instance variable is L{True}, it raises
L{FeatureNotAdvertized}. Otherwise, the initialization silently
succeeds.
"""
if self.feature in self.xmlstream.features:
return self.start()
elif self.required:
raise FeatureNotAdvertized
else:
return None
def start(self):
"""
Start the actual initialization.
May return a deferred for asynchronous initialization.
"""
class TLSError(Exception):
"""
TLS base exception.
"""
class TLSFailed(TLSError):
"""
Exception indicating failed TLS negotiation
"""
class TLSRequired(TLSError):
"""
Exception indicating required TLS negotiation.
This exception is raised when the receiving entity requires TLS
    negotiation and the initiating entity does not desire to negotiate TLS.
"""
class TLSNotSupported(TLSError):
"""
Exception indicating missing TLS support.
    This exception is raised when the initiating entity wants and requires TLS
    negotiation but the OpenSSL library is not available.
"""
class TLSInitiatingInitializer(BaseFeatureInitiatingInitializer):
"""
TLS stream initializer for the initiating entity.
    It is strongly recommended to include this initializer in the list of
    initializers for an XMPP stream. By default it will try to negotiate TLS.
    An XMPP server may indicate that TLS is required. If TLS is not desired,
    set the C{wanted} attribute to False instead of removing the initializer
    from the list, so that a proper L{TLSRequired} exception can be raised.
@cvar wanted: indicates if TLS negotiation is wanted.
@type wanted: L{bool}
"""
feature = (NS_XMPP_TLS, 'starttls')
wanted = True
_deferred = None
def onProceed(self, obj):
"""
Proceed with TLS negotiation and reset the XML stream.
"""
self.xmlstream.removeObserver('/failure', self.onFailure)
ctx = ssl.CertificateOptions()
self.xmlstream.transport.startTLS(ctx)
self.xmlstream.reset()
self.xmlstream.sendHeader()
self._deferred.callback(Reset)
def onFailure(self, obj):
self.xmlstream.removeObserver('/proceed', self.onProceed)
self._deferred.errback(TLSFailed())
def start(self):
"""
Start TLS negotiation.
This checks if the receiving entity requires TLS, the SSL library is
available and uses the C{required} and C{wanted} instance variables to
determine what to do in the various different cases.
        For example, if the SSL library is not available while TLS is wanted
        and required by the user, it results in a failure. However, if TLS is
        not required by either party, initialization silently succeeds, moving
        on to the next step.
"""
if self.wanted:
if ssl is None:
if self.required:
return defer.fail(TLSNotSupported())
else:
return defer.succeed(None)
else:
pass
elif self.xmlstream.features[self.feature].required:
return defer.fail(TLSRequired())
else:
return defer.succeed(None)
self._deferred = defer.Deferred()
self.xmlstream.addOnetimeObserver("/proceed", self.onProceed)
self.xmlstream.addOnetimeObserver("/failure", self.onFailure)
self.xmlstream.send(domish.Element((NS_XMPP_TLS, "starttls")))
return self._deferred
class XmlStream(xmlstream.XmlStream):
"""
XMPP XML Stream protocol handler.
@ivar version: XML stream version as a tuple (major, minor). Initially,
this is set to the minimally supported version. Upon
receiving the stream header of the peer, it is set to the
minimum of that value and the version on the received
header.
@type version: (L{int}, L{int})
@ivar namespace: default namespace URI for stream
@type namespace: L{str}
@ivar thisHost: hostname of this entity
@ivar otherHost: hostname of the peer entity
@ivar sid: session identifier
@type sid: L{str}
@ivar initiating: True if this is the initiating stream
@type initiating: L{bool}
@ivar features: map of (uri, name) to stream features element received from
the receiving entity.
@type features: L{dict} of (L{str}, L{str}) to L{domish.Element}.
@ivar prefixes: map of URI to prefixes that are to appear on stream
header.
@type prefixes: L{dict} of L{str} to L{str}
@ivar initializers: list of stream initializer objects
@type initializers: L{list} of objects that provide L{IInitializer}
@ivar authenticator: associated authenticator that uses C{initializers} to
initialize the XML stream.
"""
version = (1, 0)
namespace = 'invalid'
thisHost = None
otherHost = None
sid = None
initiating = True
prefixes = {NS_STREAMS: 'stream'}
_headerSent = False # True if the stream header has been sent
def __init__(self, authenticator):
xmlstream.XmlStream.__init__(self)
self.authenticator = authenticator
self.initializers = []
self.features = {}
# Reset the authenticator
authenticator.associateWithStream(self)
def _callLater(self, *args, **kwargs):
from twisted.internet import reactor
return reactor.callLater(*args, **kwargs)
def reset(self):
"""
Reset XML Stream.
Resets the XML Parser for incoming data. This is to be used after
successfully negotiating a new layer, e.g. TLS and SASL. Note that
registered event observers will continue to be in place.
"""
self._headerSent = False
self._initializeStream()
def onStreamError(self, errelem):
"""
Called when a stream:error element has been received.
Dispatches a L{STREAM_ERROR_EVENT} event with the error element to
allow for cleanup actions and drops the connection.
@param errelem: The received error element.
@type errelem: L{domish.Element}
"""
self.dispatch(failure.Failure(error.exceptionFromStreamError(errelem)),
STREAM_ERROR_EVENT)
self.transport.loseConnection()
def onFeatures(self, features):
"""
Called when a stream:features element has been received.
Stores the received features in the C{features} attribute, checks the
need for initiating TLS and notifies the authenticator of the start of
the stream.
@param features: The received features element.
@type features: L{domish.Element}
"""
self.features = {}
for feature in features.elements():
self.features[(feature.uri, feature.name)] = feature
self.authenticator.streamStarted()
def sendHeader(self):
"""
Send stream header.
"""
rootElem = domish.Element((NS_STREAMS, 'stream'), self.namespace)
if self.initiating and self.otherHost:
rootElem['to'] = self.otherHost
elif not self.initiating:
if self.thisHost:
rootElem['from'] = self.thisHost
if self.sid:
rootElem['id'] = self.sid
if self.version >= (1, 0):
rootElem['version'] = "%d.%d" % (self.version[0], self.version[1])
self.rootElem = rootElem
self.send(rootElem.toXml(prefixes=self.prefixes, closeElement=0))
self._headerSent = True
def sendFooter(self):
"""
Send stream footer.
"""
self.send('</stream:stream>')
def sendStreamError(self, streamError):
"""
Send stream level error.
        If we are the receiving entity and haven't sent the header yet,
        we send one first.
        If the given C{streamError} is a L{error.StreamError}, it is rendered
to its XML representation, otherwise a generic C{internal-error}
stream error is generated.
After sending the stream error, the stream is closed and the transport
connection dropped.
"""
if not self._headerSent and not self.initiating:
self.sendHeader()
if self._headerSent:
self.send(streamError.getElement())
self.sendFooter()
self.transport.loseConnection()
def send(self, obj):
"""
Send data over the stream.
        This overrides L{xmlstream.XmlStream.send} to use the default namespace
of the stream header when serializing L{domish.IElement}s. It is
assumed that if you pass an object that provides L{domish.IElement},
it represents a direct child of the stream's root element.
"""
if domish.IElement.providedBy(obj):
obj = obj.toXml(prefixes=self.prefixes,
defaultUri=self.namespace,
prefixesInScope=self.prefixes.values())
xmlstream.XmlStream.send(self, obj)
def connectionMade(self):
"""
Called when a connection is made.
Notifies the authenticator when a connection has been made.
"""
xmlstream.XmlStream.connectionMade(self)
self.authenticator.connectionMade()
def onDocumentStart(self, rootelem):
"""
Called when the stream header has been received.
Extracts the header's C{id} and C{version} attributes from the root
element. The C{id} attribute is stored in our C{sid} attribute and the
C{version} attribute is parsed and the minimum of the version we sent
and the parsed C{version} attribute is stored as a tuple (major, minor)
in this class' C{version} attribute. If no C{version} attribute was
present, we assume version 0.0.
If appropriate (we are the initiating stream and the minimum of our and
the other party's version is at least 1.0), a one-time observer is
registered for getting the stream features. The registered function is
C{onFeatures}.
Ultimately, the authenticator's C{streamStarted} method will be called.
@param rootelem: The root element.
@type rootelem: L{domish.Element}
"""
xmlstream.XmlStream.onDocumentStart(self, rootelem)
# Extract stream identifier
if rootelem.hasAttribute("id"):
self.sid = rootelem["id"]
# Extract stream version and take minimum with the version sent
if rootelem.hasAttribute("version"):
version = rootelem["version"].split(".")
try:
version = (int(version[0]), int(version[1]))
            except (IndexError, ValueError):
version = (0, 0)
else:
version = (0, 0)
self.version = min(self.version, version)
# Setup observer for stream errors
self.addOnetimeObserver("/error[@xmlns='%s']" % NS_STREAMS,
self.onStreamError)
# Setup observer for stream features, if applicable
if self.initiating and self.version >= (1, 0):
self.addOnetimeObserver('/features[@xmlns="%s"]' % NS_STREAMS,
self.onFeatures)
else:
self.authenticator.streamStarted()
class XmlStreamFactory(xmlstream.XmlStreamFactory):
def __init__(self, authenticator):
xmlstream.XmlStreamFactory.__init__(self)
self.authenticator = authenticator
def buildProtocol(self, _):
self.resetDelay()
# Create the stream and register all the bootstrap observers
xs = XmlStream(self.authenticator)
xs.factory = self
        for event, fn in self.bootstraps:
            xs.addObserver(event, fn)
return xs
class TimeoutError(Exception):
"""
Exception raised when no IQ response has been received before the
configured timeout.
"""
def upgradeWithIQResponseTracker(xs):
"""
Enhances an XmlStream for iq response tracking.
This makes an L{XmlStream} object provide L{IIQResponseTracker}. When a
response is an error iq stanza, the deferred has its errback invoked with a
failure that holds a L{StanzaException<error.StanzaException>} that is
easier to examine.
"""
def callback(iq):
"""
Handle iq response by firing associated deferred.
"""
if getattr(iq, 'handled', False):
return
try:
d = xs.iqDeferreds[iq["id"]]
except KeyError:
pass
else:
del xs.iqDeferreds[iq["id"]]
iq.handled = True
if iq['type'] == 'error':
d.errback(error.exceptionFromStanza(iq))
else:
d.callback(iq)
def disconnected(_):
"""
Make sure deferreds do not linger on after disconnect.
This errbacks all deferreds of iq's for which no response has been
received with a L{ConnectionLost} failure. Otherwise, the deferreds
will never be fired.
"""
iqDeferreds = xs.iqDeferreds
xs.iqDeferreds = {}
for d in iqDeferreds.itervalues():
d.errback(ConnectionLost())
xs.iqDeferreds = {}
xs.iqDefaultTimeout = getattr(xs, 'iqDefaultTimeout', None)
xs.addObserver(xmlstream.STREAM_END_EVENT, disconnected)
xs.addObserver('/iq[@type="result"]', callback)
xs.addObserver('/iq[@type="error"]', callback)
directlyProvides(xs, ijabber.IIQResponseTracker)
class IQ(domish.Element):
"""
Wrapper for an iq stanza.
Iq stanzas are used for communications with a request-response behaviour.
Each iq request is associated with an XML stream and has its own unique id
to be able to track the response.
@ivar timeout: if set, a timeout period after which the deferred returned
by C{send} will have its errback called with a
L{TimeoutError} failure.
@type timeout: C{float}
"""
timeout = None
def __init__(self, xmlstream, type = "set"):
"""
@type xmlstream: L{xmlstream.XmlStream}
@param xmlstream: XmlStream to use for transmission of this IQ
@type type: L{str}
@param type: IQ type identifier ('get' or 'set')
"""
domish.Element.__init__(self, (None, "iq"))
self.addUniqueId()
self["type"] = type
self._xmlstream = xmlstream
def send(self, to=None):
"""
Send out this iq.
Returns a deferred that is fired when an iq response with the same id
is received. Result responses will be passed to the deferred callback.
Error responses will be transformed into a
L{StanzaError<error.StanzaError>} and result in the errback of the
deferred being invoked.
@rtype: L{defer.Deferred}
"""
if to is not None:
self["to"] = to
if not ijabber.IIQResponseTracker.providedBy(self._xmlstream):
upgradeWithIQResponseTracker(self._xmlstream)
d = defer.Deferred()
self._xmlstream.iqDeferreds[self['id']] = d
timeout = self.timeout or self._xmlstream.iqDefaultTimeout
if timeout is not None:
def onTimeout():
del self._xmlstream.iqDeferreds[self['id']]
d.errback(TimeoutError("IQ timed out"))
call = self._xmlstream._callLater(timeout, onTimeout)
def cancelTimeout(result):
if call.active():
call.cancel()
return result
d.addBoth(cancelTimeout)
self._xmlstream.send(self)
return d
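# Illustrative sketch, not part of the original module: a helper showing the
# intended request/response use of IQ over an XmlStream. The helper name and
# the target JID 'example.org' are made up; jabber:iq:version is the standard
# XEP-0092 namespace.
def _exampleVersionQuery(xs):
    """
    Send a software version query and return the deferred response.

    The deferred fires with the result iq stanza, or errbacks with a
    StanzaError (or TimeoutError when a timeout is configured).
    """
    versionIq = IQ(xs, "get")
    versionIq.addElement(("jabber:iq:version", "query"))
    return versionIq.send(to="example.org")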
|
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import socket
from urlparse import urlparse
import os
import struct
import uuid
import hashlib
import base64
import logging
"""
websocket python client.
=========================
This version supports only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
# websocket supported version.
VERSION = 13
# closing frame status codes.
STATUS_NORMAL = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA_TYPE = 1003
STATUS_STATUS_NOT_AVAILABLE = 1005
STATUS_ABNORMAL_CLOSED = 1006
STATUS_INVALID_PAYLOAD = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_INVALID_EXTENSION = 1010
STATUS_UNEXPECTED_CONDITION = 1011
STATUS_TLS_HANDSHAKE_ERROR = 1015
logger = logging.getLogger()
class WebSocketException(Exception):
"""
    Websocket exception class.
"""
pass
# default_timeout = None
# This value may be too low to connect over the network, but it should be fine
# on the local machine. A short timeout is needed because a blocking read would
# otherwise hold up everything else, since we cannot wait on a read while
# doing other work in the socketio/wavemeter code.
default_timeout = 0.01
traceEnabled = False
def enableTrace(tracable):
"""
    Turn on/off traceability.
    tracable: boolean value. If set to True, traceability is enabled.
"""
global traceEnabled
traceEnabled = tracable
if tracable:
if not logger.handlers:
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
def setdefaulttimeout(timeout):
"""
Set the global timeout setting to connect.
    timeout: default socket timeout time, in seconds.
"""
global default_timeout
default_timeout = timeout
def getdefaulttimeout():
"""
    Return the global connect timeout setting, in seconds.
"""
return default_timeout
def _parse_url(url):
"""
    Parse a websocket url and return a tuple of
    (hostname, port, resource path, flag of secure mode).
    url: url string.
"""
if ":" not in url:
raise ValueError("url is invalid")
scheme, url = url.split(":", 1)
url = url.rstrip("/")
parsed = urlparse(url, scheme="http")
if parsed.hostname:
hostname = parsed.hostname
else:
raise ValueError("hostname is invalid")
port = 0
if parsed.port:
port = parsed.port
is_secure = False
if scheme == "ws":
if not port:
port = 80
elif scheme == "wss":
is_secure = True
if not port:
port = 443
else:
raise ValueError("scheme %s is invalid" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if parsed.query:
resource += "?" + parsed.query
return (hostname, port, resource, is_secure)
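# Illustrative examples, not part of the original library, of what _parse_url
# returns:
#
#     _parse_url("ws://example.com/chat?room=1")
#         -> ("example.com", 80, "/chat?room=1", False)
#     _parse_url("wss://example.com")
#         -> ("example.com", 443, "/", True)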
def create_connection(url, timeout=None, **options):
"""
    Connect to url and return the WebSocket object.
    Passing the optional timeout parameter will set the timeout on the socket.
    If no timeout is supplied, the global default timeout setting returned by
    getdefaulttimeout() is used.
You can customize using 'options'.
If you set "header" dict object, you can set your own custom header.
>>> conn = create_connection("ws://echo.websocket.org/",
... header={"User-Agent: MyProgram",
... "x-custom: header"})
    timeout: socket timeout time, in seconds.
             If you set this value to None, it means "use the default_timeout value".
    options: currently the only supported option is "header".
             If you set header to a dict value, the custom HTTP headers are added.
"""
websock = WebSocket()
    websock.settimeout(default_timeout if timeout is None else timeout)
websock.connect(url, **options)
return websock
_MAX_INTEGER = (1 << 32) -1
_AVAILABLE_KEY_CHARS = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1)
_MAX_CHAR_BYTE = (1<<8) -1
# ref. Websocket gets an update, and it breaks stuff.
# http://axod.blogspot.com/2010/06/websocket-gets-update-and-it-breaks.html
def _create_sec_websocket_key():
uid = uuid.uuid4()
return base64.encodestring(uid.bytes).strip()
_HEADERS_TO_CHECK = {
"upgrade": "websocket",
"connection": "upgrade",
}
class _SSLSocketWrapper(object):
def __init__(self, sock):
self.ssl = socket.ssl(sock)
def recv(self, bufsize):
return self.ssl.read(bufsize)
def send(self, payload):
return self.ssl.write(payload)
_BOOL_VALUES = (0, 1)
def _is_bool(*values):
for v in values:
if v not in _BOOL_VALUES:
return False
return True
class ABNF(object):
"""
ABNF frame class.
see http://tools.ietf.org/html/rfc5234
and http://tools.ietf.org/html/rfc6455#section-5.2
"""
# operation code values.
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
# available operation code value tuple
OPCODES = (OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE,
OPCODE_PING, OPCODE_PONG)
# opcode human readable string
OPCODE_MAP = {
OPCODE_TEXT: "text",
OPCODE_BINARY: "binary",
OPCODE_CLOSE: "close",
OPCODE_PING: "ping",
OPCODE_PONG: "pong"
}
    # data length threshold.
LENGTH_7 = 0x7d
LENGTH_16 = 1 << 16
LENGTH_63 = 1 << 63
def __init__(self, fin = 0, rsv1 = 0, rsv2 = 0, rsv3 = 0,
opcode = OPCODE_TEXT, mask = 1, data = ""):
"""
Constructor for ABNF.
please check RFC for arguments.
"""
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.mask = mask
self.data = data
self.get_mask_key = os.urandom
@staticmethod
def create_frame(data, opcode):
"""
create frame to send text, binary and other data.
        data: data to send. This is a string value (byte array).
            If opcode is OPCODE_TEXT and this value is unicode,
            the data is automatically encoded as a utf-8 string.
opcode: operation code. please see OPCODE_XXX.
"""
if opcode == ABNF.OPCODE_TEXT and isinstance(data, unicode):
data = data.encode("utf-8")
# mask must be set if send data from client
return ABNF(1, 0, 0, 0, opcode, 1, data)
def format(self):
"""
        Format this object into a string (byte array) to send to the server.
"""
if not _is_bool(self.fin, self.rsv1, self.rsv2, self.rsv3):
raise ValueError("not 0 or 1")
if self.opcode not in ABNF.OPCODES:
raise ValueError("Invalid OPCODE")
length = len(self.data)
if length >= ABNF.LENGTH_63:
raise ValueError("data is too long")
frame_header = chr(self.fin << 7
| self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4
| self.opcode)
if length < ABNF.LENGTH_7:
frame_header += chr(self.mask << 7 | length)
elif length < ABNF.LENGTH_16:
frame_header += chr(self.mask << 7 | 0x7e)
frame_header += struct.pack("!H", length)
else:
frame_header += chr(self.mask << 7 | 0x7f)
frame_header += struct.pack("!Q", length)
if not self.mask:
return frame_header + self.data
else:
mask_key = self.get_mask_key(4)
return frame_header + self._get_masked(mask_key)
def _get_masked(self, mask_key):
s = ABNF.mask(mask_key, self.data)
return mask_key + "".join(s)
@staticmethod
def mask(mask_key, data):
"""
        Mask or unmask data by XORing each byte with the mask key.
        mask_key: 4-byte string (byte array).
        data: data to mask/unmask.
"""
_m = map(ord, mask_key)
_d = map(ord, data)
for i in range(len(_d)):
_d[i] ^= _m[i % 4]
s = map(chr, _d)
return "".join(s)
class WebSocket(object):
"""
Low level WebSocket interface.
This class is based on
The WebSocket protocol draft-hixie-thewebsocketprotocol-76
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
    We can connect to the websocket server and send/receive data.
    The following example is an echo client.
>>> import websocket
>>> ws = websocket.WebSocket()
>>> ws.connect("ws://echo.websocket.org")
>>> ws.send("Hello, Server")
>>> ws.recv()
'Hello, Server'
>>> ws.close()
get_mask_key: a callable to produce new mask keys, see the set_mask_key
function's docstring for more details
"""
def __init__(self, get_mask_key = None):
"""
        Initialize WebSocket object.
"""
self.connected = False
self.io_sock = self.sock = socket.socket()
self.get_mask_key = get_mask_key
enableTrace(traceEnabled)
def set_mask_key(self, func):
"""
        Set the function used to create mask keys, so you can customize the
        mask key generator. Mainly, this is for testing purposes.
        func: callable object. It must take one integer argument, the length
            of the mask key, and return a string (byte array) of that length.
"""
self.get_mask_key = func
def settimeout(self, timeout):
"""
        Set the timeout of the websocket.
        timeout: timeout time, in seconds.
"""
self.sock.settimeout(timeout)
def gettimeout(self):
"""
        Get the websocket timeout, in seconds.
"""
return self.sock.gettimeout()
def connect(self, url, **options):
"""
        Connect to url. The url must use the websocket url scheme, i.e. ws://host:port/resource
You can customize using 'options'.
If you set "header" dict object, you can set your own custom header.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header={"User-Agent: MyProgram",
... "x-custom: header"})
        timeout: socket timeout time, in seconds.
                 If you set this value to None,
                 it means "use the default_timeout value".
        options: currently the only supported option is "header".
                 If you set header to a dict value,
                 the custom HTTP headers are added.
"""
hostname, port, resource, is_secure = _parse_url(url)
# TODO: we need to support proxy
self.sock.connect((hostname, port))
if is_secure:
self.io_sock = _SSLSocketWrapper(self.sock)
self._handshake(hostname, port, resource, **options)
def _handshake(self, host, port, resource, **options):
sock = self.io_sock
headers = []
headers.append("GET %s HTTP/1.1" % resource)
headers.append("Upgrade: websocket")
headers.append("Connection: Upgrade")
if port == 80:
hostport = host
else:
hostport = "%s:%d" % (host, port)
headers.append("Host: %s" % hostport)
headers.append("Origin: %s" % hostport)
key = _create_sec_websocket_key()
headers.append("Sec-WebSocket-Key: %s" % key)
headers.append("Sec-WebSocket-Protocol: chat, superchat")
headers.append("Sec-WebSocket-Version: %s" % VERSION)
if "header" in options:
headers.extend(options["header"])
headers.append("")
headers.append("")
header_str = "\r\n".join(headers)
sock.send(header_str)
if traceEnabled:
logger.debug( "--- request header ---")
logger.debug( header_str)
logger.debug("-----------------------")
status, resp_headers = self._read_headers()
if status != 101:
self.close()
raise WebSocketException("Handshake Status %d" % status)
success = self._validate_header(resp_headers, key)
if not success:
self.close()
raise WebSocketException("Invalid WebSocket Header")
self.connected = True
def _validate_header(self, headers, key):
for k, v in _HEADERS_TO_CHECK.iteritems():
r = headers.get(k, None)
if not r:
return False
r = r.lower()
if v != r:
return False
result = headers.get("sec-websocket-accept", None)
if not result:
return False
result = result.lower()
value = key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
hashed = base64.encodestring(hashlib.sha1(value).digest()).strip().lower()
return hashed == result
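# Illustrative note, not part of the original library: the accept value checked
# above is defined by RFC 6455 as
#     base64(sha1(key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"))
# For the RFC's sample key "dGhlIHNhbXBsZSBub25jZQ==" the expected header value
# is "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".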
def _read_headers(self):
status = None
headers = {}
if traceEnabled:
logger.debug("--- response header ---")
while True:
line = self._recv_line()
if line == "\r\n":
break
line = line.strip()
if traceEnabled:
logger.debug(line)
if not status:
status_info = line.split(" ", 2)
status = int(status_info[1])
else:
kv = line.split(":", 1)
if len(kv) == 2:
key, value = kv
headers[key.lower()] = value.strip().lower()
else:
raise WebSocketException("Invalid header")
if traceEnabled:
logger.debug("-----------------------")
return status, headers
def send(self, payload, opcode = ABNF.OPCODE_TEXT):
"""
Send the data as string.
        payload: Payload must be a utf-8 string or unicode,
                  if the opcode is OPCODE_TEXT.
                  Otherwise, it must be a string (byte array).
        opcode: operation code to send. Please see OPCODE_XXX.
"""
frame = ABNF.create_frame(payload, opcode)
if self.get_mask_key:
frame.get_mask_key = self.get_mask_key
data = frame.format()
self.io_sock.send(data)
if traceEnabled:
logger.debug("send: " + repr(data))
def ping(self, payload = ""):
"""
send ping data.
payload: data payload to send server.
"""
self.send(payload, ABNF.OPCODE_PING)
def pong(self, payload):
"""
send pong data.
payload: data payload to send server.
"""
self.send(payload, ABNF.OPCODE_PONG)
def recv(self):
"""
Receive string data(byte array) from the server.
return value: string(byte array) value.
"""
opcode, data = self.recv_data()
return data
def recv_data(self):
"""
        Receive data with operation code.
return value: tuple of operation code and string(byte array) value.
"""
while True:
frame = self.recv_frame()
if not frame:
# handle error:
# 'NoneType' object has no attribute 'opcode'
raise WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
return (frame.opcode, frame.data)
elif frame.opcode == ABNF.OPCODE_CLOSE:
self.send_close()
return (frame.opcode, None)
elif frame.opcode == ABNF.OPCODE_PING:
self.pong("Hi!")
def recv_frame(self):
"""
        Receive data as a frame from the server.
return value: ABNF frame object.
"""
header_bytes = self._recv(2)
if not header_bytes:
return None
b1 = ord(header_bytes[0])
fin = b1 >> 7 & 1
rsv1 = b1 >> 6 & 1
rsv2 = b1 >> 5 & 1
rsv3 = b1 >> 4 & 1
opcode = b1 & 0xf
b2 = ord(header_bytes[1])
mask = b2 >> 7 & 1
length = b2 & 0x7f
length_data = ""
if length == 0x7e:
length_data = self._recv(2)
length = struct.unpack("!H", length_data)[0]
elif length == 0x7f:
length_data = self._recv(8)
length = struct.unpack("!Q", length_data)[0]
mask_key = ""
if mask:
mask_key = self._recv(4)
data = self._recv_strict(length)
if traceEnabled:
            received = header_bytes + length_data + mask_key + data
            logger.debug("recv: " + repr(received))
if mask:
data = ABNF.mask(mask_key, data)
frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, mask, data)
return frame
def send_close(self, status = STATUS_NORMAL, reason = ""):
"""
send close data to the server.
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
"""
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
def close(self, status = STATUS_NORMAL, reason = ""):
"""
Close Websocket object
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
"""
if self.connected:
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
try:
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
timeout = self.sock.gettimeout()
self.sock.settimeout(3)
try:
frame = self.recv_frame()
if logger.isEnabledFor(logging.DEBUG):
logger.error("close status: " + repr(frame.data))
except:
pass
self.sock.settimeout(timeout)
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self._closeInternal()
def _closeInternal(self):
self.connected = False
self.sock.close()
self.io_sock = self.sock
def _recv(self, bufsize):
bytes = self.io_sock.recv(bufsize)
return bytes
def _recv_strict(self, bufsize):
remaining = bufsize
bytes = ""
while remaining:
bytes += self._recv(remaining)
remaining = bufsize - len(bytes)
return bytes
def _recv_line(self):
line = []
while True:
c = self._recv(1)
line.append(c)
if c == "\n":
break
return "".join(line)
class WebSocketApp(object):
"""
    Provides higher-level APIs, with an interface similar to the
    JavaScript WebSocket object.
"""
def __init__(self, url,
on_open = None, on_message = None, on_error = None,
on_close = None, keep_running = True, get_mask_key = None):
"""
url: websocket url.
        on_open: callable object which is called when the websocket is opened.
          This function has one argument: this class object.
        on_message: callable object which is called when data is received.
          on_message has 2 arguments.
          The 1st argument is this class object.
          The 2nd argument is the utf-8 string received from the server.
        on_error: callable object which is called when an error occurs.
          on_error has 2 arguments.
          The 1st argument is this class object.
          The 2nd argument is the exception object.
        on_close: callable object which is called when the connection is closed.
          This function has one argument: this class object.
keep_running: a boolean flag indicating whether the app's main loop should
keep running, defaults to True
get_mask_key: a callable to produce new mask keys, see the WebSocket.set_mask_key's
docstring for more information
"""
self.url = url
self.on_open = on_open
self.on_message = on_message
self.on_error = on_error
self.on_close = on_close
self.keep_running = keep_running
self.get_mask_key = get_mask_key
self.sock = None
def send(self, data):
"""
send message. data must be utf-8 string or unicode.
"""
self.sock.send(data)
def close(self):
"""
close websocket connection.
"""
self.keep_running = False
self.sock.close()
def run_forever(self):
"""
        Run the event loop for the WebSocket framework.
        This loop is infinite and runs for as long as the websocket is available.
"""
if self.sock:
raise WebSocketException("socket is already opened")
try:
self.sock = WebSocket(self.get_mask_key)
self.sock.connect(self.url)
self._run_with_no_err(self.on_open)
while self.keep_running:
data = self.sock.recv()
if data is None:
break
self._run_with_no_err(self.on_message, data)
except Exception, e:
self._run_with_no_err(self.on_error, e)
finally:
self.sock.close()
self._run_with_no_err(self.on_close)
self.sock = None
def _run_with_no_err(self, callback, *args):
if callback:
try:
callback(self, *args)
except Exception, e:
if logger.isEnabledFor(logging.DEBUG):
logger.error(e)
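# Illustrative sketch, not part of the original library: callback-style usage of
# WebSocketApp, mirroring the JavaScript WebSocket object. The URL and callback
# name are assumptions for illustration.
#
#     def _on_message(ws, message):
#         print message
#
#     app = WebSocketApp("ws://echo.websocket.org/", on_message=_on_message)
#     app.run_forever()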
if __name__ == "__main__":
enableTrace(True)
ws = create_connection("ws://echo.websocket.org/")
print "Sending 'Hello, World'..."
ws.send("Hello, World")
print "Sent"
print "Receiving..."
result = ws.recv()
print "Received '%s'" % result
ws.close()
|
|
import abc
import requests
import re
import logging
import sys
from html.parser import HTMLParser
from collections import defaultdict, OrderedDict
from kernel.IssuesModel import backlogIssuesModel
from kernel.IssuesWorkflow import issuesWorkflow
from kernel.WebReaders import TextAreaExtractor
__author__ = "Manuel Escriche <mev@tid.es>"
class PageParser(HTMLParser):
def __init__(self):
        if sys.version_info >= (3, 4):
            super().__init__()
        else:
            logging.error("Backlog tool requires python 3.4 or higher")
            sys.exit(1)
self._filter = re.compile(r'\s*(FIWARE[\w\-\.]+)\s*')
self._recording = False
self._link = ''
self._reference = ''
self._data = list()
def handle_starttag(self, tag, attrs):
# print("tag=", tag, "attrs=", attrs)
if tag != 'a':
return
for name, value in attrs:
if name == 'href':
if self._filter.search(value):
self._recording = True
self._link = value
def handle_data(self, data):
# print("data=", data)
if not self._recording:
return
m = self._filter.search(data)
if m:
self._reference = m.group()
else:
self._recording = False
def handle_endtag(self, tag):
if not self._recording:
return
if tag != 'a':
return
self._data.append(self._reference.strip())
self._recording = False
@property
def data(self):
return self._data
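# Illustrative sketch, not part of the original tool: feeding the parser an
# HTML fragment such as
#
#     parser = PageParser()
#     parser.feed('<a href="/wiki/FIWARE.Feature.Cloud.Example">'
#                 'FIWARE.Feature.Cloud.Example</a>')
#
# leaves parser.data == ['FIWARE.Feature.Cloud.Example']; anchors whose href
# does not match the FIWARE reference pattern are ignored.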
class PublisherItemRule(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __init__(self, name):
self.name = name
self.description = defaultdict(str)
@property
def status(self): return '--> Item status: NT= no test done, OK= test passed; KO= test not passed'
@abc.abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError()
def __iter__(self):
for key in sorted(self.description.keys()):
yield key
keys = __iter__
def values(self):
for key in self.description.keys():
yield self.description[key]
def items(self):
for key in self.description.keys():
yield (key, self.description[key])
def __getitem__(self, item): return self.description[item]
class RoadMapTest(PublisherItemRule):
def __init__(self, name, publisher):
super().__init__(name)
self.publisher = publisher
_key = lambda i: '{0}.{1}'.format(name, i)
self.description[_key(1)] = "Test for Technical Roadmap - undue publishing"
self.description[_key(2)] = "Test for Technical Roadmap - missing publication"
def _filter_excluded(self, item):
if item.issueType not in backlogIssuesModel.open:
return True
if item.resolution == 'Dismissed':
return True
return False
def _rule_correct_publication(self, item):
return True
def _rule_valid_no_publication(self, item):
return False
def __call__(self, item):
_key = lambda i: '{0}.{1}'.format(self.name, i)
if self._filter_excluded(item):
item.test.status[self.name] = 'NT'
return
output = defaultdict(str)
self.roadmap = self.publisher.readRoadmap(item.chapter)
if item.reference in self.roadmap:
output[_key(1)] = 'OK' if self._rule_correct_publication(item) else 'KO'
else:
output[_key(2)] = 'OK' if self._rule_valid_no_publication(item) else 'KO'
status = 'OK' if all(value == 'OK' for value in output.values()) else 'KO'
item.test.test[self.name] = output
item.test.status[self.name] = status
return
class MaterializingTest(PublisherItemRule):
def __init__(self, name, publisher):
super().__init__(name)
self.publisher = publisher
_key = lambda i: '{0}.{1}'.format(name, i)
self.description[_key(1)] = "Test for Materializing - undue publishing"
self.description[_key(2)] = "Test for Materializing - missing publication"
def _filter_excluded(self, item):
if item.issueType not in backlogIssuesModel.open:
return True
if item.resolution == 'Dismissed':
return True
return False
def _rule_correct_publication(self, item):
assert item.reference in self.materializing
if item.issueType in backlogIssuesModel.midTermTypes:
if item.status in issuesWorkflow.closed:
return True
return True if item.resolution == 'Done' else False
else:
return True if any([son.reference in self.materializing and
son.status in issuesWorkflow.closed for son in item.sons]) else False
def _rule_valid_no_publication(self, item):
assert item.reference not in self.materializing
if item.issueType in backlogIssuesModel.midTermTypes:
return False if item.status in issuesWorkflow.closed else True
else:
return False if any([son.reference in self.materializing for son in item.sons]) else True
def __call__(self, item):
_key = lambda i: '{0}.{1}'.format(self.name, i)
if self._filter_excluded(item):
item.test.status[self.name] = 'NT'
return
output = defaultdict(str)
self.materializing = self.publisher.readMaterializing(item.chapter)
if item.reference in self.materializing:
output[_key(1)] = 'OK' if self._rule_correct_publication(item) else 'KO'
else:
output[_key(2)] = 'OK' if self._rule_valid_no_publication(item) else 'KO'
status = 'OK' if all(value == 'OK' for value in output.values()) else 'KO'
item.test.test[self.name] = output
item.test.status[self.name] = status
return
class OpenDescriptionTest(PublisherItemRule):
def __init__(self, name, url):
super().__init__(name)
_key = lambda i: '{0}.{1}'.format(name, i)
self.description[_key(1)] = 'Test for Open Description - page availability'
self.description[_key(2)] = \
"Test for Open Description - right structure: Name, Chapter, Goal, Description and Rational"
self.description[_key(3)] = "Test for Open Description - Over-consistency between reference and name"
self.description[_key(4)] = "Test for Open Description - Incorrect chapter name"
self.description[_key(5)] = "Test for Open Description - less than 10 words in goal field"
self.description[_key(6)] = "Test for Open Description - less than 30 words in description field"
self.description[_key(7)] = "Test for Open Description - less than 20 words in rational field"
self.description[_key(8)] = "Test for Open Description - more than 250 words in record"
self._reader = TextAreaExtractor(url)
def _filter_excluded(self, item):
if item.issueType not in backlogIssuesModel.open:
return True
if item.test.status['RoadMap'] == 'KO':
return True
if item.test.status['Materializing'] == 'KO':
return True
return False
def _rule_available_page(self, item):
description = self._reader(item.reference)
item.openDescription = description if description['exist'] else None
# print(item.openDescription)
return True if item.openDescription else False
def _rule_right_description_structure(self, item):
if 'Name' not in item.openDescription:
return False
if 'Chapter' not in item.openDescription:
return False
if 'Goal' not in item.openDescription:
return False
if 'Description' not in item.openDescription:
return False
if 'Rationale' not in item.openDescription:
return False
return True
def _rule_overconsistency_name_reference(self, item):
if item.openDescription['Name'] == item.reference:
return True
if item.openDescription['Name'] == item.shortReference:
return True
return False
def _rule_proper_chapter(self, item):
return True if item.chapter == item.openDescription['Chapter'] else False
def _rule_proper_goal_size(self, item):
return True if item.openDescription.size('Goal') >= 10 else False
def _rule_proper_description_size(self, item):
return True if item.openDescription.size('Description') >= 30 else False
def _rule_proper_rationale_size(self, item):
return True if item.openDescription.size('Rationale') >= 20 else False
def _rule_proper_record_size(self, item):
return True if item.openDescription.NSize <= 250 else False
def __call__(self, item):
_key = lambda i: '{0}.{1}'.format(self.name, i)
if self._filter_excluded(item):
item.test.status[self.name] = 'NT'
return
output = defaultdict(str)
output[_key(1)] = 'OK' if self._rule_available_page(item) else 'KO'
if output[_key(1)] == 'OK':
output[_key(2)] = 'OK' if self._rule_right_description_structure(item) else 'KO'
if output[_key(2)] == 'OK':
output[_key(3)] = 'KO' if self._rule_overconsistency_name_reference(item) else 'OK'
output[_key(4)] = 'OK' if self._rule_proper_chapter(item) else 'KO'
output[_key(5)] = 'OK' if self._rule_proper_goal_size(item) else 'KO'
output[_key(6)] = 'OK' if self._rule_proper_description_size(item) else 'KO'
output[_key(7)] = 'OK' if self._rule_proper_rationale_size(item) else 'KO'
output[_key(8)] = 'OK' if self._rule_proper_record_size(item) else 'KO'
status = 'OK' if all(value == 'OK' for value in output.values()) else 'KO'
item.test.test[self.name] = output
item.test.status[self.name] = status
return
class PublisherTestBook:
def __init__(self, publisher, url):
self._store = OrderedDict()
criteria = 'RoadMap'
self._store[criteria] = RoadMapTest(criteria, publisher)
criteria = 'Materializing'
self._store[criteria] = MaterializingTest(criteria, publisher)
criteria = 'OpenDescription'
self._store[criteria] = OpenDescriptionTest(criteria, url)
@property
def store(self):
return self._store
def __iter__(self):
for rule in self._store.keys():
yield rule
keys = __iter__
def values(self):
for rule in self._store.keys():
yield self._store[rule]
def items(self):
for rule in self._store.keys():
yield (rule, self._store[rule])
def __getitem__(self, rule): return self._store[rule]
def __len__(self): return len(self._store)
class Publisher:
"""
Backlog Publisher
"""
url_root = 'http://forge.fiware.org/plugins/mediawiki/wiki/fiware/index.php/'
Roadmap = {'Cloud': 'Roadmap_of_Cloud_Hosting',
'Data': 'Roadmap_of_Data/Context_Management',
'IoT': 'Roadmap_of_Internet_of_Things_(IoT)_Services',
'Apps': 'Roadmap_of_Applications/Services_Ecosystem_and_Delivery_Framework',
'Security': 'Roadmap_of_Security',
'I2ND': 'Roadmap_of_Advanced_middleware,_Interface_to_Networks_and_Robotics',
'WebUI': 'Roadmap_of_Advanced_Web-based_UI'}
Materializing = {'Cloud': 'Materializing_Cloud_Hosting_in_FI-WARE',
'Data': 'Materializing_Data/Context_Management_in_FI-WARE',
'IoT': 'Materializing_Internet_of_Things_(IoT)_Services_Enablement_in_FI-WARE',
'Apps': 'Materializing_Applications/Services_Ecosystem_and_Delivery_Framework_in_FI-WARE',
'Security': 'Materializing_Security_in_FI-WARE',
'I2ND': 'Materializing_the_Interface_to_Networks_and_Devices_(I2ND)_in_FI-WARE',
'WebUI': 'Materializing_Advanced_Middleware_and_Web_User_Interfaces_in_FI-WARE'}
def __init__(self):
# self.roadmap = self._read_docs(Publisher.Roadmap)
# self.materializing = self._read_docs(Publisher.Materializing)
self.roadmap = dict()
self.materializing = dict()
self.testBook = PublisherTestBook(self, Publisher.url_root)
def _readPage(self, page_url):
try:
page = requests.get(Publisher.url_root + page_url)
except Exception as e:
logging.error(e)
logging.error('Failure when reading {}'.format(Publisher.url_root + page_url))
page = None
raise
parser = PageParser()
parser.feed(page.text)
return parser.data
def readRoadmap(self, chapter):
if chapter not in self.roadmap:
url = Publisher.Roadmap[chapter]
data = self._readPage(url)
data.extend(self._readPage(url + '(previous_releases)'))
self.roadmap[chapter] = data
return self.roadmap[chapter]
def readMaterializing(self, chapter):
if chapter not in self.materializing:
self.materializing[chapter] = self._readPage(Publisher.Materializing[chapter])
return self.materializing[chapter]
def _read_docs(self, doc):
# print('_read_docs')
__filter = re.compile(r'\[\s*(FIWARE[\w\-\.]+)\s*\]')
data = dict()
for chapter in doc:
# print(Publisher.url_root + doc[chapter])
page = requests.get(Publisher.url_root + doc[chapter])
parser = PageParser()
parser.feed(page.text)
# print(parser.data)
# _input = html2text(page.text)
# if chapter == 'Data': print(_input)
# data[chapter] = list(re.findall(__filter, html2text(page.text)))
data[chapter] = parser.data
# if chapter == 'Data': print(data[chapter])
return data
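# Illustrative sketch, not part of the original tool: how the rule book is meant
# to be driven for a backlog item. The item object and its attributes (chapter,
# reference, test.status, ...) are assumptions based on the rules above.
#
#     publisher = Publisher()
#     for name, rule in publisher.testBook.items():
#         rule(item)   # fills item.test.test[name] and item.test.status[name]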
if __name__ == "__main__":
pass
|
|
"""Tests for brim.service."""
"""Copyright and License.
Copyright 2012-2014 Gregory Holt
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
import ssl
import time
from errno import EADDRINUSE, EPERM
from os import devnull
from unittest import main, TestCase
from nose import SkipTest
from brim import service
class Test_capture_exceptions_stdout_stderr(TestCase):
def setUp(self):
self.orig_sys = service.sys
self.orig_dup2 = service.dup2
self.dup2calls = []
def _dup2(*args):
self.dup2calls.append(args)
service.sys = self
service.dup2 = _dup2
def tearDown(self):
service.sys = self.orig_sys
service.dup2 = self.orig_dup2
def test_calls_flush_dup2_on_standard_io(self):
class FakeFile(object):
def __init__(self, fileno):
self._fileno = fileno
self._flush_calls = 0
def fileno(self):
return self._fileno
def flush(self):
self._flush_calls += 1
self.stdout = stdout = FakeFile(456)
self.stderr = stderr = FakeFile(789)
service.capture_exceptions_stdout_stderr()
self.assertEqual(
set([b for a, b in self.dup2calls]),
set([stdout.fileno(), stderr.fileno()]))
self.assertEqual(stdout._flush_calls, 1)
self.assertEqual(stderr._flush_calls, 1)
def test_does_not_call_dup2_on_things_not_understood(self):
self.stdout = 'stdout'
self.stderr = 'stderr'
service.capture_exceptions_stdout_stderr()
self.assertEqual(len(self.dup2calls), 0)
def test_ignores_dup2_exceptions(self):
self.stdout = open(devnull, 'wb')
self.stderr = open(devnull, 'wb')
def _dup2(*args):
raise OSError()
orig_dup2 = service.dup2
try:
service.dup2 = _dup2
service.capture_exceptions_stdout_stderr()
finally:
service.dup2 = orig_dup2
def test_output_is_redirected(self):
self.stdout = 'stdout'
self.stderr = 'stderr'
service.capture_exceptions_stdout_stderr()
# These would raise exceptions if not replaced by the above call.
print >>self.stdout, 'test stdout'
print >>self.stderr, 'test stderr'
def test_excepthook_is_set(self):
self.excepthook = 'excepthook'
self.stdout = 'stdout'
self.stderr = 'stderr'
service.capture_exceptions_stdout_stderr()
        self.assertNotEqual(self.excepthook, 'excepthook')
def test_excepthook_calls_us(self):
self.stdout = 'stdout'
self.stderr = 'stderr'
calls = []
def _exc(*args):
calls.append(args)
service.capture_exceptions_stdout_stderr(_exc)
self.assertEqual(len(calls), 0)
self.excepthook(1, 2, 3)
self.assertEqual(calls, [(1, 2, 3)])
def test_stdout_calls_us(self):
self.stdout = 'stdout'
self.stderr = 'stderr'
calls = []
def _stdout(*args):
calls.append(args)
service.capture_exceptions_stdout_stderr(stdout_func=_stdout)
self.assertEqual(len(calls), 0)
print >>self.stdout, 'test'
self.assertEqual(calls, [('test\n',)])
def test_stderr_calls_us(self):
self.stdout = 'stdout'
self.stderr = 'stderr'
calls = []
def _stderr(*args):
calls.append(args)
service.capture_exceptions_stdout_stderr(stderr_func=_stderr)
self.assertEqual(len(calls), 0)
print >>self.stderr, 'test'
self.assertEqual(calls, [('test\n',)])
def test_combine_writes(self):
self.stdout = 'stdout'
self.stderr = 'stderr'
calls = []
def _stdout(*args):
calls.append(args)
service.capture_exceptions_stdout_stderr(stdout_func=_stdout)
self.assertEqual(len(calls), 0)
print >>self.stdout, 'test',
self.assertEqual(calls, [])
print >>self.stdout, 'and more'
self.assertEqual(calls, [('test and more\n',)])
def test_combine_writes_unless_flush(self):
self.stdout = 'stdout'
self.stderr = 'stderr'
calls = []
def _stdout(*args):
calls.append(args)
service.capture_exceptions_stdout_stderr(stdout_func=_stdout)
self.assertEqual(len(calls), 0)
print >>self.stdout, 'test',
self.assertEqual(calls, [])
self.stdout.flush()
self.assertEqual(calls, [('test',)])
print >>self.stdout, 'and more'
self.assertEqual(calls, [('test',), (' and more\n',)])
def test_close_just_flushes(self):
self.stdout = 'stdout'
self.stderr = 'stderr'
calls = []
def _stdout(*args):
calls.append(args)
service.capture_exceptions_stdout_stderr(stdout_func=_stdout)
self.assertEqual(len(calls), 0)
print >>self.stdout, 'test',
self.assertEqual(calls, [])
self.stdout.close()
self.assertEqual(calls, [('test',)])
print >>self.stdout, 'and more'
self.assertEqual(calls, [('test',), (' and more\n',)])
def test_writelines(self):
self.stdout = 'stdout'
self.stderr = 'stderr'
calls = []
def _stdout(*args):
calls.append(args)
service.capture_exceptions_stdout_stderr(stdout_func=_stdout)
self.assertEqual(len(calls), 0)
self.stdout.writelines(['abc\n', 'def', 'ghi\n', 'jkl'])
self.assertEqual(calls, [('abc\ndefghi\n',)])
self.stdout.flush()
self.assertEqual(calls, [('abc\ndefghi\n',), ('jkl',)])
class Test_droppriv(TestCase):
def setUp(self):
self.orig_geteuid = service.geteuid
self.orig_getegid = service.getegid
self.orig_getpwnam = service.getpwnam
self.orig_getgrnam = service.getgrnam
self.orig_setuid = service.setuid
self.orig_setgid = service.setgid
self.orig_os_umask = service.os_umask
self.orig_setsid = service.setsid
self.orig_chdir = service.chdir
self.orig_setgroups = service.setgroups
class PWNam(object):
def __init__(self, uid, gid):
self.pw_uid = uid
self.pw_gid = gid
class GrNam(object):
def __init__(self, gid):
self.gr_gid = gid
self.euid = 1
self.egid = 2
self.pwnam = {'user': PWNam(self.euid, self.egid)}
self.grnam = {'group': GrNam(self.egid)}
self.setuid_calls = []
self.setgid_calls = []
self.os_umask_calls = []
self.setsid_calls = []
self.chdir_calls = []
self.setgroups_calls = []
service.geteuid = lambda: self.euid
service.getegid = lambda: self.egid
service.getpwnam = lambda u: self.pwnam[u]
service.getgrnam = lambda g: self.grnam[g]
service.setuid = lambda *a: self.setuid_calls.append(a)
service.setgid = lambda *a: self.setgid_calls.append(a)
service.os_umask = lambda *a: self.os_umask_calls.append(a)
service.setsid = lambda *a: self.setsid_calls.append(a)
service.chdir = lambda *a: self.chdir_calls.append(a)
service.setgroups = lambda *a: self.setgroups_calls.append(a)
def tearDown(self):
service.geteuid = self.orig_geteuid
service.getegid = self.orig_getegid
service.getpwnam = self.orig_getpwnam
service.getgrnam = self.orig_getgrnam
service.setuid = self.orig_setuid
service.setgid = self.orig_setgid
service.os_umask = self.orig_os_umask
service.setsid = self.orig_setsid
service.chdir = self.orig_chdir
service.setgroups = self.orig_setgroups
def test_droppriv_to_same_uid_gid(self):
service.droppriv('user')
self.assertEqual(self.setgroups_calls, [([],)])
self.assertEqual(self.setuid_calls, [(1,)])
self.assertEqual(self.setgid_calls, [(2,)])
self.assertEqual(self.os_umask_calls, [(0022,)])
self.assertEqual(self.setsid_calls, [()])
self.assertEqual(self.chdir_calls, [('/',)])
def test_droppriv_to_different_uid_default_gid(self):
self.pwnam['user'].pw_uid = 10
self.pwnam['user'].pw_gid = 20
self.grnam['group'].gr_gid = 30
service.droppriv('user')
self.assertEqual(self.setgroups_calls, [([],)])
self.assertEqual(self.setuid_calls, [(10,)])
self.assertEqual(self.setgid_calls, [(20,)])
self.assertEqual(self.os_umask_calls, [(0022,)])
self.assertEqual(self.setsid_calls, [()])
self.assertEqual(self.chdir_calls, [('/',)])
def test_droppriv_to_different_uid_gid(self):
self.pwnam['user'].pw_uid = 10
self.pwnam['user'].pw_gid = 20
self.grnam['group'].gr_gid = 30
service.droppriv('user', 'group')
self.assertEqual(self.setgroups_calls, [([],)])
self.assertEqual(self.setuid_calls, [(10,)])
self.assertEqual(self.setgid_calls, [(30,)])
self.assertEqual(self.os_umask_calls, [(0022,)])
self.assertEqual(self.setsid_calls, [()])
self.assertEqual(self.chdir_calls, [('/',)])
def test_droppriv_umask(self):
service.droppriv('user', umask=0123)
self.assertEqual(self.setgroups_calls, [([],)])
self.assertEqual(self.setuid_calls, [(1,)])
self.assertEqual(self.setgid_calls, [(2,)])
self.assertEqual(self.os_umask_calls, [(0123,)])
self.assertEqual(self.setsid_calls, [()])
self.assertEqual(self.chdir_calls, [('/',)])
def test_droppriv_unknown_user(self):
exc = None
try:
service.droppriv('unknown')
except Exception as err:
exc = err
self.assertEqual(str(exc), "Cannot switch to unknown user 'unknown'.")
self.assertEqual(self.setgroups_calls, [([],)])
self.assertEqual(self.setuid_calls, [])
self.assertEqual(self.setgid_calls, [])
self.assertEqual(self.os_umask_calls, [])
self.assertEqual(self.setsid_calls, [])
self.assertEqual(self.chdir_calls, [])
def test_droppriv_unknown_group(self):
exc = None
try:
service.droppriv('user', 'unknown')
except Exception as err:
exc = err
self.assertEqual(str(exc), "Cannot switch to unknown group 'unknown'.")
self.assertEqual(self.setgroups_calls, [([],)])
self.assertEqual(self.setuid_calls, [])
self.assertEqual(self.setgid_calls, [])
self.assertEqual(self.os_umask_calls, [])
self.assertEqual(self.setsid_calls, [])
self.assertEqual(self.chdir_calls, [])
def test_setuid_failure(self):
def _setuid(*args):
raise OSError()
exc = None
orig_setuid = service.setuid
try:
service.setuid = _setuid
service.droppriv('user')
except Exception as err:
exc = err
finally:
service.setuid = orig_setuid
self.assertEqual(
str(exc), "Permission denied when switching to user 'user'.")
self.assertEqual(self.setgroups_calls, [([],)])
self.assertEqual(self.setuid_calls, [])
# This also asserts setgid is called before setuid.
self.assertEqual(self.setgid_calls, [(2,)])
self.assertEqual(self.os_umask_calls, [])
self.assertEqual(self.setsid_calls, [])
self.assertEqual(self.chdir_calls, [])
def test_setgid_failure(self):
def _setgid(*args):
raise OSError()
exc = None
orig_setgid = service.setgid
try:
service.setgid = _setgid
service.droppriv('user', 'group')
except Exception as err:
exc = err
finally:
service.setgid = orig_setgid
self.assertEqual(
str(exc), "Permission denied when switching to group 'group'.")
self.assertEqual(self.setgroups_calls, [([],)])
# This also asserts setuid is not called before setgid.
self.assertEqual(self.setuid_calls, [])
self.assertEqual(self.setgid_calls, [])
self.assertEqual(self.os_umask_calls, [])
self.assertEqual(self.setsid_calls, [])
self.assertEqual(self.chdir_calls, [])
def test_setgroups_failure(self):
setgroups_calls = []
def _setgroups(*args):
setgroups_calls.append(args)
e = OSError('test')
e.errno = 0
raise e
exc = None
orig_setgroups = service.setgroups
try:
service.setgroups = _setgroups
service.droppriv('user')
except Exception as err:
exc = err
finally:
service.setgroups = orig_setgroups
self.assertEqual(str(exc), 'test')
self.assertEqual(setgroups_calls, [([],)])
self.assertEqual(self.setuid_calls, [])
self.assertEqual(self.setgid_calls, [])
self.assertEqual(self.os_umask_calls, [])
self.assertEqual(self.setsid_calls, [])
self.assertEqual(self.chdir_calls, [])
def test_setgroups_perm_failure_ignored(self):
setgroups_calls = []
def _setgroups(*args):
setgroups_calls.append(args)
e = OSError('test')
e.errno = EPERM
raise e
exc = None
orig_setgroups = service.setgroups
try:
service.setgroups = _setgroups
service.droppriv('user')
except Exception as err:
exc = err
finally:
service.setgroups = orig_setgroups
self.assertEqual(exc, None)
self.assertEqual(setgroups_calls, [([],)])
self.assertEqual(self.setuid_calls, [(1,)])
self.assertEqual(self.setgid_calls, [(2,)])
self.assertEqual(self.os_umask_calls, [(0022,)])
self.assertEqual(self.setsid_calls, [()])
self.assertEqual(self.chdir_calls, [('/',)])
def test_setsid_failure(self):
setsid_calls = []
def _setsid(*args):
setsid_calls.append(args)
e = OSError('test')
e.errno = 0
raise e
exc = None
orig_setsid = service.setsid
try:
service.setsid = _setsid
service.droppriv('user')
except Exception as err:
exc = err
finally:
service.setsid = orig_setsid
self.assertEqual(str(exc), 'test')
self.assertEqual(self.setgroups_calls, [([],)])
self.assertEqual(self.setuid_calls, [(1,)])
self.assertEqual(self.setgid_calls, [(2,)])
self.assertEqual(self.os_umask_calls, [(0022,)])
self.assertEqual(setsid_calls, [()])
self.assertEqual(self.chdir_calls, [])
def test_setsid_perm_failure_ignored(self):
setsid_calls = []
def _setsid(*args):
setsid_calls.append(args)
e = OSError('test')
e.errno = EPERM
raise e
exc = None
orig_setsid = service.setsid
try:
service.setsid = _setsid
service.droppriv('user')
except Exception as err:
exc = err
finally:
service.setsid = orig_setsid
self.assertEqual(exc, None)
self.assertEqual(self.setgroups_calls, [([],)])
self.assertEqual(self.setuid_calls, [(1,)])
self.assertEqual(self.setgid_calls, [(2,)])
self.assertEqual(self.os_umask_calls, [(0022,)])
self.assertEqual(setsid_calls, [()])
self.assertEqual(self.chdir_calls, [('/',)])
class FakeSocket(object):
def __init__(self, *args):
self.init = args
self.setsockopt_calls = []
self.bind_calls = []
self.listen_calls = []
def setsockopt(self, *args):
self.setsockopt_calls.append(args)
def bind(self, *args):
self.bind_calls.append(args)
def listen(self, *args):
self.listen_calls.append(args)
class NonBindingSocket(FakeSocket):
def bind(self, *args):
self.bind_calls.append(args)
exc = socket.error()
exc.errno = EADDRINUSE
raise exc
class BadBindSocket(FakeSocket):
def bind(self, *args):
exc = socket.error('badbind')
exc.errno = EPERM
raise exc
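# Added note on the test doubles above: NonBindingSocket simulates an
# EADDRINUSE failure on bind() so the retry loop in get_listening_*_socket is
# exercised, while BadBindSocket raises an unrelated bind error (EPERM) so the
# tests can check that unexpected exceptions are re-raised instead of retried.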
class Test_get_listening_tcp_socket(TestCase):
def setUp(self):
self.orig_getaddrinfo = socket.getaddrinfo
self.orig_socket = socket.socket
self.orig_time = service.time
self.orig_sleep = time.sleep
self.orig_wrap_socket = ssl.wrap_socket
self.getaddrinfo_calls = []
self.getaddrinfo_return = ((socket.AF_INET,),)
self.time_calls = []
self.time_value = 0
self.sleep_calls = []
self.wrap_socket_calls = []
def _getaddrinfo(*args):
self.getaddrinfo_calls.append(args)
return self.getaddrinfo_return
def _time(*args):
self.time_calls.append(args)
self.time_value += 1
return self.time_value
def _wrap_socket(*args, **kwargs):
self.wrap_socket_calls.append((args, kwargs))
return 'wrappedsock'
socket.getaddrinfo = _getaddrinfo
socket.socket = FakeSocket
service.time = _time
time.sleep = lambda *a: self.sleep_calls.append(a)
ssl.wrap_socket = _wrap_socket
def tearDown(self):
socket.getaddrinfo = self.orig_getaddrinfo
socket.socket = self.orig_socket
service.time = self.orig_time
time.sleep = self.orig_sleep
ssl.wrap_socket = self.orig_wrap_socket
def test_happy_path_inet(self):
ip = '1.2.3.4'
port = 5678
sock = service.get_listening_tcp_socket(ip, port)
self.assertEqual(
self.getaddrinfo_calls,
[(ip, port, socket.AF_UNSPEC, socket.SOCK_STREAM)])
self.assertEqual(sock.init, (socket.AF_INET, socket.SOCK_STREAM))
self.assertEqual(set(sock.setsockopt_calls), set([
(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1),
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)]))
self.assertEqual(sock.bind_calls, [((ip, port),)])
self.assertEqual(sock.listen_calls, [(4096,)])
self.assertEqual(self.wrap_socket_calls, [])
def test_happy_path_inet6(self):
self.getaddrinfo_return = ((socket.AF_INET6,),)
sock = service.get_listening_tcp_socket('1.2.3.4', 5678)
self.assertEqual(sock.init, (socket.AF_INET6, socket.SOCK_STREAM))
def test_uses_passed_backlog(self):
backlog = 1000
sock = service.get_listening_tcp_socket('1.2.3.4', 5678, backlog)
self.assertEqual(sock.listen_calls, [(backlog,)])
def test_retries(self):
socket.socket = NonBindingSocket
exc = None
try:
service.get_listening_tcp_socket('1.2.3.4', 5678)
except Exception as err:
exc = err
self.assertEqual(
str(exc),
'Could not bind to 1.2.3.4:5678 after trying for 30 seconds.')
# Calls time once before loop to calculate when to stop and once per
# loop to see if it's time to stop.
self.assertEqual(self.time_value, 31)
self.assertEqual(len(self.time_calls), 31)
# Sleeps 29 times and then sees it's been 30s (the default retry time).
self.assertEqual(len(self.sleep_calls), 29)
def test_uses_passed_retry(self):
socket.socket = NonBindingSocket
exc = None
try:
service.get_listening_tcp_socket('1.2.3.4', 5678, retry=10)
except Exception as err:
exc = err
self.assertEqual(
str(exc),
'Could not bind to 1.2.3.4:5678 after trying for 10 seconds.')
# Calls time once before loop to calculate when to stop and once per
# loop to see if it's time to stop.
self.assertEqual(self.time_value, 11)
self.assertEqual(len(self.time_calls), 11)
# Sleeps 9 times and then sees it's been 10s.
self.assertEqual(len(self.sleep_calls), 9)
def test_wraps_socket(self):
certfile = 'certfile'
keyfile = 'keyfile'
sock = service.get_listening_tcp_socket(
'1.2.3.4', 5678, certfile=certfile, keyfile=keyfile)
self.assertEqual(sock, 'wrappedsock')
self.assertEqual(len(self.wrap_socket_calls), 1)
self.assertEqual(
self.wrap_socket_calls[0][1],
{'certfile': 'certfile', 'keyfile': 'keyfile'})
def test_uses_eventlet_socket(self):
try:
import eventlet.green.socket
except ImportError:
raise SkipTest()
orig_esocket = eventlet.green.socket.socket
orig_egetaddrinfo = eventlet.green.socket.getaddrinfo
egetaddrinfo_calls = []
def _getaddrinfo(*args):
egetaddrinfo_calls.append(args)
return self.getaddrinfo_return
try:
# Won't bind unless it uses eventlet's socket.
socket.socket = NonBindingSocket
eventlet.green.socket.socket = FakeSocket
eventlet.green.socket.getaddrinfo = _getaddrinfo
ip = '1.2.3.4'
port = 5678
sock = service.get_listening_tcp_socket(ip, port, style='eventlet')
self.assertEqual(
egetaddrinfo_calls,
[(ip, port, socket.AF_UNSPEC, socket.SOCK_STREAM)])
self.assertEqual(sock.init, (socket.AF_INET, socket.SOCK_STREAM))
self.assertEqual(set(sock.setsockopt_calls), set([
(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1),
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)]))
self.assertEqual(sock.bind_calls, [((ip, port),)])
self.assertEqual(sock.listen_calls, [(4096,)])
self.assertEqual(self.wrap_socket_calls, [])
finally:
eventlet.green.socket.socket = orig_esocket
eventlet.green.socket.getaddrinfo = orig_egetaddrinfo
def test_uses_eventlet_wrap_socket(self):
try:
import eventlet.green.socket
import eventlet.green.ssl
except ImportError:
raise SkipTest()
orig_esocket = eventlet.green.socket.socket
orig_egetaddrinfo = eventlet.green.socket.getaddrinfo
orig_ewrap_socket = eventlet.green.ssl.wrap_socket
egetaddrinfo_calls = []
ewrap_socket_calls = []
def _getaddrinfo(*args):
egetaddrinfo_calls.append(args)
return self.getaddrinfo_return
def _ewrap_socket(*args, **kwargs):
ewrap_socket_calls.append((args, kwargs))
return 'ewrappedsock'
try:
eventlet.green.socket.socket = FakeSocket
eventlet.green.socket.getaddrinfo = _getaddrinfo
eventlet.green.ssl.wrap_socket = _ewrap_socket
certfile = 'certfile'
keyfile = 'keyfile'
sock = service.get_listening_tcp_socket(
'1.2.3.4', 5678, style='eventlet', certfile=certfile,
keyfile=keyfile)
self.assertEqual(sock, 'ewrappedsock')
self.assertEqual(len(ewrap_socket_calls), 1)
self.assertEqual(
ewrap_socket_calls[0][1],
{'certfile': 'certfile', 'keyfile': 'keyfile'})
finally:
eventlet.green.socket.socket = orig_esocket
eventlet.green.socket.getaddrinfo = orig_egetaddrinfo
eventlet.green.ssl.wrap_socket = orig_ewrap_socket
def test_uses_eventlet_sleep(self):
try:
import eventlet
import eventlet.green.socket
except ImportError:
raise SkipTest()
orig_sleep = eventlet.sleep
orig_esocket = eventlet.green.socket.socket
esleep_calls = []
try:
eventlet.sleep = lambda *a: esleep_calls.append(a)
eventlet.green.socket.socket = NonBindingSocket
exc = None
try:
service.get_listening_tcp_socket(
'1.2.3.4', 5678, style='eventlet')
except Exception as err:
exc = err
self.assertEqual(
str(exc),
'Could not bind to 1.2.3.4:5678 after trying for 30 seconds.')
self.assertEqual(len(esleep_calls), 29)
self.assertEqual(len(self.sleep_calls), 0)
finally:
eventlet.sleep = orig_sleep
eventlet.green.socket.socket = orig_esocket
def test_invalid_style(self):
exc = None
try:
service.get_listening_tcp_socket('1.2.3.4', 5678, style='invalid')
except Exception as err:
exc = err
self.assertEqual(str(exc), "Socket style 'invalid' not understood.")
def test_ip_as_none_is_all(self):
sock = service.get_listening_tcp_socket(None, 5678)
self.assertEqual(sock.bind_calls[0][0][0], '0.0.0.0')
def test_ip_as_star_is_all(self):
sock = service.get_listening_tcp_socket('*', 5678)
self.assertEqual(sock.bind_calls[0][0][0], '0.0.0.0')
def test_no_family_raises_exception(self):
self.getaddrinfo_return = ((socket.AF_APPLETALK,),)
exc = None
try:
service.get_listening_tcp_socket('1.2.3.4', 5678)
except Exception as err:
exc = err
self.assertEqual(
str(exc),
'Could not determine address family of 1.2.3.4:5678 for binding.')
def test_odd_exception_reraised(self):
socket.socket = BadBindSocket
exc = None
try:
service.get_listening_tcp_socket('1.2.3.4', 5678)
except Exception as err:
exc = err
self.assertEqual(str(exc), 'badbind')
class Test_get_listening_udp_socket(TestCase):
def setUp(self):
self.orig_getaddrinfo = socket.getaddrinfo
self.orig_socket = socket.socket
self.orig_time = service.time
self.orig_sleep = time.sleep
self.getaddrinfo_calls = []
self.getaddrinfo_return = ((socket.AF_INET,),)
self.time_calls = []
self.time_value = 0
self.sleep_calls = []
def _getaddrinfo(*args):
self.getaddrinfo_calls.append(args)
return self.getaddrinfo_return
def _time(*args):
self.time_calls.append(args)
self.time_value += 1
return self.time_value
socket.getaddrinfo = _getaddrinfo
socket.socket = FakeSocket
service.time = _time
time.sleep = lambda *a: self.sleep_calls.append(a)
def tearDown(self):
socket.getaddrinfo = self.orig_getaddrinfo
socket.socket = self.orig_socket
service.time = self.orig_time
time.sleep = self.orig_sleep
def test_happy_path_inet(self):
ip = '1.2.3.4'
port = 5678
sock = service.get_listening_udp_socket(ip, port)
self.assertEqual(
self.getaddrinfo_calls,
[(ip, port, socket.AF_UNSPEC, socket.SOCK_DGRAM)])
self.assertEqual(sock.init, (socket.AF_INET, socket.SOCK_DGRAM))
self.assertEqual(set(sock.setsockopt_calls), set([
(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)]))
self.assertEqual(sock.bind_calls, [((ip, port),)])
def test_happy_path_inet6(self):
self.getaddrinfo_return = ((socket.AF_INET6,),)
sock = service.get_listening_udp_socket('1.2.3.4', 5678)
self.assertEqual(sock.init, (socket.AF_INET6, socket.SOCK_DGRAM))
def test_retries(self):
socket.socket = NonBindingSocket
exc = None
try:
service.get_listening_udp_socket('1.2.3.4', 5678)
except Exception as err:
exc = err
self.assertEqual(
str(exc),
'Could not bind to 1.2.3.4:5678 after trying for 30 seconds.')
# Calls time once before loop to calculate when to stop and once per
# loop to see if it's time to stop.
self.assertEqual(self.time_value, 31)
self.assertEqual(len(self.time_calls), 31)
# Sleeps 29 times and then sees it's been 30s (the default retry time).
self.assertEqual(len(self.sleep_calls), 29)
def test_uses_passed_retry(self):
socket.socket = NonBindingSocket
exc = None
try:
service.get_listening_udp_socket('1.2.3.4', 5678, retry=10)
except Exception as err:
exc = err
self.assertEqual(
str(exc),
'Could not bind to 1.2.3.4:5678 after trying for 10 seconds.')
# Calls time once before loop to calculate when to stop and once per
# loop to see if it's time to stop.
self.assertEqual(self.time_value, 11)
self.assertEqual(len(self.time_calls), 11)
# Sleeps 9 times and then sees it's been 10s.
self.assertEqual(len(self.sleep_calls), 9)
def test_uses_eventlet_socket(self):
try:
import eventlet.green.socket
except ImportError:
raise SkipTest()
orig_esocket = eventlet.green.socket.socket
orig_egetaddrinfo = eventlet.green.socket.getaddrinfo
egetaddrinfo_calls = []
def _getaddrinfo(*args):
egetaddrinfo_calls.append(args)
return self.getaddrinfo_return
try:
# Won't bind unless it uses eventlet's socket.
socket.socket = NonBindingSocket
eventlet.green.socket.socket = FakeSocket
eventlet.green.socket.getaddrinfo = _getaddrinfo
ip = '1.2.3.4'
port = 5678
sock = service.get_listening_udp_socket(ip, port, style='eventlet')
self.assertEqual(
egetaddrinfo_calls,
[(ip, port, socket.AF_UNSPEC, socket.SOCK_DGRAM)])
self.assertEqual(sock.init, (socket.AF_INET, socket.SOCK_DGRAM))
self.assertEqual(set(sock.setsockopt_calls), set([
(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)]))
self.assertEqual(sock.bind_calls, [((ip, port),)])
finally:
eventlet.green.socket.socket = orig_esocket
eventlet.green.socket.getaddrinfo = orig_egetaddrinfo
def test_uses_eventlet_sleep(self):
try:
import eventlet
import eventlet.green.socket
except ImportError:
raise SkipTest()
orig_sleep = eventlet.sleep
orig_esocket = eventlet.green.socket.socket
esleep_calls = []
try:
eventlet.sleep = lambda *a: esleep_calls.append(a)
eventlet.green.socket.socket = NonBindingSocket
exc = None
try:
service.get_listening_udp_socket(
'1.2.3.4', 5678, style='eventlet')
except Exception as err:
exc = err
self.assertEqual(
str(exc),
'Could not bind to 1.2.3.4:5678 after trying for 30 seconds.')
self.assertEqual(len(esleep_calls), 29)
self.assertEqual(len(self.sleep_calls), 0)
finally:
eventlet.sleep = orig_sleep
eventlet.green.socket.socket = orig_esocket
def test_invalid_style(self):
exc = None
try:
service.get_listening_udp_socket('1.2.3.4', 5678, style='invalid')
except Exception as err:
exc = err
self.assertEqual(str(exc), "Socket style 'invalid' not understood.")
def test_ip_as_none_is_all(self):
sock = service.get_listening_udp_socket(None, 5678)
self.assertEqual(sock.bind_calls[0][0][0], '0.0.0.0')
def test_ip_as_star_is_all(self):
sock = service.get_listening_udp_socket('*', 5678)
self.assertEqual(sock.bind_calls[0][0][0], '0.0.0.0')
def test_no_family_raises_exception(self):
self.getaddrinfo_return = ((socket.AF_APPLETALK,),)
exc = None
try:
service.get_listening_udp_socket('1.2.3.4', 5678)
except Exception as err:
exc = err
self.assertEqual(
str(exc),
'Could not determine address family of 1.2.3.4:5678 for binding.')
def test_odd_exception_reraised(self):
socket.socket = BadBindSocket
exc = None
try:
service.get_listening_udp_socket('1.2.3.4', 5678)
except Exception as err:
exc = err
self.assertEqual(str(exc), 'badbind')
class Test_signum2str(TestCase):
def test_signum2str(self):
self.assertEqual(service.signum2str(1), 'SIGHUP')
self.assertEqual(service.signum2str(12), 'SIGUSR2')
self.assertEqual(service.signum2str(999999), 'UNKNOWN')
class FakeLogger(object):
def __init__(self):
self.debug_calls = []
self.info_calls = []
self.exception_calls = []
def debug(self, *args):
self.debug_calls.append(args)
def info(self, *args):
self.info_calls.append(args)
def exception(self, *args):
self.exception_calls.append(args)
class Test_sustain_workers(TestCase):
def setUp(self):
self.orig_sleep = time.sleep
self.orig_signal = service.signal
self.orig_fork = service.fork
self.orig_os_wait = service.os_wait
self.orig_wifexited = service.WIFEXITED
self.orig_wifsignaled = service.WIFSIGNALED
self.orig_killpg = service.killpg
self.sleep_calls = []
self.signal_calls = []
self.killpg_calls = []
self.worker_func_calls = []
time.sleep = lambda *a: self.sleep_calls.append(a)
service.signal = lambda *a: self.signal_calls.append(a)
service.fork = lambda *a: 1
service.os_wait = lambda *a: (1, 0)
service.WIFEXITED = lambda *a: True
service.WIFSIGNALED = lambda *a: True
service.killpg = lambda *a: self.killpg_calls.append(a)
self.worker_func = lambda *a: self.worker_func_calls.append(a)
def tearDown(self):
time.sleep = self.orig_sleep
service.signal = self.orig_signal
service.fork = self.orig_fork
service.os_wait = self.orig_os_wait
service.WIFEXITED = self.orig_wifexited
service.WIFSIGNALED = self.orig_wifsignaled
service.killpg = self.orig_killpg
def test_workers0(self):
logger = FakeLogger()
service.sustain_workers(0, self.worker_func, logger)
self.assertEqual(self.worker_func_calls, [(0,)])
self.assertEqual(
logger.debug_calls,
[('wid:000 pid:%s Starting inproc worker.' % service.getpid(),)])
self.assertEqual(
logger.info_calls,
[('Exiting due to workers = 0 mode.',)])
def test_workers0_no_logger(self):
service.sustain_workers(0, self.worker_func)
self.assertEqual(self.worker_func_calls, [(0,)])
def test_sigterm_exit(self):
logger = FakeLogger()
def _os_wait(*args):
self.signal_calls[0][1]()
return (1, 0)
service.os_wait = _os_wait
service.sustain_workers(1, self.worker_func, logger)
self.assertEqual(logger.debug_calls, [])
self.assertEqual(logger.info_calls, [('Exiting due to SIGTERM.',)])
self.assertEqual(self.killpg_calls, [(0, service.SIGTERM)])
def test_sighup_exit(self):
logger = FakeLogger()
def _os_wait(*args):
self.signal_calls[1][1]()
return (1, 0)
service.os_wait = _os_wait
service.sustain_workers(1, self.worker_func, logger)
self.assertEqual(logger.debug_calls, [])
self.assertEqual(logger.info_calls, [('Exiting due to SIGHUP.',)])
self.assertEqual(self.killpg_calls, [(0, service.SIGHUP)])
def test_keyboard_interrupt_exit(self):
logger = FakeLogger()
def _os_wait(*args):
raise KeyboardInterrupt()
service.os_wait = _os_wait
service.sustain_workers(1, self.worker_func, logger)
self.assertEqual(logger.debug_calls, [])
self.assertEqual(logger.info_calls, [('Exiting due to SIGINT.',)])
self.assertEqual(self.killpg_calls, [(0, service.SIGINT)])
def test_no_logger_ok(self):
def _os_wait(*args):
raise KeyboardInterrupt()
service.os_wait = _os_wait
service.sustain_workers(1, self.worker_func)
self.assertEqual(self.killpg_calls, [(0, service.SIGINT)])
def test_oserror_unknown_reraise(self):
logger = FakeLogger()
def _os_wait(*args):
raise OSError('testing')
service.os_wait = _os_wait
exc = None
try:
service.sustain_workers(1, self.worker_func, logger)
except Exception as err:
exc = err
self.assertEqual(str(exc), 'testing')
def test_oserror_eintr_cycle(self):
logger = FakeLogger()
self.called = [0]
def _os_wait(*args):
self.called[0] += 1
if self.called[0] == 2:
raise KeyboardInterrupt()
err = OSError('testing')
err.errno = service.EINTR
raise err
service.os_wait = _os_wait
service.sustain_workers(1, self.worker_func, logger)
self.assertEqual(logger.debug_calls, [])
self.assertEqual(logger.info_calls, [('Exiting due to SIGINT.',)])
self.assertEqual(self.killpg_calls, [(0, service.SIGINT)])
self.assertEqual(self.called[0], 2)
def test_oserror_echild_cycle(self):
logger = FakeLogger()
self.called = [0]
def _os_wait(*args):
self.called[0] += 1
if self.called[0] == 2:
raise KeyboardInterrupt()
err = OSError('testing')
err.errno = service.ECHILD
raise err
service.os_wait = _os_wait
service.sustain_workers(1, self.worker_func, logger)
self.assertEqual(logger.debug_calls, [])
self.assertEqual(logger.info_calls, [('Exiting due to SIGINT.',)])
self.assertEqual(self.killpg_calls, [(0, service.SIGINT)])
self.assertEqual(self.called[0], 2)
def test_child(self):
logger = FakeLogger()
service.fork = lambda *a: 0
service.sustain_workers(1, self.worker_func, logger)
# Asserts the TERM and HUP signal handlers are cleared in the child.
self.assertEqual(
set(self.signal_calls[-2:]),
set([(service.SIGHUP, 0), (service.SIGTERM, 0)]))
self.assertEqual(self.worker_func_calls, [(0,)])
self.assertEqual(logger.debug_calls, [
('wid:000 ppid:%s pid:%s Starting worker.' %
(service.getppid(), service.getpid()),),
('wid:000 ppid:%s pid:%s Worker exited.' %
(service.getppid(), service.getpid()),)])
self.assertEqual(logger.info_calls, [])
def test_child_exception(self):
def _worker_func(*args):
raise Exception('testing')
logger = FakeLogger()
service.fork = lambda *a: 0
exc = None
try:
service.sustain_workers(1, _worker_func, logger)
except Exception as err:
exc = err
self.assertEqual(str(exc), 'testing')
self.assertEqual(logger.debug_calls, [
('wid:000 ppid:%s pid:%s Starting worker.' %
(service.getppid(), service.getpid()),)])
self.assertEqual(logger.info_calls, [])
self.assertEqual(logger.exception_calls, [(
'wid:000 ppid:%s pid:%s Worker exited due to exception: testing' %
(service.getppid(), service.getpid()),)])
def test_no_sleep_on_initial_launch(self):
fork_calls = []
def _os_wait(*args):
raise KeyboardInterrupt()
def _fork(*args):
fork_calls.append(args)
return len(fork_calls)
service.os_wait = _os_wait
service.fork = _fork
service.sustain_workers(5, self.worker_func)
self.assertEqual(fork_calls, [()] * 5)
self.assertEqual(self.sleep_calls, [])
def test_sleep_on_relaunches(self):
fork_calls = []
def _os_wait_int(*args):
raise KeyboardInterrupt()
def _os_wait(*args):
service.os_wait = _os_wait_int
return 1, 0
def _fork(*args):
fork_calls.append(args)
return len(fork_calls)
service.os_wait = _os_wait
service.fork = _fork
service.sustain_workers(5, self.worker_func)
self.assertEqual(fork_calls, [()] * 6)
self.assertEqual(self.sleep_calls, [(1,)])
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
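# Illustrative note (added comment, not part of the generated code): the
# metaclass above makes member lookup case-insensitive, so for any enum below
# e.g. CommandState['succeeded'] and CommandState.SUCCEEDED refer to the same
# member, and because the enums also subclass str, that member compares equal
# to its wire value "Succeeded".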
class AuthenticationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of possible authentication types when connecting
"""
NONE = "None"
WINDOWS_AUTHENTICATION = "WindowsAuthentication"
SQL_AUTHENTICATION = "SqlAuthentication"
ACTIVE_DIRECTORY_INTEGRATED = "ActiveDirectoryIntegrated"
ACTIVE_DIRECTORY_PASSWORD = "ActiveDirectoryPassword"
class BackupFileStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of Status of the log backup file.
"""
ARRIVED = "Arrived"
QUEUED = "Queued"
UPLOADING = "Uploading"
UPLOADED = "Uploaded"
RESTORING = "Restoring"
RESTORED = "Restored"
CANCELLED = "Cancelled"
class BackupMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of backup modes
"""
CREATE_BACKUP = "CreateBackup"
EXISTING_BACKUP = "ExistingBackup"
class BackupType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Enum of the different backup types.
"""
DATABASE = "Database"
TRANSACTION_LOG = "TransactionLog"
FILE = "File"
DIFFERENTIAL_DATABASE = "DifferentialDatabase"
DIFFERENTIAL_FILE = "DifferentialFile"
PARTIAL = "Partial"
DIFFERENTIAL_PARTIAL = "DifferentialPartial"
class CommandState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The state of the command. This is ignored if submitted.
"""
UNKNOWN = "Unknown"
ACCEPTED = "Accepted"
RUNNING = "Running"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity that created the resource.
"""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class DatabaseCompatLevel(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of SQL Server database compatibility levels
"""
COMPAT_LEVEL80 = "CompatLevel80"
COMPAT_LEVEL90 = "CompatLevel90"
COMPAT_LEVEL100 = "CompatLevel100"
COMPAT_LEVEL110 = "CompatLevel110"
COMPAT_LEVEL120 = "CompatLevel120"
COMPAT_LEVEL130 = "CompatLevel130"
COMPAT_LEVEL140 = "CompatLevel140"
class DatabaseFileType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of SQL Server database file types
"""
ROWS = "Rows"
LOG = "Log"
FILESTREAM = "Filestream"
NOT_SUPPORTED = "NotSupported"
FULLTEXT = "Fulltext"
class DatabaseMigrationStage(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Current stage of migration
"""
NONE = "None"
INITIALIZE = "Initialize"
BACKUP = "Backup"
FILE_COPY = "FileCopy"
RESTORE = "Restore"
COMPLETED = "Completed"
class DatabaseMigrationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Database level migration state.
"""
UNDEFINED = "UNDEFINED"
INITIAL = "INITIAL"
FULL_BACKUP_UPLOAD_START = "FULL_BACKUP_UPLOAD_START"
LOG_SHIPPING_START = "LOG_SHIPPING_START"
UPLOAD_LOG_FILES_START = "UPLOAD_LOG_FILES_START"
CUTOVER_START = "CUTOVER_START"
POST_CUTOVER_COMPLETE = "POST_CUTOVER_COMPLETE"
COMPLETED = "COMPLETED"
CANCELLED = "CANCELLED"
FAILED = "FAILED"
class DatabaseState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of SQL Server Database states
"""
ONLINE = "Online"
RESTORING = "Restoring"
RECOVERING = "Recovering"
RECOVERY_PENDING = "RecoveryPending"
SUSPECT = "Suspect"
EMERGENCY = "Emergency"
OFFLINE = "Offline"
COPYING = "Copying"
OFFLINE_SECONDARY = "OfflineSecondary"
class DataMigrationResultCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Result code of the data migration
"""
INITIAL = "Initial"
COMPLETED = "Completed"
OBJECT_NOT_EXISTS_IN_SOURCE = "ObjectNotExistsInSource"
OBJECT_NOT_EXISTS_IN_TARGET = "ObjectNotExistsInTarget"
TARGET_OBJECT_IS_INACCESSIBLE = "TargetObjectIsInaccessible"
FATAL_ERROR = "FatalError"
class ErrorType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Error type
"""
DEFAULT = "Default"
WARNING = "Warning"
ERROR = "Error"
class LoginMigrationStage(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Enum of the different stage of login migration.
"""
NONE = "None"
INITIALIZE = "Initialize"
LOGIN_MIGRATION = "LoginMigration"
ESTABLISH_USER_MAPPING = "EstablishUserMapping"
ASSIGN_ROLE_MEMBERSHIP = "AssignRoleMembership"
ASSIGN_ROLE_OWNERSHIP = "AssignRoleOwnership"
ESTABLISH_SERVER_PERMISSIONS = "EstablishServerPermissions"
ESTABLISH_OBJECT_PERMISSIONS = "EstablishObjectPermissions"
COMPLETED = "Completed"
class LoginType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Enum mapping of SMO LoginType.
"""
WINDOWS_USER = "WindowsUser"
WINDOWS_GROUP = "WindowsGroup"
SQL_LOGIN = "SqlLogin"
CERTIFICATE = "Certificate"
ASYMMETRIC_KEY = "AsymmetricKey"
EXTERNAL_USER = "ExternalUser"
EXTERNAL_GROUP = "ExternalGroup"
class MigrationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Current state of migration
"""
NONE = "None"
IN_PROGRESS = "InProgress"
FAILED = "Failed"
WARNING = "Warning"
COMPLETED = "Completed"
SKIPPED = "Skipped"
STOPPED = "Stopped"
class MigrationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Current status of migration
"""
DEFAULT = "Default"
CONNECTING = "Connecting"
SOURCE_AND_TARGET_SELECTED = "SourceAndTargetSelected"
SELECT_LOGINS = "SelectLogins"
CONFIGURED = "Configured"
RUNNING = "Running"
ERROR = "Error"
STOPPED = "Stopped"
COMPLETED = "Completed"
COMPLETED_WITH_WARNINGS = "CompletedWithWarnings"
class MongoDbClusterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of data source
"""
BLOB_CONTAINER = "BlobContainer"
COSMOS_DB = "CosmosDb"
MONGO_DB = "MongoDb"
class MongoDbErrorType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of error or warning
"""
ERROR = "Error"
VALIDATION_ERROR = "ValidationError"
WARNING = "Warning"
class MongoDbMigrationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NOT_STARTED = "NotStarted"
VALIDATING_INPUT = "ValidatingInput"
INITIALIZING = "Initializing"
RESTARTING = "Restarting"
COPYING = "Copying"
INITIAL_REPLAY = "InitialReplay"
REPLAYING = "Replaying"
FINALIZING = "Finalizing"
COMPLETE = "Complete"
CANCELED = "Canceled"
FAILED = "Failed"
class MongoDbProgressResultType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of progress object
"""
MIGRATION = "Migration"
DATABASE = "Database"
COLLECTION = "Collection"
class MongoDbReplication(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes how changes will be replicated from the source to the target. The default is OneTime.
"""
DISABLED = "Disabled"
ONE_TIME = "OneTime"
CONTINUOUS = "Continuous"
class MongoDbShardKeyOrder(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The field ordering
"""
FORWARD = "Forward"
REVERSE = "Reverse"
HASHED = "Hashed"
class MySqlTargetPlatformType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of possible target types when migrating from MySQL
"""
SQL_SERVER = "SqlServer"
AZURE_DB_FOR_MY_SQL = "AzureDbForMySQL"
class NameCheckFailureReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The reason why the name is not available, if nameAvailable is false
"""
ALREADY_EXISTS = "AlreadyExists"
INVALID = "Invalid"
class ObjectType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of type of objects
"""
STORED_PROCEDURES = "StoredProcedures"
TABLE = "Table"
USER = "User"
VIEW = "View"
FUNCTION = "Function"
class ProjectProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The project's provisioning state
"""
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
class ProjectSourcePlatform(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Source platform of the project
"""
SQL = "SQL"
MY_SQL = "MySQL"
POSTGRE_SQL = "PostgreSql"
MONGO_DB = "MongoDb"
UNKNOWN = "Unknown"
class ProjectTargetPlatform(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Target platform of the project
"""
SQLDB = "SQLDB"
SQLMI = "SQLMI"
AZURE_DB_FOR_MY_SQL = "AzureDbForMySql"
AZURE_DB_FOR_POSTGRE_SQL = "AzureDbForPostgreSql"
MONGO_DB = "MongoDb"
UNKNOWN = "Unknown"
class ReplicateMigrationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Wrapper for replicate reported migration states.
"""
UNDEFINED = "UNDEFINED"
VALIDATING = "VALIDATING"
PENDING = "PENDING"
COMPLETE = "COMPLETE"
ACTION_REQUIRED = "ACTION_REQUIRED"
FAILED = "FAILED"
class ResourceSkuCapacityScaleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The scale type applicable to the SKU.
"""
AUTOMATIC = "Automatic"
MANUAL = "Manual"
NONE = "None"
class ResourceSkuRestrictionsReasonCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The reason code for restriction.
"""
QUOTA_ID = "QuotaId"
NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription"
class ResourceSkuRestrictionsType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of restrictions.
"""
LOCATION = "location"
class ScenarioSource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of source type
"""
ACCESS = "Access"
DB2 = "DB2"
MY_SQL = "MySQL"
ORACLE = "Oracle"
SQL = "SQL"
SYBASE = "Sybase"
POSTGRE_SQL = "PostgreSQL"
MONGO_DB = "MongoDB"
SQLRDS = "SQLRDS"
MY_SQLRDS = "MySQLRDS"
POSTGRE_SQLRDS = "PostgreSQLRDS"
class ScenarioTarget(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of target type
"""
SQL_SERVER = "SQLServer"
SQLDB = "SQLDB"
SQLDW = "SQLDW"
SQLMI = "SQLMI"
AZURE_DB_FOR_MY_SQL = "AzureDBForMySql"
AZURE_DB_FOR_POSTGRES_SQL = "AzureDBForPostgresSQL"
MONGO_DB = "MongoDB"
class SchemaMigrationOption(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Option for how schema is extracted and applied to target
"""
NONE = "None"
EXTRACT_FROM_SOURCE = "ExtractFromSource"
USE_STORAGE_FILE = "UseStorageFile"
class SchemaMigrationStage(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Current stage of schema migration
"""
NOT_STARTED = "NotStarted"
VALIDATING_INPUTS = "ValidatingInputs"
COLLECTING_OBJECTS = "CollectingObjects"
DOWNLOADING_SCRIPT = "DownloadingScript"
GENERATING_SCRIPT = "GeneratingScript"
UPLOADING_SCRIPT = "UploadingScript"
DEPLOYING_SCHEMA = "DeployingSchema"
COMPLETED = "Completed"
COMPLETED_WITH_WARNINGS = "CompletedWithWarnings"
FAILED = "Failed"
class ServerLevelPermissionsGroup(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Permission group for validations. These groups will run a set of permissions for validating
user activity. Select the permission group for the activity that you are performing.
"""
DEFAULT = "Default"
MIGRATION_FROM_SQL_SERVER_TO_AZURE_DB = "MigrationFromSqlServerToAzureDB"
MIGRATION_FROM_SQL_SERVER_TO_AZURE_MI = "MigrationFromSqlServerToAzureMI"
MIGRATION_FROM_MY_SQL_TO_AZURE_DB_FOR_MY_SQL = "MigrationFromMySQLToAzureDBForMySQL"
class ServiceProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The resource's provisioning state
"""
ACCEPTED = "Accepted"
DELETING = "Deleting"
DEPLOYING = "Deploying"
STOPPED = "Stopped"
STOPPING = "Stopping"
STARTING = "Starting"
FAILED_TO_START = "FailedToStart"
FAILED_TO_STOP = "FailedToStop"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class ServiceScalability(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The scalability approach
"""
NONE = "none"
MANUAL = "manual"
AUTOMATIC = "automatic"
class Severity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Severity of the validation error
"""
MESSAGE = "Message"
WARNING = "Warning"
ERROR = "Error"
class SqlSourcePlatform(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of source platform types
"""
SQL_ON_PREM = "SqlOnPrem"
class SsisMigrationOverwriteOption(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The overwrite option for SSIS object migration, only ignore and overwrite are supported in DMS
now and future may add Reuse option for container object
"""
IGNORE = "Ignore"
OVERWRITE = "Overwrite"
class SsisMigrationStage(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Current stage of SSIS migration
"""
NONE = "None"
INITIALIZE = "Initialize"
IN_PROGRESS = "InProgress"
COMPLETED = "Completed"
class SsisStoreType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enumeration of supported source SSIS store type in DMS
"""
SSIS_CATALOG = "SsisCatalog"
class SyncDatabaseMigrationReportingState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Enum of the different state of database level online migration.
"""
UNDEFINED = "UNDEFINED"
CONFIGURING = "CONFIGURING"
INITIALIAZING = "INITIALIAZING"
STARTING = "STARTING"
RUNNING = "RUNNING"
READY_TO_COMPLETE = "READY_TO_COMPLETE"
COMPLETING = "COMPLETING"
COMPLETE = "COMPLETE"
CANCELLING = "CANCELLING"
CANCELLED = "CANCELLED"
FAILED = "FAILED"
VALIDATING = "VALIDATING"
VALIDATION_COMPLETE = "VALIDATION_COMPLETE"
VALIDATION_FAILED = "VALIDATION_FAILED"
RESTORE_IN_PROGRESS = "RESTORE_IN_PROGRESS"
RESTORE_COMPLETED = "RESTORE_COMPLETED"
BACKUP_IN_PROGRESS = "BACKUP_IN_PROGRESS"
BACKUP_COMPLETED = "BACKUP_COMPLETED"
class SyncTableMigrationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Enum of the different state of table level online migration.
"""
BEFORE_LOAD = "BEFORE_LOAD"
FULL_LOAD = "FULL_LOAD"
COMPLETED = "COMPLETED"
CANCELED = "CANCELED"
ERROR = "ERROR"
FAILED = "FAILED"
class TaskState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The state of the task. This is ignored if submitted.
"""
UNKNOWN = "Unknown"
QUEUED = "Queued"
RUNNING = "Running"
CANCELED = "Canceled"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
FAILED_INPUT_VALIDATION = "FailedInputValidation"
FAULTED = "Faulted"
class UpdateActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Type of the actual difference for the compared object, while performing schema comparison
"""
DELETED_ON_TARGET = "DeletedOnTarget"
CHANGED_ON_TARGET = "ChangedOnTarget"
ADDED_ON_TARGET = "AddedOnTarget"
class ValidationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Current status of the validation
"""
DEFAULT = "Default"
NOT_STARTED = "NotStarted"
INITIALIZED = "Initialized"
IN_PROGRESS = "InProgress"
COMPLETED = "Completed"
COMPLETED_WITH_ISSUES = "CompletedWithIssues"
STOPPED = "Stopped"
FAILED = "Failed"
|
|
#
# Restriction Analysis Libraries.
# Copyright (C) 2004. Frederic Sohm.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
import re
import itertools
from Bio.Restriction import RanaConfig as RanaConf
from Bio.Restriction.DNAUtils import complement
"""
Usage :
PrintFormat allows printing the results of a restriction analysis in 3
different formats: list, column or map.
The easiest way to use it is:
>>> from Rana.PrintFormat import PrintFormat
>>> from Rana.Restriction import AllEnzymes
>>> from Rana.fts import fts
>>> seq = fts(pBR322)
>>> dct = AllEnzymes.search(seq)
>>> new = PrintFormat()
>>> new.print_that(dct, '\n my pBR322 analysis\n\n','\n no site :\n\n')
my pBR322 analysis
AasI : 2169, 2582.
AatII : 4289.
...
More enzymes.
...
ZraI : 4287.
ZrmI : 3847.
no site :
AarI AatI Acc65I AcsI AcvI AdeI AflII AgeI
...
More enzymes.
...
Vha464I XapI XbaI XcmI XhoI XmaCI XmaI XmaJI
Zsp2I
>>>
Some of the methods of PrintFormat are meant to be overridden by derived
classes.
"""
class PrintFormat(object) :
"""PrintFormat allow the printing of results of restriction analysis."""
ConsoleWidth = RanaConf.ConsoleWidth
NameWidth = RanaConf.NameWidth
MaxSize = RanaConf.MaxSize
Cmodulo = ConsoleWidth%NameWidth
PrefWidth = ConsoleWidth - Cmodulo
Indent = RanaConf.Indent
linesize = PrefWidth - NameWidth
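# Added comment: PrefWidth is ConsoleWidth rounded down to a whole number of
# NameWidth columns, and linesize is the width left after reserving one name
# column, used below to decide when output lines wrap.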
def __init__(self) :
"""PrintFormat() -> new PrintFormat Instance"""
pass
def print_as(self, what='list') :
"""PF.print_as([what='list']) -> print the results as specified.
Valid formats are:
'list' -> alphabetical order
'number' -> number of sites in the sequence
'map' -> a map representation of the sequence with the sites.
If you want more flexibility, override the virtual method make_format.
"""
if what == 'map' :
self.make_format = self._make_map
elif what == 'number' :
self.make_format = self._make_number
else :
self.make_format = self._make_list
return
def print_that(self, dct, title='', s1='') :
"""PF.print_that(dct, [title[, s1]]) -> Print dct nicely formatted.
dct is a dictionary as returned by a RestrictionBatch.search()
title is the title of the map.
It must be a formatted string, i.e. you must include the line break.
s1 is the title separating the list of enzymes that have sites from
those without sites.
s1 must be a formatted string as well.
The format of print_that is a list."""
if not dct :
dct = self.results
ls, nc = [], []
for k, v in dct.iteritems() :
if v :
ls.append((k,v))
else :
nc.append(k)
print self.make_format(ls, title, nc, s1)
return
def make_format(self, cut=[], title='', nc=[], s1='') :
"""PF.make_format(cut, nc, title, s) -> string
Virtual method.
Here to be pointed to one of the _make_* methods.
You can also create a new method and point make_format to it."""
return self._make_list(cut,title, nc,s1)
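# Illustrative sketch (added, hedged): as the docstring above suggests,
# make_format can be pointed at a custom formatter in a subclass, e.g.
#
#   class TitleOnlyFormat(PrintFormat):
#       def _make_title_only(self, cut=[], title='', nc=[], s1=''):
#           return title
#       make_format = _make_title_only
#
# print_that() would then print only the title for such a subclass.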
###### _make_* methods to be used with the virtual method make_format
def _make_list(self, ls,title, nc,s1) :
"""PF._make_number(ls,title, nc,s1) -> string.
return a string of form :
title.
enzyme1 : position1, position2.
enzyme2 : position1, position2, position3.
ls is a list of cutting enzymes.
title is the title.
nc is a list of non cutting enzymes.
s1 is the sentence before the non cutting enzymes."""
return self._make_list_only(ls, title) + self._make_nocut_only(nc, s1)
def _make_map(self, ls,title, nc,s1) :
"""PF._make_number(ls,title, nc,s1) -> string.
return a string of form :
title.
enzyme1, position
|
AAAAAAAAAAAAAAAAAAAAA...
|||||||||||||||||||||
TTTTTTTTTTTTTTTTTTTTT...
ls is a list of cutting enzymes.
title is the title.
nc is a list of non cutting enzymes.
s1 is the sentence before the non cutting enzymes."""
return self._make_map_only(ls, title) + self._make_nocut_only(nc, s1)
def _make_number(self, ls,title, nc,s1) :
"""PF._make_number(ls,title, nc,s1) -> string.
title.
enzyme which cut 1 time :
enzyme1 : position1.
enzyme which cut 2 times :
enzyme2 : position1, position2.
...
ls is a list of cutting enzymes.
title is the title.
nc is a list of non cutting enzymes.
s1 is the sentence before the non cutting enzymes."""
return self._make_number_only(ls, title)+self._make_nocut_only(nc,s1)
def _make_nocut(self, ls,title, nc,s1) :
"""PF._make_nocut(ls,title, nc,s1) -> string.
return a formatted string of the non cutting enzymes.
ls is a list of cutting enzymes -> will not be used.
Here for compatibility with make_format.
title is the title.
nc is a list of non cutting enzymes.
s1 is the sentence before the non cutting enzymes."""
return title + self._make_nocut_only(nc, s1)
def _make_nocut_only(self, nc, s1, ls =[],title='') :
"""PF._make_nocut_only(nc, s1) -> string.
return a formatted string of the non cutting enzymes.
nc is a list of non cutting enzymes.
s1 is the sentence before the non cutting enzymes.
"""
if not nc :
return s1
nc.sort()
st = ''
stringsite = s1 or '\n Enzymes which do not cut the sequence.\n\n'
Join = ''.join
for key in nc :
st = Join((st, str.ljust(str(key), self.NameWidth)))
if len(st) > self.linesize :
stringsite = Join((stringsite, st, '\n'))
st = ''
stringsite = Join((stringsite, st, '\n'))
return stringsite
def _make_list_only(self, ls, title, nc = [], s1 = '') :
"""PF._make_list_only(ls, title) -> string.
return a string of form :
title.
enzyme1 : position1, position2.
enzyme2 : position1, position2, position3.
...
ls is a list of results.
title is a string.
Non cutting enzymes are not included."""
if not ls :
return title
return self.__next_section(ls, title)
def _make_number_only(self, ls, title, nc = [], s1 ='') :
"""PF._make_number_only(ls, title) -> string.
return a string of form :
title.
enzyme which cut 1 time :
enzyme1 : position1.
enzyme which cut 2 times :
enzyme2 : position1, position2.
...
ls is a list of results.
title is a string.
Non cutting enzymes are not included."""
if not ls :
return title
ls.sort(lambda x,y : cmp(len(x[1]), len(y[1])))
iterator = iter(ls)
cur_len = 1
new_sect = []
for name, sites in iterator :
l = len(sites)
if l > cur_len :
title += "\n\nenzymes which cut %i times :\n\n"%cur_len
title = self.__next_section(new_sect, title)
new_sect, cur_len = [(name, sites)], l
continue
new_sect.append((name,sites))
title += "\n\nenzymes which cut %i times :\n\n"%cur_len
return self.__next_section(new_sect, title)
def _make_map_only(self, ls, title, nc = [], s1 = '') :
"""PF._make_map_only(ls, title) -> string.
return a string of form :
title.
enzyme1, position
|
AAAAAAAAAAAAAAAAAAAAA...
|||||||||||||||||||||
TTTTTTTTTTTTTTTTTTTTT...
ls is a list of results.
title is a string.
Non cutting enzymes are not included.
"""
if not ls :
return title
resultKeys = [str(x) for x,y in ls]
resultKeys.sort()
map = title or ''
enzymemap = {}
for (enzyme, cut) in ls :
for c in cut :
if enzymemap.has_key(c) :
enzymemap[c].append(str(enzyme))
else :
enzymemap[c] = [str(enzyme)]
mapping = enzymemap.keys()
mapping.sort()
cutloc = {}
x, counter, length = 0, 0, len(self.sequence)
for x in xrange(60, length, 60) :
counter = x - 60
l=[]
for key in mapping :
if key <= x :
l.append(key)
else :
cutloc[counter] = l
mapping = mapping[mapping.index(key):]
break
cutloc[x] = l
cutloc[x] = mapping
sequence = self.sequence.tostring()
revsequence = complement(sequence)
a = '|'
base, counter = 0, 0
emptyline = ' ' * 60
Join = ''.join
for base in xrange(60, length, 60) :
counter = base - 60
line = emptyline
for key in cutloc[counter] :
s = ''
if key == base :
for n in enzymemap[key] : s = ' '.join((s,n))
l = line[0:59]
lineo = Join((l, str(key), s, '\n'))
line2 = Join((l, a, '\n'))
linetot = Join((lineo, line2))
map = Join((map, linetot))
break
for n in enzymemap[key] : s = ' '.join((s,n))
k = key%60
lineo = Join((line[0:(k-1)], str(key), s, '\n'))
line = Join((line[0:(k-1)], a, line[k:]))
line2 = Join((line[0:(k-1)], a, line[k:], '\n'))
linetot = Join((lineo,line2))
map = Join((map,linetot))
mapunit = '\n'.join((sequence[counter : base],a * 60,
revsequence[counter : base],
Join((str.ljust(str(counter+1), 15), ' '* 30,
str.rjust(str(base), 15),'\n\n'))
))
map = Join((map, mapunit))
line = ' '* 60
for key in cutloc[base] :
s = ''
if key == length:
for n in enzymemap[key] :
s = Join((s,' ',n))
l = line[0:(length-1)]
lineo = Join((l,str(key),s,'\n'))
line2 = Join((l,a,'\n'))
linetot = Join((lineo, line2))
map = Join((map, linetot))
break
for n in enzymemap[key] : s = Join((s,' ',n))
k = key%60
lineo = Join((line[0:(k-1)],str(key),s,'\n'))
line = Join((line[0:(k-1)],a,line[k:]))
line2 = Join((line[0:(k-1)],a,line[k:],'\n'))
linetot = Join((lineo,line2))
map = Join((map,linetot))
mapunit = ''
mapunit = Join((sequence[base : length], '\n'))
mapunit = Join((mapunit, a * (length-base), '\n'))
mapunit = Join((mapunit,revsequence[base:length], '\n'))
mapunit = Join((mapunit, Join((str.ljust(str(base+1), 15), ' '*(
length-base-30),str.rjust(str(length), 15),
'\n\n'))))
map = Join((map,mapunit))
return map
###### private method to do lists :
def __next_section(self, ls, into) :
"""FP.__next_section(ls, into) -> string.
ls is a list of tuple (string, [int, int]).
into is a string to which the formatted ls will be added.
Format ls as a string of lines :
The form is :
enzyme1 : position1.
enzyme2 : position2, position3.
then add the formatted ls to into and
return into."""
ls.sort()
indentation = '\n' + (self.NameWidth + self.Indent) * ' '
linesize = self.linesize - self.MaxSize
pat = re.compile("([\w,\s()]){1,%i}[,\.]"%linesize)
several, Join = '', ''.join
for name, sites in ls :
stringsite = ''
l = Join((', '.join([str(site) for site in sites]), '.'))
if len(l) > linesize :
#
# cut where appropriate and add the indentation
#
l = [x.group() for x in re.finditer(pat, l)]
stringsite = indentation.join(l)
else :
stringsite = l
into = Join((into,
str(name).ljust(self.NameWidth),' : ',stringsite,'\n'))
return into
|
|
# Twisted Imports
from twisted.internet import reactor, defer, task
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver
# System Imports
from collections import namedtuple
# Package Imports
from ..machine import Machine, Component, Property, Stream, ui
from ..util import now
from ..protocol import basic, gsioc
from gilson_components import layout
#__all__ = ["UVVis151"]
class Error (Exception):
"Base class for exceptions in this module"
pass
class GSIOC (Machine):
protocolFactory = Factory.forProtocol(gsioc.Receiver)
name = "GSIOC Connection"
def setup (self):
def connected (result):
reactor.callLater(0.5, self._connect_wait.callback, True)
return result
# Put in a delay to allow the GSIOC to initialise
self._connect_wait = defer.Deferred()
self.ready.addCallback(connected)
def gsioc (self, id):
d = defer.Deferred()
def send_slave (result):
d.callback(self.protocol.slave(id))
# Wait until GSIOC has connected
self._connect_wait.addCallback(send_slave)
return d
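# Added comment: _iter_ci_FIFO walks the contact-event FIFO string in 7-byte
# records and yields the first 6 characters of each (a state character plus a
# 5-digit hex timestamp, as interpreted in ControlModule506C below); the 7th
# byte of each record is skipped, presumably a record separator.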
def _iter_ci_FIFO (s):
for i in xrange(0, len(s), 7):
yield s[i:i + 6]
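# Added comment: _set_output builds the setter for a contact output Property;
# from the options used below, "D<i>" appears to open contact output i and
# "C<i>" to close it (any value other than "open" sends the close command).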
def _set_output (machine, i):
i = str(i)
def set_output (value):
if value == "open":
machine.protocol.buffered_command("D" + i)
else:
machine.protocol.buffered_command("C" + i)
return set_output
class ControlModule506C (Machine):
protocolFactory = Factory.forProtocol(gsioc.Receiver)
name = "Gilson 506C Control Module"
A = 1
B = 2
C = 4
D = 8
input_map = {
"@": 0, "A": A, "B": B, "C": A | B, "D": C,
"E": A | C, "F": B | C, "G": A | B | C, "H": D,
"I": A | D, "J": B | D, "K": A | B | D, "L": C | D,
"M": A | C | D, "N": B | C | D, "O": A | B | C | D
}
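# Added comment (inferred from the mapping above and interpretContactInputs
# below): the 506C reports the state of its four contact inputs as a single
# character, chr(ord('@') + mask), where bit 0 = input A ... bit 3 = input D;
# e.g. "G" decodes to A | B | C.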
analogue_sample_frequency = 0.1
analogue_sample_interval = 0.5
contact_input_sample_interval = 0.5
contact_output_sample_interval = 0.5
def setup (self, **kwargs):
self.analogue1 = gsioc.FIFOStream(channel = 0, title = "Analogue Input A", type = float, unit = "mV", factor = 0.01)
self.analogue2 = gsioc.FIFOStream(channel = 1, title = "Analogue Input B", type = float, unit = "mV", factor = 0.01)
self.analogue3 = gsioc.FIFOStream(channel = 2, title = "Analogue Input C", type = float, unit = "mV", factor = 0.01)
self.analogue4 = gsioc.FIFOStream(channel = 3, title = "Analogue Input D", type = float, unit = "mV", factor = 0.01)
self.input1 = Property(title = "Contact Input A", type = str)
self.input2 = Property(title = "Contact Input B", type = str)
self.input3 = Property(title = "Contact Input C", type = str)
self.input4 = Property(title = "Contact Input D", type = str)
self.output1 = Property(title = "Contact Output A", type = str, options = ("open", "closed"), setter = _set_output(self, 1))
self.output2 = Property(title = "Contact Output B", type = str, options = ("open", "closed"), setter = _set_output(self, 2))
self.output3 = Property(title = "Contact Output C", type = str, options = ("open", "closed"), setter = _set_output(self, 3))
self.output4 = Property(title = "Contact Output D", type = str, options = ("open", "closed"), setter = _set_output(self, 4))
self.output5 = Property(title = "Contact Output E", type = str, options = ("open", "closed"), setter = _set_output(self, 5))
self.output6 = Property(title = "Contact Output F", type = str, options = ("open", "closed"), setter = _set_output(self, 6))
self.ui = ui(
traces = [{
"title": "Analogue Inputs",
"unit": self.analogue1.unit,
"traces": [self.analogue1, self.analogue2, self.analogue3, self.analogue4],
"colours": ["#FF1300", "#FFB100", "#1435AD", "#00C322"]
}],
properties = [
self.input1,
self.input2,
self.input3,
self.input4,
self.output1,
self.output2,
self.output3,
self.output4,
self.output5,
self.output6
]
)
def start (self):
# Reset Analogue Input FIFO buffers
self.analogue1.reset(self.protocol, self.analogue_sample_frequency)
self.analogue2.reset(self.protocol, self.analogue_sample_frequency)
self.analogue3.reset(self.protocol, self.analogue_sample_frequency)
self.analogue4.reset(self.protocol, self.analogue_sample_frequency)
def monitorAnalogueInputs ():
self.analogue1.update(self.protocol)
self.analogue2.update(self.protocol)
self.analogue3.update(self.protocol)
self.analogue4.update(self.protocol)
self._tick(monitorAnalogueInputs, self.analogue_sample_interval)
# Reset Contact Event FIFO
def resetContactInputs ():
def interpret (result):
if len(result) != 4:
return
self.input1._push("closed" if result[0] == "C" else "open")
self.input2._push("closed" if result[1] == "C" else "open")
self.input3._push("closed" if result[2] == "C" else "open")
self.input4._push("closed" if result[3] == "C" else "open")
self._last_contact_update = now()
self.protocol.buffered_command("9")
self.protocol.immediate_command("*").addCallback(interpret)
def interpretContactInputs (result):
if result[0] == "|":
return # Buffer is empty
if len(result) % 7 > 0:
raise Exception("Malformed contact event FIFO: " + str(result))
for entry in _iter_ci_FIFO(result):
				try:
					state = self.input_map[entry[0]]
					time = self._last_contact_update + (int(entry[1:6], 16) * 0.01)
				except (IndexError, KeyError):
					raise Exception("Malformed contact event FIFO: " + str(result))
self.input1._push("closed" if state & self.A else "open", time)
self.input2._push("closed" if state & self.B else "open", time)
self.input3._push("closed" if state & self.C else "open", time)
self.input4._push("closed" if state & self.D else "open", time)
def interpretContactOutputs (result):
if len(result) != 6:
return
self.output1._push("closed" if result[0] == "C" else "open")
self.output2._push("closed" if result[1] == "C" else "open")
self.output3._push("closed" if result[2] == "C" else "open")
self.output4._push("closed" if result[3] == "C" else "open")
self.output5._push("closed" if result[4] == "C" else "open")
self.output6._push("closed" if result[5] == "C" else "open")
def monitorContactInputs ():
self.protocol.immediate_command("9").addCallback(interpretContactInputs)
def monitorContactOutputs ():
self.protocol.immediate_command("?").addCallback(interpretContactOutputs)
self._tick(resetContactInputs, 45 * 3600) # Event buffer runs out after ~46h
self._tick(monitorContactInputs, self.contact_input_sample_interval)
self._tick(monitorContactOutputs, self.contact_output_sample_interval)
def stop (self):
self._stopTicks()
class SampleInjector233 (Machine):
protocolFactory = Factory.forProtocol(basic.QueuedLineReceiver)
name = "Gilson Sampling Injector"
_layouts = {}
_current_position = (0, 0, 0)
# Positions determined by manual calibration of our device.
# Testing recommended in case of non-calibrated machine!
_default_locations = {
"zero": (0, 350, 0),
"inject:1": (2460, 516, 515),
"inject:2": (3866, 516, 515),
"wash:a:deep": (140, 400, 750),
"wash:a:shallow": (70, 400, 400),
"wash:a:drain": (0, 400, 350)
}
def add_layout (self, name, layout):
self._layouts[name] = layout
def remove_layout (self, name):
if name in self._layouts:
del self._layouts[name]
def clear_layouts (self):
self._layouts = {}
def setup (self):
def set_position (location):
if location in self._default_locations:
x, y, z = self._default_locations[location]
elif ":" in location:
name, pos = location.split(":")
if name not in self._layouts:
raise Exception ("Unknown layout: %s" % name)
x, y, z = self._layouts[name].xyz(pos)
else:
raise Exception ("Invalid location: %s" % location)
# Move to z_up
self.protocol.buffered_command("z0")
self.protocol.buffered_command("W")
# Move to x,y
self.protocol.buffered_command("x{:d}".format(x))
self.protocol.buffered_command("y{:d}".format(y))
self.protocol.buffered_command("W")
# Move to z_down
self.protocol.buffered_command("z{:d}".format(z))
self.protocol.buffered_command("W")
# Time for both Z movements
z_time = (self._current_position[2] / 1250. + z / 900.)
# Time for XY movement
xy_time = max(
abs(self._current_position[0] - x) / 2500.,
abs(self._current_position[1] - y) / 2500.
)
# Start checking a bit before anticipated
# completion time
expected_time = max(0, z_time + xy_time - 0.5)
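			# Worked example (a sketch using the default calibration above):
			# moving from "zero" (0, 350, 0) to "inject:1" (2460, 516, 515) gives
			# z_time ~= 0/1250 + 515/900 ~= 0.57 s and
			# xy_time ~= max(2460, 166)/2500 ~= 0.98 s,
			# so polling for completion starts after roughly 1.06 s.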
self._current_position = (x, y, z)
finished = defer.Deferred()
def check_finished ():
def cb (result):
if result[1] == "1":
						finished.errback(Exception("Error reported by sampling injector"))
elif result[0] == "1":
reactor.callLater(0.1, check)
elif result[0] == "0":
self.position._push(location)
finished.callback("ok")
def check ():
self.protocol.immediate_command("S").addCallback(cb)
check()
reactor.callLater(expected_time, check_finished)
return finished
def set_valve (valve):
c = "I{:d}" + ("/" if valve == "switching" else "")
def set_valve (pos):
return self.protocol.buffered_command(c.format(1 if pos == "inject" else 0));
return set_valve
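		# Illustrative command strings produced by the factory above:
		#   set_valve("injection")("inject") sends "I1"
		#   set_valve("switching")("load")   sends "I0/"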
# setup variables
self.position = Property(title = "Position", type = str, setter = set_position)
self.injection = Property(title = "Injection Valve", type = str, options = ("load", "inject"), setter = set_valve("injection"))
self.switching = Property(title = "Switching Valve", type = str, options = ("load", "inject"), setter = set_valve("switching"))
#self.status = Property(title = "Status", type = str)
self.ui = ui(
properties = [
self.position,
self.injection,
self.switching,
#self.status
]
)
def start (self):
def get_param (id):
def request (result):
return self.protocol.immediate_command("P")
return self.protocol.buffered_command("P" + str(id)).addCallback(request)
def interpretState (result):
if result[1] == "1":
self.status._push("error")
elif result[0] == "1":
self.status._push("busy")
elif result[0] == "0":
self.status._push("idle")
valve_states = ("load", "inject", "running", "error", "missing")
def interpretValveState (result):
if result[0] == "0":
pass
# print "Injection Valve on Right"
self.injection._push(valve_states[int(result[1])])
self.switching._push(valve_states[int(result[2])])
def monitor1 ():
#self.protocol.immediate_command("S").addCallback(interpretState)
self.protocol.immediate_command("P").addCallback(interpretValveState)
self._tick(monitor1, 0.5)
def stop (self):
self._stopTicks()
def reset (self):
return defer.gatherResults([
self.injection.set("load"),
self.switching.set("load"),
self.position.set("zero")
])
class Pump305 (Machine):
protocolFactory = Factory.forProtocol(basic.QueuedLineReceiver)
name = "Gilson 305 HPLC Pump"
def setup (self):
pass
def start (self):
pass
def reset (self):
		return defer.succeed(None)
class InvalidPistonSize (Error):
"The requested piston size is not in the configured list."
class InitializationFailed (Error):
"The requested piston failed to initialise."
class InvalidTarget (Error):
"The requested target volume is not supported."
class ValveMoveFailed (Error):
"The requested valve movement failed."
_PistonSize = namedtuple('_PistonSize', ["flow_max", "flow_sanitise"])
class _syringe_piston (Component):
piston_ids = ("L", "R")
piston_sizes = {
None: _PistonSize( 0, lambda x: 0),
100: _PistonSize( 6, lambda x: round(max(0.001, min(x, 6)), 3)),
250: _PistonSize( 15, lambda x: round(max(0.001, min(x, 15)), 3)),
500: _PistonSize( 30, lambda x: round(max(0.001, min(x, 30)), 3)),
1000: _PistonSize( 60, lambda x: round(max(0.01, min(x, 60)), 2)),
5000: _PistonSize(120, lambda x: round(max(0.01, min(x, 120)), 2)),
10000: _PistonSize(240, lambda x: round(max(0.02, min(x, 240)), 2)),
25000: _PistonSize(240, lambda x: round(max(0.04, min(x, 240)), 2)),
39000: _PistonSize(39000, lambda x: int(max(1, min(x, 39000))))
}
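	# flow_sanitise clamps a requested flow rate to the range supported by the
	# installed piston, e.g. (illustrative):
	#   piston_sizes[1000].flow_sanitise(100)   -> 60.0  (clamped to flow_max)
	#   piston_sizes[1000].flow_sanitise(0.001) -> 0.01  (clamped to the minimum step)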
status_text = {
"N": "ready",
"R": "running",
"O": "error",
"I": "uninitialized",
"M": "missing",
"H": "paused",
"W": "waiting"
}
def __init__ (self, machine, id, size):
if id not in (0, 1):
raise Error ("Piston id must be 0 or 1")
if size not in self.piston_sizes:
raise InvalidPistonSize(size)
self._i = id
self._id = self.piston_ids[id]
self._size = size
self._machine = machine
self._rate = self.piston_sizes[size].flow_max / 4.
self.title = self._id + " Piston"
self.status = Property(title = self._id + " Syringe Status", type = str)
self.target = Property(title = self._id + " Syringe Target Volume", type = float, unit = "uL", setter = self.set_target)
self.volume = Stream(title = self._id + " Syringe Volume", type = float, unit = "uL")
def set_target (self, target, timely_start = False):
"""
Move to a target volume by aspirating or dispensing
the appropriate volume.
@param target: The desired volume of aspirated liquid in uL.
@param timely_start: Synchronise with other syringe.
"""
if self._size is None:
raise Error ("Syringe " + self._id + " not installed")
finished = defer.Deferred()
current_target = self.target.value
target = min(max(target, 0), self._size)
movement = target - current_target
# For 100, 250 uL pistons, the pump expects the volume parameter
# as a 5-character float. For all others, as a 5-char integer.
if self._size in (100, 250):
command = "{:s}{:s}{:05.1f}"
else:
command = "{:s}{:s}{:05d}"
movement = int(movement)
# Send the command, e.g. "AL00100", followed by a go command, e.g. "BL"
self._machine.protocol.buffered_command(command.format(
"D" if movement < 0 else "A",
self._id,
abs(movement)
))
if timely_start:
self._machine.protocol.buffered_command("T{:s}".format(self._id))
self._machine.protocol.buffered_command("B{:s}".format(self._id))
self.target._push(target)
def check_finished (delay):
def cb (result):
status = result[6 * self._i]
if status == "N":
# Movement complete, now idle
monitor.stop()
finished.callback(None)
elif status == "R":
# Keep checking rapidly if it is still running
reactor.callLater(0.1, check)
elif status == "W" or status == "H":
# Less frequent checks if the syringe is waiting
reactor.callLater(delay, check)
else:
# Error returned
monitor.stop()
					finished.errback(Error("Syringe " + self._id + " movement failed"))
def check ():
self._machine.protocol.immediate_command("M").addCallback(cb)
check()
def monitor_movement ():
def cb (result):
self.update(result[0 + 6 * self._i : 6 + 6 * self._i])
return self._machine.protocol.immediate_command("M").addCallback(cb)
expected_time = max(round((abs(movement) / 1000 / self._rate) * 60, 1) - 0.5, 0)
reactor.callLater(expected_time, check_finished, expected_time)
monitor = task.LoopingCall(monitor_movement)
monitor.start(1, now = True)
return finished
def set_rate (self, rate):
"""
Set the syringe piston flow rate.
@param rate: The desired flow rate in mL/min
"""
if self._size is None:
raise Error ("Syringe " + self._id + " not installed")
# Return a flow rate within the allowed bounds
rate = self.piston_sizes[self._size].flow_sanitise(rate)
self._rate = rate
# It seems that the specified flow rate can be only 5 characters long
		if self._size == 39000:
rate = "{:05d}".format(rate)
else:
rate = "{:05.3f}".format(rate)[:5]
print "set rate: S" + self._id + rate
return self._machine.protocol.buffered_command(
"S" + self._id + rate
)
def aspirate (self, volume, timely_start = False):
"""
Aspirate a volume of solution.
@param volume: The volume to aspirate in uL.
@param timely_start: Synchronise with other syringe.
"""
return self.set_target(self.target.value + volume, timely_start)
def dispense (self, volume, timely_start = False):
"""
Dispense a volume of solution.
@param volume: The volume to dispense in uL.
@param timely_start: Synchronise with other syringe.
"""
return self.set_target(self.target.value - volume, timely_start)
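	# Hedged usage sketch (assumes a started SyringePump402 named "pump" with
	# both pistons installed and initialised; "timely_start" queues the first
	# movement so that both pistons start together):
	#
	#   d1 = pump.piston1.aspirate(100, timely_start = True)
	#   d2 = pump.piston2.dispense(100)
	#   defer.gatherResults([d1, d2])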
def initialize (self):
"Initialise syringe."
# An error will be returned if the pump doesn't recognise the size
def cb (result):
if result[1] == "1":
raise InitializationFailed
else:
self.target._push(0)
return self._machine.protocol.buffered_command(
"O{:s}".format(self._id)
)
# TODO: monitor / update whilst initializing, return when done...
def initialisation_failed (failure):
failure.trap(InitializationFailed)
print "Syringe Initialisation failed. Trying again"
return task.deferLater(reactor, 1, self.initialize)
# Send commands to initialise the syringe
if self._size is not None:
self._machine.protocol.buffered_command(
"P{:s}{:05d}".format(self._id, self._size)
)
d = self._machine.protocol.immediate_command("S")
d.addCallback(cb)
d.addErrback(initialisation_failed)
return d
else:
return defer.succeed(None)
def update (self, status):
self.status._push(self.status_text[status[0]])
self.volume._push(float(status[1:]))
class SyringePump402 (Machine):
protocolFactory = Factory.forProtocol(basic.QueuedLineReceiver)
name = "Gilson Piston Pump 402"
initialise_on_start = True
valve_positions = {
"N": "needle",
"R": "reservoir",
"X": "moving",
"O": "error",
"M": "missing"
}
def setup (self, syringe_sizes):
if all(s is None for s in syringe_sizes):
raise InvalidPistonSize(syringe_sizes)
self.piston1 = _syringe_piston(self, 0, syringe_sizes[0])
self.piston2 = _syringe_piston(self, 1, syringe_sizes[1])
def _set_valve_position (id):
command = ("VL", "VR")[id]
def start_checking (result, position, finished):
return task.deferLater(
reactor, 0.5, check_finished,
position, finished
)
def check_finished (position, finished):
def cb (result):
status = result[id]
if status == "N" or status == "R":
# Workaround...
						if id == 0:
							self.valve1._push(position)
						elif id == 1:
self.valve2._push(position)
finished.callback(None)
elif status == "X": # Still running
reactor.callLater(0.1, check)
else: # Error condition
finished.errback(ValveMoveFailed())
def check ():
self.protocol.immediate_command("V").addCallback(cb)
check()
def setter (position):
finished = defer.Deferred()
self.protocol.buffered_command(
command + ("R" if position == "reservoir" else "N")
).addCallback(
start_checking, position, finished
).addErrback(finished.errback)
return finished
return setter
self.valve1 = Property(
title = "L Valve Position", type = str,
options = ("reservoir", "needle"),
setter = _set_valve_position(0)
)
self.valve2 = Property(
title = "R Valve Position", type = str,
options = ("reservoir", "needle"),
setter = _set_valve_position(1)
)
self.ui = ui(
properties = [
self.piston1.status,
self.piston1.volume,
self.valve1,
self.piston2.status,
self.piston2.volume,
self.valve2
]
)
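		# Hedged usage sketch (assumes a running instance named "pump";
		# Property.set() returns a Deferred, as in the reset() methods above):
		#
		#   pump.valve1.set("needle").addCallback(lambda _: pump.piston1.aspirate(50))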
def start (self):
if self.initialise_on_start:
self.piston1.initialize()
self.piston2.initialize()
def interpret_status (result):
self.piston1.update(result[0:6])
self.piston2.update(result[6:12])
def interpret_valves (result):
self.valve1._push(self.valve_positions[result[0]])
self.valve2._push(self.valve_positions[result[1]])
self.protocol.immediate_command("M").addCallback(interpret_status)
self.protocol.immediate_command("V").addCallback(interpret_valves)
def stop (self):
pass
def reset (self):
return defer.succeed(None)
def pause (self):
return self.protocol.buffered_command("HB")
def resume (self):
return self.protocol.buffered_command("BB")
def _set_lamp (machine):
def set_lamp (power):
return machine.protocol.buffered_command("L%d" % (1 if power == "on" else 0));
return set_lamp
def _set_wavelength (machine):
def set_wavelength (wavelength):
return machine.protocol.buffered_command("P0=%s" % wavelength);
return set_wavelength
def _set_sensitivity (machine, i):
def set_sensitivity (AU):
return machine.protocol.buffered_command("P%s=%s" % (i, AU));
return set_sensitivity
class UVVis151 (Machine):
protocolFactory = Factory.forProtocol(basic.QueuedLineReceiver)
name = "Gilson 151 UV/VIS Spectrometer"
analogue_sample_frequency = 0.1
analogue_sample_interval = 0.5
default_wavelength = 254
def setup (self):
# setup variables
self.power = Property(title = "Lamp Power", type = str, options = ("on", "off"), setter = _set_lamp(self))
self.wavelength = Property(title = "Wavelength", type = int, min = 170, max = 700, unit = "nm", setter = _set_wavelength(self))
self.sensitivity1 = Property(title = "Sensitivity 1", type = float, min = 0.001, max = 2., unit = "AUFS", setter = _set_sensitivity(self, 1))
self.sensitivity2 = Property(title = "Sensitivity 2", type = float, min = 0.001, max = 2., unit = "AUFS", setter = _set_sensitivity(self, 2))
self.detection1 = gsioc.FIFOStream(channel = 0, title = "Detection at Sensitivity 1", type = float)
self.detection2 = gsioc.FIFOStream(channel = 1, title = "Detection at Sensitivity 2", type = float)
self.transmittance = gsioc.FIFOStream(channel = 2, title = "Transmittance", type = float, unit = "%", factor = 0.1)
self.ui = ui(
traces = [{
"title": "Detection",
"unit": self.detection1.unit,
"traces": [self.detection1, self.detection2],
"colours": ["#000", "#07F"]
}, {
"title": "Transmittance",
"unit": self.transmittance.unit,
"traces": [self.transmittance],
"colours": ["#0c4"]
}],
)
def start (self):
def get_param (id):
def request (result):
return self.protocol.immediate_command("P")
return self.protocol.buffered_command("P" + str(id)).addCallback(request)
def interpretLampStatus (result):
if len(result) == 9:
self.power._push("on")
else:
self.power._push("off")
def interpretWavelength (result):
			# The parameter reply is assumed to have the form "00=<wavelength>".
			if result[0:3] == "00=":
				self.wavelength._push(int(result[3:]))
def monitorStatus ():
pass
# i = monitors.__iter__()
#self.protocol.immediate_command("L").addCallback(interpretLampStatus)
#get_param(0).addCallback(interpretWavelength)
def monitorData ():
self.detection1.update(self.protocol)
self.detection2.update(self.protocol)
self.transmittance.update(self.protocol)
def reset ():
self.detection1.reset(self.protocol, self.analogue_sample_frequency)
self.detection2.reset(self.protocol, self.analogue_sample_frequency)
self.transmittance.reset(self.protocol, self.analogue_sample_frequency)
# Reset the buffers every minute.
self._tick(reset, 60)
self._tick(monitorData, self.analogue_sample_interval)
# Temp: Get wavelength at startup
get_param(0).addCallback(interpretWavelength)
def stop (self):
self._stopTicks()
def reset (self):
return defer.gatherResults([
self.wavelength.set(self.default_wavelength)
])
def zero (self):
return self.protocol.buffered_command("Z")
# -*- coding: utf-8 -*-
import os
import urlparse
from django.db import models
import markupsafe
import gitlab
from addons.base import exceptions
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from addons.gitlab import utils
from addons.gitlab.api import GitLabClient
from addons.gitlab.serializer import GitLabSerializer
from addons.gitlab import settings as gitlab_settings
from addons.gitlab.exceptions import ApiError, NotFoundError, GitLabError
from framework.auth import Auth
from osf.models.files import File, Folder, BaseFileNode
from website import settings
from website.util import web_url_for
hook_domain = gitlab_settings.HOOK_DOMAIN or settings.DOMAIN
class GitLabFileNode(BaseFileNode):
_provider = 'gitlab'
class GitLabFolder(GitLabFileNode, Folder):
pass
class GitLabFile(GitLabFileNode, File):
version_identifier = 'commitSha'
@property
def _hashes(self):
try:
return {'commit': self._history[-1]['extra']['commitSha']}
except (IndexError, KeyError):
return None
def touch(self, auth_header, revision=None, ref=None, branch=None, **kwargs):
revision = revision or ref or branch
return super(GitLabFile, self).touch(auth_header, revision=revision, **kwargs)
class GitLabProvider(object):
name = 'GitLab'
short_name = 'gitlab'
serializer = GitLabSerializer
def __init__(self, account=None):
super(GitLabProvider, self).__init__() # this does exactly nothing...
# provide an unauthenticated session by default
self.account = account
def __repr__(self):
return '<{name}: {status}>'.format(
name=self.__class__.__name__,
status=self.account.display_name if self.account else 'anonymous'
)
class UserSettings(BaseOAuthUserSettings):
oauth_provider = GitLabProvider
serializer = GitLabSerializer
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = GitLabProvider
serializer = GitLabSerializer
user = models.TextField(blank=True, null=True)
repo = models.TextField(blank=True, null=True)
repo_id = models.TextField(blank=True, null=True)
hook_id = models.TextField(blank=True, null=True)
hook_secret = models.TextField(blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True)
@property
def folder_id(self):
return self.repo or None
@property
def folder_name(self):
if self.complete:
return '{}/{}'.format(self.user, self.repo)
return None
@property
def folder_path(self):
return self.repo or None
@property
def has_auth(self):
return bool(self.user_settings and self.user_settings.has_auth)
@property
def complete(self):
return self.has_auth and self.repo is not None and self.user is not None
def authorize(self, user_settings, save=False):
self.user_settings = user_settings
self.owner.add_log(
action='gitlab_node_authorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=Auth(user_settings.owner),
)
if save:
self.save()
def clear_settings(self):
self.user = None
self.repo = None
self.repo_id = None
self.hook_id = None
self.hook_secret = None
def deauthorize(self, auth=None, log=True):
self.delete_hook(save=False)
self.clear_settings()
if log:
self.owner.add_log(
action='gitlab_node_deauthorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=auth,
)
self.clear_auth()
def delete(self, save=False):
super(NodeSettings, self).delete(save=False)
self.deauthorize(log=False)
if save:
self.save()
@property
def repo_url(self):
if self.repo:
return 'https://{0}/{1}'.format(self.external_account.display_name, self.repo)
@property
def short_url(self):
if self.repo:
return self.repo
@property
def is_private(self):
connection = GitLabClient(external_account=self.external_account)
return connection.repo(self.repo_id).visibility == 'private'
def to_json(self, user):
ret = super(NodeSettings, self).to_json(user)
user_settings = user.get_addon('gitlab')
ret.update({
'user_has_auth': user_settings and user_settings.has_auth,
'is_registration': self.owner.is_registration,
})
if self.user_settings and self.user_settings.has_auth:
owner = self.user_settings.owner
connection = GitLabClient(external_account=self.external_account)
            valid_credentials = True
            repos = []
            try:
                repos = [repo.attributes for repo in connection.repos()]
            except GitLabError:
                valid_credentials = False
if owner == user:
ret.update({'repos': repos})
ret.update({
'node_has_auth': True,
'gitlab_user': self.user or '',
'gitlab_repo': self.repo or '',
'gitlab_repo_id': self.repo_id if self.repo_id is not None else '0',
'gitlab_repo_full_name': '{0} / {1}'.format(self.user, self.repo) if (self.user and self.repo) else '',
'auth_osf_name': owner.fullname,
'auth_osf_url': owner.url,
'auth_osf_id': owner._id,
'gitlab_host': self.external_account.display_name,
'gitlab_user_name': self.external_account.display_name,
'gitlab_user_url': self.external_account.profile_url,
'is_owner': owner == user,
'valid_credentials': valid_credentials,
'addons_url': web_url_for('user_addons'),
'files_url': self.owner.web_url_for('collect_file_trees')
})
return ret
def serialize_waterbutler_credentials(self):
if not self.complete or not self.repo:
raise exceptions.AddonError('Addon is not authorized')
return {'token': self.external_account.oauth_key}
def serialize_waterbutler_settings(self):
if not self.complete:
raise exceptions.AddonError('Repo is not configured')
return {
'host': self.external_account.oauth_secret,
'owner': self.user,
'repo': self.repo,
'repo_id': self.repo_id
}
def create_waterbutler_log(self, auth, action, metadata):
path = metadata['path']
url = self.owner.web_url_for('addon_view_or_download_file', path=path, provider='gitlab')
if not metadata.get('extra'):
sha = None
urls = {}
else:
sha = metadata['extra']['fileSha']
urls = {
'view': '{0}?branch={1}'.format(url, sha),
'download': '{0}?action=download&branch={1}'.format(url, sha)
}
self.owner.add_log(
'gitlab_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': path,
'urls': urls,
'gitlab': {
'host': 'https://{0}'.format(self.external_account.display_name),
'user': self.user,
'repo': self.repo,
'sha': sha,
},
},
)
#############
# Callbacks #
#############
def before_page_load(self, node, user):
"""
:param Node node:
:param User user:
:return str: Alert message
"""
messages = []
# Quit if not contributor
if not node.is_contributor(user):
return messages
# Quit if not configured
if self.user is None or self.repo is None:
return messages
# Quit if no user authorization
if self.user_settings is None:
return messages
connect = GitLabClient(external_account=self.external_account)
try:
repo = connect.repo(self.repo_id)
except (ApiError, GitLabError):
return
except gitlab.exceptions.GitlabError as exc:
if exc.response_code == 403 and 'must accept the Terms of Service' in exc.error_message:
return [('Your gitlab account does not have proper authentication. Ensure you have agreed to Gitlab\'s '
'current Terms of Service by disabling and re-enabling your account.')]
else:
raise exc
# GitLab has visibility types: public, private, internal.
node_permissions = 'public' if node.is_public else 'private'
if repo.visibility != node_permissions:
message = (
'Warning: This OSF {category} is {node_perm}, but the GitLab '
'repo {user} / {repo} has {repo_perm} visibility.'.format(
category=markupsafe.escape(node.project_or_component),
node_perm=markupsafe.escape(node_permissions),
repo_perm=markupsafe.escape(repo.visibility),
user=markupsafe.escape(self.user),
repo=markupsafe.escape(self.repo),
)
)
if repo.visibility == 'private':
message += (
' Users can view the contents of this private GitLab '
'repository through this public project.'
)
else:
message += (
' The files in this GitLab repo can be viewed on GitLab '
'<u><a href="{url}">here</a></u>.'
).format(url=repo.http_url_to_repo)
messages.append(message)
return messages
def before_remove_contributor_message(self, node, removed):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
try:
message = (super(NodeSettings, self).before_remove_contributor_message(node, removed) +
'You can download the contents of this repository before removing '
'this contributor <u><a href="{url}">here</a></u>.'.format(
url=node.api_url + 'gitlab/tarball/'
))
except TypeError:
# super call returned None due to lack of user auth
return None
else:
return message
# backwards compatibility -- TODO: is this necessary?
before_remove_contributor = before_remove_contributor_message
def after_remove_contributor(self, node, removed, auth=None):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
if self.user_settings and self.user_settings.owner == removed:
# Delete OAuth tokens
self.user_settings = None
self.save()
message = (
u'Because the GitLab add-on for {category} "{title}" was authenticated '
u'by {user}, authentication information has been deleted.'
).format(
category=markupsafe.escape(node.category_display),
title=markupsafe.escape(node.title),
user=markupsafe.escape(removed.fullname)
)
if not auth or auth.user != removed:
url = node.web_url_for('node_setting')
message += (
u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
).format(url=url)
#
return message
def after_fork(self, node, fork, user, save=True):
"""
:param Node node: Original node
:param Node fork: Forked node
:param User user: User creating fork
:param bool save: Save settings after callback
:return tuple: Tuple of cloned settings and alert message
"""
clone = super(NodeSettings, self).after_fork(
node, fork, user, save=False
)
# Copy authentication if authenticated by forking user
if self.user_settings and self.user_settings.owner == user:
clone.user_settings = self.user_settings
if save:
clone.save()
return clone
def before_make_public(self, node):
try:
is_private = self.is_private
except NotFoundError:
return None
if is_private:
return (
'This {cat} is connected to a private GitLab repository. Users '
'(other than contributors) will not be able to see the '
'contents of this repo unless it is made public on GitLab.'
).format(
cat=node.project_or_component,
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), log=True)
#########
# Hooks #
#########
# TODO: Should Events be added here?
# TODO: Move hook logic to service
def add_hook(self, save=True):
if self.user_settings:
connect = GitLabClient(external_account=self.external_account)
secret = utils.make_hook_secret()
hook = connect.add_hook(
self.user, self.repo,
'web',
{
'url': urlparse.urljoin(
hook_domain,
os.path.join(
self.owner.api_url, 'gitlab', 'hook/'
)
),
'content_type': gitlab_settings.HOOK_CONTENT_TYPE,
'secret': secret,
},
events=gitlab_settings.HOOK_EVENTS,
)
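            # Illustrative hook URL (assuming a hook_domain of
            # "https://osf.example.org" and an owner api_url of
            # "/api/v1/project/abc12/"):
            #   https://osf.example.org/api/v1/project/abc12/gitlab/hook/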
if hook:
self.hook_id = hook.id
self.hook_secret = secret
if save:
self.save()
def delete_hook(self, save=True):
"""
:return bool: Hook was deleted
"""
if self.user_settings and self.hook_id:
connection = GitLabClient(external_account=self.external_account)
try:
response = connection.delete_hook(self.user, self.repo, self.hook_id)
except (GitLabError, NotFoundError):
return False
if response:
self.hook_id = None
if save:
self.save()
return True
return False
"""
A spider that can crawl an Open edX instance.
"""
import os
import re
import json
from datetime import datetime
from path import Path
import yaml
import requests
from urlobject import URLObject
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from pa11ycrawler.items import A11yItem
LOGIN_HTML_PATH = "/login"
LOGIN_API_PATH = "/user_api/v1/account/login_session/"
AUTO_AUTH_PATH = "/auto_auth"
COURSE_BLOCKS_API_PATH = "/api/courses/v1/blocks/"
LOGIN_FAILURE_MSG = "We couldn't sign you in."
def get_csrf_token(response):
"""
Extract the CSRF token out of the "Set-Cookie" header of a response.
"""
cookie_headers = [
h.decode('ascii') for h in response.headers.getlist("Set-Cookie")
]
if not cookie_headers:
return None
csrf_headers = [
h for h in cookie_headers if h.startswith("csrftoken=")
]
if not csrf_headers:
return None
match = re.match("csrftoken=([^ ;]+);", csrf_headers[-1])
return match.group(1)
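# Illustrative example (a sketch): a response carrying the header
#   Set-Cookie: csrftoken=abc123; expires=Tue, 01-Jan-2030 00:00:00 GMT; Path=/
# yields the token "abc123".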
def load_pa11y_ignore_rules(file=None, url=None): # pylint: disable=redefined-builtin
"""
Load the pa11y ignore rules from the given file or URL.
"""
if not file and not url:
return None
if file:
file = Path(file)
if not file.isfile():
msg = (
"pa11y_ignore_rules_file specified, "
"but file does not exist! {file}"
).format(file=file)
raise ValueError(msg)
return yaml.safe_load(file.text())
# must be URL
resp = requests.get(url)
if not resp.ok:
msg = (
"pa11y_ignore_rules_url specified, "
"but failed to fetch URL. status={status}"
).format(status=resp.status_code)
err = RuntimeError(msg)
err.response = resp
raise err
return yaml.safe_load(resp.text)
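# Hedged usage sketch (the YAML layout is whatever the downstream pa11y
# configuration expects; the file name and URL here are hypothetical):
#
#   rules = load_pa11y_ignore_rules(file="pa11y-ignore.yaml")
#   rules = load_pa11y_ignore_rules(url="https://example.com/pa11y-ignore.yaml")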
class EdxSpider(CrawlSpider):
"A Scrapy spider that can crawl an Open edX instance."
name = 'edx'
rules = (
Rule(
LinkExtractor(
deny=[
# don't crawl logout links
r"/logout/",
# don't crawl xblock links
r"://[^/]+/xblock/",
# don't crawl anything that returns an archive
r"\?_accept=application/x-tgz",
],
unique=True,
),
callback='parse_item',
follow=True,
),
)
def __init__(
self,
domain="localhost",
port="8000",
email=None,
password=None,
http_user=None,
http_pass=None,
course_key="course-v1:edX+Test101+course",
pa11y_ignore_rules_file=None,
pa11y_ignore_rules_url=None,
data_dir="data",
): # noqa
super(EdxSpider, self).__init__()
self.login_email = email
self.login_password = password
self.domain = domain
self.port = int(port)
self.course_key = course_key
self.http_user = http_user
self.http_pass = http_pass
self.data_dir = os.path.abspath(os.path.expanduser(data_dir))
self.pa11y_ignore_rules = load_pa11y_ignore_rules(
file=pa11y_ignore_rules_file, url=pa11y_ignore_rules_url,
)
# set start URL based on course_key, which is the test course by default
api_url = (
URLObject("http://")
.with_hostname(self.domain)
.with_port(self.port)
.with_path(COURSE_BLOCKS_API_PATH)
.set_query_params(
course_id=self.course_key,
depth="all",
all_blocks="true",
)
)
self.start_urls = [api_url]
self.allowed_domains = [domain]
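        # With the defaults above, the first request targets (illustratively):
        #   http://localhost:8000/api/courses/v1/blocks/
        #       ?course_id=course-v1:edX+Test101+course&depth=all&all_blocks=true
        # (URLObject percent-encodes the course_id value).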
def start_requests(self):
"""
Gets the spider started.
If both `self.login_email` and `self.login_password` are set,
this method generates a request to login with those credentials.
Otherwise, this method generates a request to go to the "auto auth"
page and get credentials from there. Either way, this method
doesn't actually generate requests from `self.start_urls` -- that is
handled by the `after_initial_login()` and `after_auto_auth()`
methods.
"""
if self.login_email and self.login_password:
login_url = (
URLObject("http://")
.with_hostname(self.domain)
.with_port(self.port)
.with_path(LOGIN_HTML_PATH)
)
yield scrapy.Request(
login_url,
callback=self.after_initial_csrf,
)
else:
self.logger.info(
"email/password unset, fetching credentials via auto_auth"
)
auth_url = (
URLObject("http://")
.with_hostname(self.domain)
.with_port(self.port)
.with_path(AUTO_AUTH_PATH)
.set_query_params(
staff='true',
course_id=self.course_key,
)
)
# make sure to request a parseable JSON response
headers = {
b"Accept": b"application/json",
}
yield scrapy.Request(
auth_url,
headers=headers,
callback=self.after_auto_auth,
)
def after_initial_csrf(self, response):
"""
This method is called *only* if the crawler is started with an
email and password combination.
In order to log in, we need a CSRF token from a GET request. This
method takes the result of a GET request, extracts the CSRF token,
and uses it to make a login request. The response to this login
request will be handled by the `after_initial_login` method.
"""
login_url = (
URLObject("http://")
.with_hostname(self.domain)
.with_port(self.port)
.with_path(LOGIN_API_PATH)
)
credentials = {
"email": self.login_email,
"password": self.login_password,
}
headers = {
b"X-CSRFToken": get_csrf_token(response),
}
yield scrapy.FormRequest(
login_url,
formdata=credentials,
headers=headers,
callback=self.after_initial_login,
)
def after_initial_login(self, response):
"""
This method is called *only* if the crawler is started with an
email and password combination.
It verifies that the login request was successful,
and then generates requests from `self.start_urls`.
"""
        if LOGIN_FAILURE_MSG in response.text:
self.logger.error("Credentials failed!")
return
self.logger.info("successfully completed initial login")
for url in self.start_urls:
yield self.make_requests_from_url(url)
def after_auto_auth(self, response):
"""
This method is called *only* if the crawler is started without an
email and password combination. It parses the response from the
"auto auth" feature, and saves the email and password combination.
Then it generates requests from `self.start_urls`.
"""
result = json.loads(response.text)
self.login_email = result["email"]
self.login_password = result["password"]
msg = (
"Obtained credentials via auto_auth! "
"email={email} password={password}"
).format(**result)
self.logger.info(msg)
for url in self.start_urls:
yield self.make_requests_from_url(url)
def parse_item(self, response):
"""
Get basic information about a page, so that it can be passed to the
`pa11y` tool for further testing.
@url https://www.google.com/
@returns items 1 1
@returns requests 0 0
@scrapes url request_headers accessed_at page_title
"""
# if we got redirected to a login page, then login
if URLObject(response.url).path == LOGIN_HTML_PATH:
reqs = self.handle_unexpected_redirect_to_login_page(response)
for req in reqs:
yield req
title = response.xpath("//title/text()").extract_first()
if title:
title = title.strip()
# `response.request.headers` is a dictionary where the key is the
# header name, and the value is a *list*, containing one item,
# which is the header value. We need to get rid of this list, and just
# have key-value pairs. (This list probably exists in case the same
# header is sent multiple times, but that's not happening in this case,
# and the list construct is getting in the way.)
#
# We also need to convert bytes to ASCII. In practice, headers can
# only contain ASCII characters: see
# http://stackoverflow.com/questions/5423223/how-to-send-non-english-unicode-string-using-http-header
request_headers = {key.decode('ascii'): value[0].decode('ascii')
for key, value
in response.request.headers.items()}
item = A11yItem(
url=response.url,
request_headers=request_headers,
accessed_at=datetime.utcnow(),
page_title=title,
)
yield item
def handle_unexpected_redirect_to_login_page(self, response):
"""
This method is called if the crawler has been unexpectedly logged out.
If that happens, and the crawler requests a page that requires a
logged-in user, the crawler will be redirected to a login page,
with the originally-requested URL as the `next` query parameter.
This method simply causes the crawler to log back in using the saved
email and password credentials. We rely on the fact that the login
page will redirect the user to the URL in the `next` query parameter
if the login is successful -- this will allow the crawl to resume
where it left off.
        This method is very much like the `after_initial_csrf()` method,
but the callback is `self.after_login` instead of
`self.after_initial_login`.
"""
next_url = URLObject(response.url).query_dict.get("next")
login_url = (
URLObject("http://")
.with_hostname(self.domain)
.with_port(self.port)
.with_path(LOGIN_API_PATH)
)
if next_url:
login_url = login_url.set_query_param("next", next_url)
credentials = {
"email": self.login_email,
"password": self.login_password,
}
headers = {
b"X-CSRFToken": get_csrf_token(response),
}
yield scrapy.FormRequest(
login_url,
formdata=credentials,
headers=headers,
callback=self.after_login,
)
def after_login(self, response):
"""
Check for a login error, then proceed as normal.
This is very much like the `after_initial_login()` method, but
it searches for links in the response instead of generating
requests from `self.start_urls`.
"""
if LOGIN_FAILURE_MSG in response.text:
self.logger.error("Credentials failed!")
return
# delegate to the `parse_item()` method, which handles normal responses.
for item in self.parse_item(response):
yield item