# Generated by Django 3.0.7 on 2020-07-23 12:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("trans", "0091_json_key"),
]
operations = [
migrations.AddField(
model_name="alert",
name="dismissed",
field=models.BooleanField(db_index=True, default=False),
),
]
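# Applying this migration follows Django's standard workflow (illustrative;
# "trans" is the app label used in the dependencies above):
#     python manage.py migrate trans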
| nijel/weblate | weblate/trans/migrations/0092_alert_dismissed.py | Python | gpl-3.0 | 392 |
import os
def slimdict(hangmanWord, badguess, goodguess):
wl = len(hangmanWord)
dictionary = []
with open("dict.txt", 'r', encoding='utf-8') as rawdict:
for word in rawdict:
            word = word.rstrip("\n")  # strip the newline; slicing word[:-1] would eat the last letter of a file that doesn't end with one
if len(word) == wl:
dictionary.append(word)
    # Set-based filters:
    # remove words that contain a bad letter, or that don't contain all of
    # the good letters. Much faster than the letter-location search below,
    # so it runs first to shrink the candidate list.
    dictionary = [word for word in dictionary if not any(letter in word for letter in badguess)]
    dictionary = [word for word in dictionary if all(letter in word for letter in goodguess)]
    # Letter-location search: keep only words whose letters match every
    # revealed position of the hangman word ("-" marks an unknown letter).
    dictionary = [
        word for word in dictionary
        if all(known == "-" or known == actual
               for known, actual in zip(hangmanWord, word))
    ]
return dictionary
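# A usage sketch (results depend on the local dict.txt; "-" marks unknown
# letters in the hangman word):
#     slimdict("h---o", ["z"], ["h", "o"])
# keeps only 5-letter words with no "z", containing both "h" and "o",
# with "h" in position 0 and "o" in position 4 (e.g. "hello").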
def goodguesser(hangmanWord, badguess):
    goodguess = [item for item in hangmanWord if item != "-"]
    # Letters revealed in the hangman word were good guesses after all, so
    # remove them from the bad-guess list. The list is rewritten in place
    # because the caller reuses the same object.
    badguess[:] = [letter for letter in badguess if letter not in goodguess]
    return goodguess
def chooser(goodguess, dictionary):
    ## `dictionary` here is the already-filtered candidate list.
    alphacount = [[letter, 0] for letter in "abcdefghijklmnopqrstuvwxyz"]
    ## Remove letters that were already guessed correctly, since they need
    ## not be guessed again.
    alphacount = [entry for entry in alphacount if entry[0] not in goodguess]
    ## Count how many candidate words contain each remaining letter.
    ## Duplicate letters within a word are collapsed first (via set), because
    ## the goal is to maximize correct guesses, not necessarily to identify
    ## the answer in the fewest rounds.
for word in dictionary:
word = set(word)
for item in alphacount:
if item[0] in word:
item[1] = item[1] + 1
alphacount.sort(key=lambda e:e[1], reverse=True)
return alphacount
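# Illustrative output shape (hypothetical counts): the remaining letters
# sorted by how many candidate words contain them, e.g.
#     [['e', 41], ['s', 33], ['r', 27], ...]
# so alphacount[0][0] is the single best next guess.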
def main():
    os.system('cls' if os.name == 'nt' else 'clear')  # clear the screen cross-platform
hangmanWord = input("Hangman Word: ")
badguess = list(input("Attempted letters: "))
goodguess = goodguesser(hangmanWord, badguess)
dictionary = slimdict(hangmanWord, badguess, goodguess)
alphacount = chooser(goodguess, dictionary)
if alphacount[0][1] == 0:
print("No words left, bad word not in dictionary")
else:
print("Guess: '{}'".format(alphacount[0][0]))
print("Number of possible words: {}".format(len(dictionary)))
if len(dictionary) < 6:
        print(", ".join(dictionary))
input()
while True:
main()
| deidyomega/hangmansolver | hangmansolver.py | Python | mit | 3,193 |
from pycloudia.services.interfaces import IInvoker
from pycloudia.activities.facades.interfaces import IService
from pycloudia.activities.facades.consts import HEADER
class ClientProxy(IService):
"""
:type sender: L{pycloudia.cluster.interfaces.ISender}
:type target_factory: L{pycloudia.services.interfaces.IServiceChannelFactory}
"""
sender = None
target_factory = None
def process_outgoing_package(self, address, client_id, package):
target = self.target_factory.create_by_address(address)
package.headers[HEADER.CLIENT_ID] = client_id
self.sender.send_package(target, package)
class ServerProxy(IInvoker):
def __init__(self, service):
"""
:type service: L{pycloudia.activities.facades.interfaces.IService}
"""
self.service = service
def process_package(self, package):
address = package.headers.pop(HEADER.ADDRESS)
client_id = package.headers.pop(HEADER.CLIENT_ID)
self.service.process_outgoing_package(address, client_id, package)
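# A minimal wiring sketch (hypothetical variable names; the concrete ISender
# and IServiceChannelFactory implementations live elsewhere in pycloudia):
#
#     client_proxy = ClientProxy()
#     client_proxy.sender = sender                   # ISender implementation
#     client_proxy.target_factory = channel_factory  # IServiceChannelFactory
#     server_proxy = ServerProxy(client_proxy)
#     server_proxy.process_package(package)  # pops ADDRESS/CLIENT_ID headers,
#                                            # then forwards via ClientProxy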
| cordis/pycloudia | pycloudia/activities/facades/proxies.py | Python | mit | 1,059 |
"""The tests for the MQTT lock platform."""
import unittest
from homeassistant.setup import setup_component
from homeassistant.const import (STATE_LOCKED, STATE_UNLOCKED,
ATTR_ASSUMED_STATE)
import homeassistant.components.lock as lock
from tests.common import (
mock_mqtt_component, fire_mqtt_message, get_test_home_assistant)
class TestLockMQTT(unittest.TestCase):
"""Test the MQTT lock."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.mock_publish = mock_mqtt_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_controlling_state_via_topic(self):
"""Test the controlling state via topic."""
assert setup_component(self.hass, lock.DOMAIN, {
lock.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'payload_lock': 'LOCK',
'payload_unlock': 'UNLOCK'
}
})
state = self.hass.states.get('lock.test')
self.assertEqual(STATE_UNLOCKED, state.state)
self.assertFalse(state.attributes.get(ATTR_ASSUMED_STATE))
fire_mqtt_message(self.hass, 'state-topic', 'LOCK')
self.hass.block_till_done()
state = self.hass.states.get('lock.test')
self.assertEqual(STATE_LOCKED, state.state)
fire_mqtt_message(self.hass, 'state-topic', 'UNLOCK')
self.hass.block_till_done()
state = self.hass.states.get('lock.test')
self.assertEqual(STATE_UNLOCKED, state.state)
def test_sending_mqtt_commands_and_optimistic(self):
"""Test the sending MQTT commands in optimistic mode."""
assert setup_component(self.hass, lock.DOMAIN, {
lock.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'command_topic': 'command-topic',
'payload_lock': 'LOCK',
'payload_unlock': 'UNLOCK',
'qos': 2
}
})
state = self.hass.states.get('lock.test')
self.assertEqual(STATE_UNLOCKED, state.state)
self.assertTrue(state.attributes.get(ATTR_ASSUMED_STATE))
lock.lock(self.hass, 'lock.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'LOCK', 2, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('lock.test')
self.assertEqual(STATE_LOCKED, state.state)
lock.unlock(self.hass, 'lock.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'UNLOCK', 2, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('lock.test')
self.assertEqual(STATE_UNLOCKED, state.state)
def test_controlling_state_via_topic_and_json_message(self):
"""Test the controlling state via topic and JSON message."""
assert setup_component(self.hass, lock.DOMAIN, {
lock.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'payload_lock': 'LOCK',
'payload_unlock': 'UNLOCK',
'value_template': '{{ value_json.val }}'
}
})
state = self.hass.states.get('lock.test')
self.assertEqual(STATE_UNLOCKED, state.state)
fire_mqtt_message(self.hass, 'state-topic', '{"val":"LOCK"}')
self.hass.block_till_done()
state = self.hass.states.get('lock.test')
self.assertEqual(STATE_LOCKED, state.state)
fire_mqtt_message(self.hass, 'state-topic', '{"val":"UNLOCK"}')
self.hass.block_till_done()
state = self.hass.states.get('lock.test')
self.assertEqual(STATE_UNLOCKED, state.state)
| MungoRae/home-assistant | tests/components/lock/test_mqtt.py | Python | apache-2.0 | 4,146 |
# -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file, and this file only, is based on
# Lib/ipaddress.py of cpython
# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
# are retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
import itertools
import struct
# The following makes it easier for us to script updates of the bundled code and is not part of
# upstream
_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"}
__version__ = "1.0.22"
# Compatibility functions
_compat_int_types = (int,)
try:
_compat_int_types = (int, long)
except NameError:
pass
try:
_compat_str = unicode
except NameError:
_compat_str = str
assert bytes != str
if b"\0"[0] == 0: # Python 3 semantics
def _compat_bytes_to_byte_vals(byt):
return byt
else:
def _compat_bytes_to_byte_vals(byt):
return [struct.unpack(b"!B", b)[0] for b in byt]
try:
_compat_int_from_byte_vals = int.from_bytes
except AttributeError:
def _compat_int_from_byte_vals(bytvals, endianess):
assert endianess == "big"
res = 0
for bv in bytvals:
assert isinstance(bv, _compat_int_types)
res = (res << 8) + bv
return res
def _compat_to_bytes(intval, length, endianess):
assert isinstance(intval, _compat_int_types)
assert endianess == "big"
if length == 4:
if intval < 0 or intval >= 2 ** 32:
raise struct.error("integer out of range for 'I' format code")
return struct.pack(b"!I", intval)
elif length == 16:
if intval < 0 or intval >= 2 ** 128:
raise struct.error("integer out of range for 'QQ' format code")
return struct.pack(b"!QQ", intval >> 64, intval & 0xFFFFFFFFFFFFFFFF)
else:
raise NotImplementedError()
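# For example (illustration only, not part of upstream):
#     >>> _compat_to_bytes(3221225985, 4, "big")
#     b'\xc0\x00\x02\x01'
# i.e. 192.0.2.1 packed in network byte order; length 16 packs two
# unsigned 64-bit words for IPv6.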
if hasattr(int, "bit_length"):
    # Not int.bit_length, since that won't work in 2.7 where long exists
def _compat_bit_length(i):
return i.bit_length()
else:
def _compat_bit_length(i):
for res in itertools.count():
if i >> res == 0:
return res
def _compat_range(start, end, step=1):
assert step > 0
i = start
while i < end:
yield i
i += step
class _TotalOrderingMixin(object):
__slots__ = ()
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
"%r does not appear to be an IPv4 or IPv6 address. "
"Did you pass in a bytes (str in Python 2) instead of"
" a unicode object?" % address
)
raise ValueError(
"%r does not appear to be an IPv4 or IPv6 address" % address
)
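# Doctest-style examples (added for illustration; Python 3 reprs shown):
#     >>> ip_address(u"192.0.2.1")
#     IPv4Address('192.0.2.1')
#     >>> ip_address(3221225985)  # ints < 2**32 are treated as IPv4
#     IPv4Address('192.0.2.1')
#     >>> ip_address(u"2001:db8::1")
#     IPv6Address('2001:db8::1')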
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
"%r does not appear to be an IPv4 or IPv6 network. "
"Did you pass in a bytes (str in Python 2) instead of"
" a unicode object?" % address
)
raise ValueError(
"%r does not appear to be an IPv4 or IPv6 network" % address
)
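# Doctest-style examples (illustration only; Python 3 reprs shown):
#     >>> ip_network(u"192.0.2.1/24")  # host bits set, strict by default
#     Traceback (most recent call last):
#       ...
#     ValueError: 192.0.2.1/24 has host bits set
#     >>> ip_network(u"192.0.2.1/24", strict=False)
#     IPv4Network('192.0.2.0/24')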
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError(
"%r does not appear to be an IPv4 or IPv6 interface" % address
)
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return _compat_to_bytes(address, 4, "big")
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return _compat_to_bytes(address, 16, "big")
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = _compat_str(address).split("/")
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it) # pylint: disable=stop-iteration-return
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
return min(bits, _compat_bit_length(~number & (number - 1)))
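# For example, 0b101000 has three trailing zero bits (illustration only):
#     >>> _count_righthand_zero_bits(0b101000, 32)
#     3
#     >>> _count_righthand_zero_bits(0, 32)
#     32
# The expression ~number & (number - 1) turns the trailing zeros into a
# run of ones whose bit length is the count.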
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if not (
isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress)
):
raise TypeError("first and last must be IP addresses, not networks")
if first.version != last.version:
raise TypeError(
"%s and %s are not of the same version" % (first, last)
)
if first > last:
raise ValueError("last IP address must be greater than first")
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError("unknown IP version")
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(
_count_righthand_zero_bits(first_int, ip_bits),
_compat_bit_length(last_int - first_int + 1) - 1,
)
net = ip((first_int, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
def _collapse_addresses_internal(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
# First merge
to_merge = list(addresses)
subnets = {}
while to_merge:
net = to_merge.pop()
supernet = net.supernet()
existing = subnets.get(supernet)
if existing is None:
subnets[supernet] = net
elif existing != net:
# Merge consecutive subnets
del subnets[supernet]
to_merge.append(supernet)
# Then iterate over resulting networks, skipping subsumed subnets
last = None
for net in sorted(subnets.values()):
if last is not None:
# Since they are sorted,
# last.network_address <= net.network_address is a given.
if last.broadcast_address >= net.broadcast_address:
continue
yield net
last = net
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError(
"%s and %s are not of the same version" % (ip, ips[-1])
)
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError(
"%s and %s are not of the same version" % (ip, ips[-1])
)
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError(
"%s and %s are not of the same version" % (ip, nets[-1])
)
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
# find consecutive address ranges in the sorted sequence and summarize them
if ips:
for first, last in _find_address_range(ips):
addrs.extend(summarize_address_range(first, last))
return _collapse_addresses_internal(addrs + nets)
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
class _IPAddressBase(_TotalOrderingMixin):
"""The mother class."""
__slots__ = ()
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return _compat_str(self)
@property
def reverse_pointer(self):
"""The name of the reverse DNS pointer for the IP address, e.g.:
>>> ipaddress.ip_address("127.0.0.1").reverse_pointer
'1.0.0.127.in-addr.arpa'
>>> ipaddress.ip_address("2001:db8::1").reverse_pointer
'1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
"""
return self._reverse_pointer()
@property
def version(self):
msg = "%200s has no version specified" % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(
msg % (address, self._max_prefixlen, self._version)
)
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = (
"%r (len %d != %d) is not permitted as an IPv%d address. "
"Did you pass in a bytes (str in Python 2) instead of"
" a unicode object?"
)
raise AddressValueError(
msg % (address, address_len, expected_len, self._version)
)
@classmethod
def _ip_int_from_prefix(cls, prefixlen):
"""Turn the prefix length into a bitwise netmask
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
@classmethod
def _prefix_from_ip_int(cls, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(
ip_int, cls._max_prefixlen
)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = cls._max_prefixlen // 8
details = _compat_to_bytes(ip_int, byteslen, "big")
msg = "Netmask pattern %r mixes zeroes & ones"
raise ValueError(msg % details)
return prefixlen
@classmethod
def _report_invalid_netmask(cls, netmask_str):
msg = "%r is not a valid netmask" % netmask_str
raise NetmaskValueError(msg)
@classmethod
def _prefix_from_prefix_string(cls, prefixlen_str):
"""Return prefix length from a numeric string
Args:
prefixlen_str: The string to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask
"""
# int allows a leading +/- as well as surrounding whitespace,
# so we ensure that isn't the case
if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
cls._report_invalid_netmask(prefixlen_str)
try:
prefixlen = int(prefixlen_str)
except ValueError:
cls._report_invalid_netmask(prefixlen_str)
if not (0 <= prefixlen <= cls._max_prefixlen):
cls._report_invalid_netmask(prefixlen_str)
return prefixlen
@classmethod
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str)
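    # Both spellings of a /24 resolve to the same prefix length
    # (illustration only, via the private classmethod):
    #     >>> IPv4Network._prefix_from_ip_string(u"255.255.255.0")
    #     24
    #     >>> IPv4Network._prefix_from_ip_string(u"0.0.0.255")  # hostmask form
    #     24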
def __reduce__(self):
return self.__class__, (_compat_str(self),)
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
__slots__ = ()
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return self._ip == other._ip and self._version == other._version
except AttributeError:
return NotImplemented
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseAddress):
raise TypeError(
"%s and %s are not of the same type" % (self, other)
)
if self._version != other._version:
raise TypeError(
"%s and %s are not of the same version" % (self, other)
)
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return _compat_str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
def __reduce__(self):
return self.__class__, (self._ip,)
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return "%s/%d" % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast):
yield self._address_class(x)
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError("address out of range")
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError("address out of range")
return self._address_class(broadcast + n)
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseNetwork):
raise TypeError(
"%s and %s are not of the same type" % (self, other)
)
if self._version != other._version:
raise TypeError(
"%s and %s are not of the same version" % (self, other)
)
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (
self._version == other._version
and self.network_address == other.network_address
and int(self.netmask) == int(other.netmask)
)
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (
int(self.network_address)
<= int(other._ip)
<= int(self.broadcast_address)
)
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other
or (
other.network_address in self
or (other.broadcast_address in self)
)
)
@property
def broadcast_address(self):
x = self._cache.get("broadcast_address")
if x is None:
x = self._address_class(
int(self.network_address) | int(self.hostmask)
)
self._cache["broadcast_address"] = x
return x
@property
def hostmask(self):
x = self._cache.get("hostmask")
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache["hostmask"] = x
return x
@property
def with_prefixlen(self):
return "%s/%d" % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return "%s/%s" % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return "%s/%s" % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = "%200s has no associated address class" % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
list(addr1.address_exclude(addr2)) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::1/32')
addr2 = ip_network('2001:db8::1/128')
list(addr1.address_exclude(addr2)) =
[ip_network('2001:db8::1/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError(
"%s and %s are not of the same version" % (self, other)
)
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not other.subnet_of(self):
raise ValueError("%s not contained in %s" % (other, self))
if other == self:
return
# Make sure we're comparing the network of other.
other = other.__class__(
"%s/%s" % (other.network_address, other.prefixlen)
)
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if other.subnet_of(s1):
yield s2
s1, s2 = s1.subnets()
elif other.subnet_of(s2):
yield s1
s1, s2 = s2.subnets()
else:
# If we got here, there's a bug somewhere.
raise AssertionError(
"Error performing exclusion: "
"s1: %s s2: %s other: %s" % (s1, s2, other)
)
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
# If we got here, there's a bug somewhere.
raise AssertionError(
"Error performing exclusion: "
"s1: %s s2: %s other: %s" % (s1, s2, other)
)
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError(
"%s and %s are not of the same type" % (self, other)
)
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError("new prefix must be longer")
if prefixlen_diff != 1:
raise ValueError("cannot set prefixlen_diff and new_prefix")
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError("prefix length diff must be > 0")
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
"prefix length diff %d is invalid for netblock %s"
% (new_prefixlen, self)
)
start = int(self.network_address)
end = int(self.broadcast_address) + 1
step = (int(self.hostmask) + 1) >> prefixlen_diff
for new_addr in _compat_range(start, end, step):
current = self.__class__((new_addr, new_prefixlen))
yield current
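    # For example (illustration only):
    #     >>> list(IPv4Network(u"192.0.2.0/24").subnets(prefixlen_diff=2))
    #     [IPv4Network('192.0.2.0/26'), IPv4Network('192.0.2.64/26'),
    #      IPv4Network('192.0.2.128/26'), IPv4Network('192.0.2.192/26')]
    #     >>> list(IPv4Network(u"192.0.2.0/24").subnets(new_prefix=25))
    #     [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]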
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError("new prefix must be shorter")
if prefixlen_diff != 1:
raise ValueError("cannot set prefixlen_diff and new_prefix")
prefixlen_diff = self._prefixlen - new_prefix
new_prefixlen = self.prefixlen - prefixlen_diff
if new_prefixlen < 0:
raise ValueError(
"current prefixlen is %d, cannot have a prefixlen_diff of %d"
% (self.prefixlen, prefixlen_diff)
)
return self.__class__(
(
int(self.network_address)
& (int(self.netmask) << prefixlen_diff),
new_prefixlen,
)
)
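    # For example, three steps up from a /24 (illustration only):
    #     >>> IPv4Network(u"192.0.2.0/24").supernet(prefixlen_diff=3)
    #     IPv4Network('192.0.0.0/21')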
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (
self.network_address.is_multicast
and self.broadcast_address.is_multicast
)
@staticmethod
def _is_subnet_of(a, b):
try:
# Always false if one is v4 and the other is v6.
if a._version != b._version:
raise TypeError(
"%s and %s are not of the same version" % (a, b)
)
return (
b.network_address <= a.network_address
and b.broadcast_address >= a.broadcast_address
)
except AttributeError:
raise TypeError(
"Unable to test subnet containment "
"between %s and %s" % (a, b)
)
def subnet_of(self, other):
"""Return True if this network is a subnet of other."""
return self._is_subnet_of(self, other)
def supernet_of(self, other):
"""Return True if this network is a supernet of other."""
return self._is_subnet_of(other, self)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (
self.network_address.is_reserved
and self.broadcast_address.is_reserved
)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (
self.network_address.is_link_local
and self.broadcast_address.is_link_local
)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return (
self.network_address.is_private
and self.broadcast_address.is_private
)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (
self.network_address.is_unspecified
and self.broadcast_address.is_unspecified
)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (
self.network_address.is_loopback
and self.broadcast_address.is_loopback
)
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 4
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset("0123456789")
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
_max_prefixlen = IPV4LENGTH
# There are only a handful of valid v4 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
def _explode_shorthand_ip_string(self):
return _compat_str(self)
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
try:
# Check for a netmask in prefix length form
prefixlen = cls._prefix_from_prefix_string(arg)
except NetmaskValueError:
# Check for a netmask or hostmask in dotted-quad form.
# This may raise NetmaskValueError.
prefixlen = cls._prefix_from_ip_string(arg)
netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError("Address cannot be empty")
octets = ip_str.split(".")
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _compat_int_from_byte_vals(
map(cls._parse_octet, octets), "big"
)
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_octet(cls, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == "0":
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
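    # Examples of the stricter-than-int() parsing (illustration only;
    # Python 3 reprs shown):
    #     >>> IPv4Address._parse_octet(u"255")
    #     255
    #     >>> IPv4Address._parse_octet(u"08")  # could be read as octal
    #     Traceback (most recent call last):
    #       ...
    #     ValueError: Ambiguous (octal/decimal) value in '08' not permitted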
@classmethod
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return ".".join(
_compat_str(
struct.unpack(b"!B", b)[0] if isinstance(b, bytes) else b
)
for b in _compat_to_bytes(ip_int, 4, "big")
)
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split(".")
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv4 address.
This implements the method described in RFC1035 3.5.
"""
reverse_octets = _compat_str(self).split(".")[::-1]
return ".".join(reverse_octets) + ".in-addr.arpa"
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
__slots__ = ("_ip", "__weakref__")
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, "big")
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if "/" in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in self._constants._reserved_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
return (
self not in self._constants._public_network and not self.is_private
)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in self._constants._multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self == self._constants._unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in self._constants._loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in self._constants._linklocal_network
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv4Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv4Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return "%s/%d" % (
self._string_from_ip_int(self._ip),
self.network.prefixlen,
)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return (
self.network < other.network
or self.network == other.network
and address_less
)
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen)
@property
def with_netmask(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask)
@property
def with_hostmask(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask)
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
        .broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
'192.0.0.2/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Constructing from a packed address or integer
if isinstance(address, (_compat_int_types, bytes)):
self.network_address = IPv4Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen
)
# fixme: address/network test here.
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
# We weren't given an address[1]
arg = self._max_prefixlen
self.network_address = IPv4Address(address[0])
self.netmask, self._prefixlen = self._make_netmask(arg)
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError("%s has host bits set" % self)
else:
self.network_address = IPv4Address(
packed & int(self.netmask)
)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (
IPv4Address(int(self.network_address) & int(self.netmask))
!= self.network_address
):
raise ValueError("%s has host bits set" % self)
self.network_address = IPv4Address(
int(self.network_address) & int(self.netmask)
)
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry.
"""
return (
not (
self.network_address in IPv4Network("100.64.0.0/10")
and self.broadcast_address in IPv4Network("100.64.0.0/10")
)
and not self.is_private
)
class _IPv4Constants(object):
_linklocal_network = IPv4Network("169.254.0.0/16")
_loopback_network = IPv4Network("127.0.0.0/8")
_multicast_network = IPv4Network("224.0.0.0/4")
_public_network = IPv4Network("100.64.0.0/10")
_private_networks = [
IPv4Network("0.0.0.0/8"),
IPv4Network("10.0.0.0/8"),
IPv4Network("127.0.0.0/8"),
IPv4Network("169.254.0.0/16"),
IPv4Network("172.16.0.0/12"),
IPv4Network("192.0.0.0/29"),
IPv4Network("192.0.0.170/31"),
IPv4Network("192.0.2.0/24"),
IPv4Network("192.168.0.0/16"),
IPv4Network("198.18.0.0/15"),
IPv4Network("198.51.100.0/24"),
IPv4Network("203.0.113.0/24"),
IPv4Network("240.0.0.0/4"),
IPv4Network("255.255.255.255/32"),
]
_reserved_network = IPv4Network("240.0.0.0/4")
_unspecified_address = IPv4Address("0.0.0.0")
IPv4Address._constants = _IPv4Constants
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 6
_ALL_ONES = (2 ** IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset("0123456789ABCDEFabcdef")
_max_prefixlen = IPV6LENGTH
# There are only a bunch of valid v6 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
prefixlen = cls._prefix_from_prefix_string(arg)
netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError("Address cannot be empty")
parts = ip_str.split(":")
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if "." in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append("%x" % ((ipv4_int >> 16) & 0xFFFF))
parts.append("%x" % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (
_max_parts - 1,
ip_str,
)
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in _compat_range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != cls._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_hextet(cls, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
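    # Illustrative behaviour:
    #     _parse_hextet('db8')   -> 3512 (0xdb8)
    #     _parse_hextet('g')     -> ValueError (non-hex digit)
    #     _parse_hextet('fffff') -> ValueError (more than 4 characters)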
@classmethod
def _compress_hextets(cls, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == "0":
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (
best_doublecolon_start + best_doublecolon_len
)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += [""]
hextets[best_doublecolon_start:best_doublecolon_end] = [""]
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [""] + hextets
return hextets
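    # Illustrative behaviour: for 2001:db8::1 the hextets
    #     ['2001', 'db8', '0', '0', '0', '0', '0', '1']
    # compress to ['2001', 'db8', '', '1'], so ':'.join(...) yields
    # '2001:db8::1'; an all-zero address compresses to ['', '', ''] -> '::'.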
@classmethod
def _string_from_ip_int(cls, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(cls._ip)
if ip_int > cls._ALL_ONES:
raise ValueError("IPv6 address is too large")
hex_str = "%032x" % ip_int
hextets = ["%x" % int(hex_str[x : x + 4], 16) for x in range(0, 32, 4)]
hextets = cls._compress_hextets(hextets)
return ":".join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, IPv6Network):
ip_str = _compat_str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = _compat_str(self.ip)
else:
ip_str = _compat_str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = "%032x" % ip_int
parts = [hex_str[x : x + 4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return "%s/%d" % (":".join(parts), self._prefixlen)
return ":".join(parts)
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv6 address.
This implements the method described in RFC3596 2.5.
"""
reverse_chars = self.exploded[::-1].replace(":", "")
return ".".join(reverse_chars) + ".ip6.arpa"
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
__slots__ = ("_ip", "__weakref__")
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, "big")
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if "/" in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in self._constants._multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return any(self in x for x in self._constants._reserved_networks)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in self._constants._linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in self._constants._sitelocal_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv6-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, true if the address is not reserved per
iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
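    # Illustrative behaviour:
    #     IPv6Address('::ffff:192.0.2.1').ipv4_mapped -> IPv4Address('192.0.2.1')
    #     IPv6Address('2001:db8::1').ipv4_mapped      -> None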
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (
IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF),
)
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
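    # Illustrative behaviour:
    #     IPv6Address('2002:c000:201::').sixtofour -> IPv4Address('192.0.2.1')
    #     IPv6Address('2001:db8::1').sixtofour     -> None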
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv6Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return "%s/%d" % (
self._string_from_ip_int(self._ip),
self.network.prefixlen,
)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return (
self.network < other.network
or self.network == other.network
and address_less
)
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen)
@property
def with_netmask(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask)
@property
def with_hostmask(self):
return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
Attributes: [examples for IPv6('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
            strict: A boolean. If true, ensure that we have been passed
                a true network address, e.g., 2001:db8::1000/124 and not an
                IP address on a network, e.g., 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Efficient constructor from integer or packed address
if isinstance(address, (bytes, _compat_int_types)):
self.network_address = IPv6Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen
)
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
self.network_address = IPv6Address(address[0])
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError("%s has host bits set" % self)
else:
self.network_address = IPv6Address(
packed & int(self.netmask)
)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (
IPv6Address(int(self.network_address) & int(self.netmask))
!= self.network_address
):
raise ValueError("%s has host bits set" % self)
self.network_address = IPv6Address(
int(self.network_address) & int(self.netmask)
)
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast + 1):
yield self._address_class(x)
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (
self.network_address.is_site_local
and self.broadcast_address.is_site_local
)
class _IPv6Constants(object):
_linklocal_network = IPv6Network("fe80::/10")
_multicast_network = IPv6Network("ff00::/8")
_private_networks = [
IPv6Network("::1/128"),
IPv6Network("::/128"),
IPv6Network("::ffff:0:0/96"),
IPv6Network("100::/64"),
IPv6Network("2001::/23"),
IPv6Network("2001:2::/48"),
IPv6Network("2001:db8::/32"),
IPv6Network("2001:10::/28"),
IPv6Network("fc00::/7"),
IPv6Network("fe80::/10"),
]
_reserved_networks = [
IPv6Network("::/8"),
IPv6Network("100::/8"),
IPv6Network("200::/7"),
IPv6Network("400::/6"),
IPv6Network("800::/5"),
IPv6Network("1000::/4"),
IPv6Network("4000::/3"),
IPv6Network("6000::/3"),
IPv6Network("8000::/3"),
IPv6Network("A000::/3"),
IPv6Network("C000::/3"),
IPv6Network("E000::/4"),
IPv6Network("F000::/5"),
IPv6Network("F800::/6"),
IPv6Network("FE00::/9"),
]
_sitelocal_network = IPv6Network("fec0::/10")
IPv6Address._constants = _IPv6Constants
| azaghal/ansible | test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py | Python | gpl-3.0 | 83,170 |
"""
reminder mail test
"""
import sys
import locale
import datetime
import pytest
from admin.mailing.mailer import get_template
from admin.mailing.mailer import send_mail_plain_text
from admin.mailing.mailer import FROM
@pytest.mark.skip(reason='Not valid for pytest')
def test_reminder():
locale.setlocale(locale.LC_ALL, 'fr_FR')
now = datetime.datetime.now()
title = 'Rappel'
subtitle = '1ere journee'
mail_tpl = get_template('reminder.txt')
username = 'Test'
unsubscribeurl = '#'
infos = {
'num_matches': 3,
'plural_es': 'es',
'date': str(now),
'week_loc': '1ere',
'week': 1,
'title': title,
'subtitle': subtitle,
'user': username,
'unsubscribeurl': unsubscribeurl,
}
content = mail_tpl % infos
# sends to sender himself
send_mail_plain_text(title, FROM, content)
if __name__ == '__main__':
test_reminder()
| dmartin35/pronosfoot | admin/test/test_reminder_mail.py | Python | mit | 944 |
from __future__ import print_function
from itertools import product
import obarasaika.shell as shell
import obarasaika.obara_saika as os
za = 1.1
zb = 1.2
zc = 1.3
zd = 1.4
ra = [1.0, 0.0, 1.0]
rb = [0.0, 1.0, 2.0]
rc = [0.0, 0.0, 3.0]
rd = [0.0, 0.0, 4.0]
q_base = [0, 0, 0, 0, 0, 0]
q_small = [0, 1, 0, 0, 0, 1]
q_large = [1, 1, 1, 2, 2, 2]
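# Assumed convention (based on how these shell lists are passed to the
# integral routines below): each q concatenates the Cartesian angular-momentum
# powers [ax, ay, az, bx, by, bz] of the two centers, so q_small pairs a p_y
# function on A with a p_z function on B.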
def compute_coulomb():
# get all integrals up to pppp
for p in product('01', repeat=4):
for c in shell.get_shell4(int(p[0]), int(p[1]), int(p[2]), int(p[3])):
if sum(c) > 0:
print(c, os.get_coulomb(za, zb, zc, zd, ra, rb, rc, rd, c))
def compute_overlap():
print('# compute_overlap_base')
S = os.get_overlap(za, zb, ra, rb, q_base)
print(S)
print('# compute_overlap_small')
S = os.get_overlap(za, zb, ra, rb, q_small)
print(S)
print('# compute_overlap_large')
S = os.get_overlap(za, zb, ra, rb, q_large)
print(S)
def compute_kinetic():
print('# compute_kinetic_base')
T = os.get_kinetic(za, zb, ra, rb, q_base)
print(T)
print('# compute_kinetic_small')
T = os.get_kinetic(za, zb, ra, rb, q_small)
print(T)
print('# compute_kinetic_large')
T = os.get_kinetic(za, zb, ra, rb, q_large)
print(T)
def compute_nuclear():
print('# compute_nuclear_base')
V = os.get_nuclear(za, zb, ra, rb, rc, q_base)
print(V)
print('# compute_nuclear_small')
V = os.get_nuclear(za, zb, ra, rb, rc, q_small)
print(V)
print('# compute_nuclear_large')
V = os.get_nuclear(za, zb, ra, rb, rc, q_large)
print(V)
def compute_moment():
print('# compute_moment_base')
M = os.get_moment(za, zb, ra, rb, rc, q_base, [1, 1, 1])
print(M)
print('# compute_moment_small')
M = os.get_moment(za, zb, ra, rb, rc, q_small, [1, 1, 1])
print(M)
print('# compute_moment_large')
M = os.get_moment(za, zb, ra, rb, rc, q_large, [1, 1, 1])
print(M)
def compute_angmom():
print('# compute_angmom_base')
L = os.get_angmom(za, zb, ra, rb, rc, q_base, 0)
print(L)
print('# compute_angmom_small')
L = os.get_angmom(za, zb, ra, rb, rc, q_small, 0)
print(L)
print('# compute_angmom_large')
L = os.get_angmom(za, zb, ra, rb, rc, q_large, 0)
print(L)
if __name__ == '__main__':
compute_overlap()
compute_kinetic()
compute_nuclear()
compute_moment()
compute_angmom()
| berquist/obarasaika | examples/compute.py | Python | bsd-3-clause | 2,414 |
# -*- coding: utf-8 -*-
# Just a template; inheritance is still problematic, so do not use this for inheritance
class Singleton(object):
def __init__(self):
globals()[self.__class__.__name__] = self
def __call__(self):
return self
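# Illustrative usage (assumed intent; this only works when the subclass is
# defined in this same module, because __init__ rebinds the class name in
# *these* globals() -- hence the warning above):
#     class Cache(Singleton):
#         pass
#     Cache()            # rebinds the module-level name 'Cache' to the instance
#     Cache() is Cache   # True: __call__ returns the instance itself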
# class Singleton:
# def __call__(self):
# return self
#
# Singleton = Singleton() | yes7rose/maya_utils | python/maya_utils/singleton.py | Python | mit | 330 |
"""
Test the garbage collection if we pass the picoscope object.
By: Mark Harfouche
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from picoscope import ps6000
# import the garbage collection interface
import gc
if __name__ == "__main__":
ps = ps6000.PS6000()
print("Found the following picoscope:")
print("Serial: " + ps.getUnitInfo("BatchAndSerial"))
pd = ps
print("Copied the picoscope object. " +
" the information of the copied object is:")
print("Serial: " + pd.getUnitInfo("BatchAndSerial"))
print("\n\n\n")
print("Using both objects")
ps.setChannel('A')
pd.setChannel('B')
del ps
print("Deleting the original object and collecting garbage.")
gc.collect()
print("Copied object still works:")
print("Serial: " + pd.getUnitInfo("BatchAndSerial"))
pd.close()
print("Now I closed the other object.")
| arunpersaud/pico-python | examples/garbageCollectorTest.py | Python | bsd-2-clause | 1,002 |
#
#
# Author : fcbruce <fcbruce8964@gmail.com>
#
# Time : Mon 01 May 2017 20:14:00
#
#
from keras.applications.vgg16 import VGG16, preprocess_input
from FeatureGenerator import FeatureGenerator
class VGG16FeatureGenerator(FeatureGenerator):
def __init__(self, weights='imagenet'):
FeatureGenerator.__init__(self, VGG16, (224, 224), preprocess_input, weights)
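# Illustrative usage (the extraction method named below is hypothetical; the
# real API is defined by FeatureGenerator):
#     gen = VGG16FeatureGenerator()   # loads the ImageNet weights on first use
#     # features = gen.extract(image)  # hypothetical call name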
| fcbruce/DBIR | src/VGG16FeatureGenerator.py | Python | mit | 377 |
"""
Django settings for maoaberta project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from dj_database_url import parse as parse_db_url
from prettyconf import config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
SECRET_KEY = config('SECRET_KEY', default='secret_key')
DEBUG = config('DEBUG', default=True, cast=config.boolean)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'contributors',
'organizations',
'projects'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'maoaberta.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'debug': config("TEMPLATE_DEBUG", default=DEBUG, cast=config.boolean),
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'maoaberta.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DEFAULT_DATABASE = 'sqlite:///' + os.path.join(BASE_DIR, 'maoaberta.sqlite')
DATABASES = {
'default': config('DATABASE_URL', cast=parse_db_url, default=DEFAULT_DATABASE),
}
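# Example (illustrative) of a DATABASE_URL value parsed by dj-database-url:
#     DATABASE_URL=postgres://user:password@localhost:5432/maoaberta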
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# Media files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| MauricioAlmeida/maoaberta | maoaberta/maoaberta/settings.py | Python | gpl-2.0 | 3,126 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.internal.platform.power_monitor import android_dumpsys_power_monitor
class DumpsysPowerMonitorMonitorTest(unittest.TestCase):
def testApplicationEnergyConsumption(self):
package = 'com.google.android.apps.chrome'
power_data = {
'system_total': 2000.0,
'per_package': {
package: {'data': [23.9], 'uid': '12345'}
}
}
results = (
android_dumpsys_power_monitor.DumpsysPowerMonitor.ProcessPowerData(
power_data, 4.0, package))
self.assertEqual(results['identifier'], 'dumpsys')
self.assertAlmostEqual(results['application_energy_consumption_mwh'], 95.6)
def testSystemEnergyConsumption(self):
power_data = {
'system_total': 2000.0,
'per_package': {}
}
results = (
android_dumpsys_power_monitor.DumpsysPowerMonitor.ProcessPowerData(
power_data, 4.0, 'some.package'))
self.assertEqual(results['identifier'], 'dumpsys')
self.assertEqual(results['application_energy_consumption_mwh'], 0)
self.assertEqual(results['energy_consumption_mwh'], 8000.0)
if __name__ == '__main__':
unittest.main()
| CapOM/ChromiumGStreamerBackend | tools/telemetry/telemetry/internal/platform/power_monitor/android_dumpsys_power_monitor_unittest.py | Python | bsd-3-clause | 1,315 |
# This script generates phase space portraits for the standard map at many different values of the "kicking" parameter K
# William Gilpin, 2014
from numpy import *
from scipy import *
from matplotlib.pyplot import *
from random import randrange
# Number of iterations per trajectory (a few generally suffice
# to sketch the full limit cycles)
N = 150;
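# Note: the tuple-unpacking parameter and xrange below are Python 2 syntax.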
def stdmap((x, p)):
pn = mod(p + K*sin(x), 2*pi)
xn = mod(x + pn, 2*pi)
return (xn, pn)
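# Illustrative iteration for K = 1.0 starting at (x, p) = (pi, pi/2):
#     pn = (pi/2 + 1.0*sin(pi)) mod 2*pi = pi/2
#     xn = (pi + pi/2) mod 2*pi = 3*pi/2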
# make the mesh for phase space
x0 = linspace(0, 2*pi, 7)
p0 = linspace(0, 2*pi, 8)
mesh = list()
for ii in xrange(len(x0)):
for jj in xrange(len(p0)):
mesh.append((x0[ii], p0[jj]))
Kvals = linspace(.1,2*pi,10)
for val in Kvals:
K = val
fig = figure(figsize=(5, 5))
for item in mesh:
traj = [item]
for ii in xrange(N):
traj.append(stdmap(traj[ii]))
plot(array(traj).T[0], array(traj).T[1],'.')
hold(True)
xlim([0, 2*pi])
ylim([0, 2*pi])
xlabel('Position (rad)')
ylabel('Momentum')
show()
# savefig('stdmap_'+ str(round(K, 5))+'.png')
| williamgilpin/stdmap | stdmap_plotter.py | Python | mit | 1,084 |
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import os
import shutil
import uuid
import pytest
import salt.modules.seed as seed
import salt.utils.files
import salt.utils.odict
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {seed: {}}
@pytest.mark.slow_test
def test_mkconfig_odict():
with patch.dict(seed.__opts__, {"master": "foo"}):
ddd = salt.utils.odict.OrderedDict()
ddd["b"] = "b"
ddd["a"] = "b"
data = seed.mkconfig(ddd, approve_key=False)
with salt.utils.files.fopen(data["config"]) as fic:
fdata = fic.read()
assert fdata == "b: b\na: b\nmaster: foo\n"
def test_prep_bootstrap():
"""
Test to update and get the random script to a random place
"""
with patch.dict(
seed.__salt__,
{
"config.gather_bootstrap_script": MagicMock(
return_value=os.path.join("BS_PATH", "BS")
)
},
), patch.object(uuid, "uuid4", return_value="UUID"), patch.object(
os.path, "exists", return_value=True
), patch.object(
os, "chmod", return_value=None
), patch.object(
shutil, "copy", return_value=None
):
expect = (
os.path.join("MPT", "tmp", "UUID", "BS"),
os.sep + os.path.join("tmp", "UUID"),
)
assert seed.prep_bootstrap("MPT") == expect
expect = (
os.sep + os.path.join("MPT", "tmp", "UUID", "BS"),
os.sep + os.path.join("tmp", "UUID"),
)
assert seed.prep_bootstrap(os.sep + "MPT") == expect
def test_apply_():
"""
Test to seed a location (disk image, directory, or block device)
with the minion config, approve the minion's key, and/or install
salt-minion.
"""
mock = MagicMock(
side_effect=[
False,
{"type": "type", "target": "target"},
{"type": "type", "target": "target"},
{"type": "type", "target": "target"},
]
)
with patch.dict(seed.__salt__, {"file.stats": mock}):
assert seed.apply_("path") == "path does not exist"
with patch.object(seed, "_mount", return_value=False):
assert seed.apply_("path") == "target could not be mounted"
with patch.object(seed, "_mount", return_value="/mountpoint"):
with patch.object(os.path, "join", return_value="A"):
with patch.object(os, "makedirs", MagicMock(side_effect=OSError("f"))):
with patch.object(os.path, "isdir", return_value=False):
pytest.raises(OSError, seed.apply_, "p")
with patch.object(os, "makedirs", MagicMock()):
with patch.object(seed, "mkconfig", return_value="A"):
with patch.object(seed, "_check_install", return_value=False):
with patch.object(
seed, "_umount", return_value=None
) as umount_mock:
assert not seed.apply_("path", install=False)
umount_mock.assert_called_once_with(
"/mountpoint", "target", "type"
)
| saltstack/salt | tests/pytests/unit/modules/test_seed.py | Python | apache-2.0 | 3,332 |
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Provide tagging functionality.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from bisect import insort_left
from xml.sax.saxutils import escape
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.lib import Tag
from gramps.gen.db import DbTxn
from ..dbguielement import DbGUIElement
from ..listmodel import ListModel, NOSORT, COLOR, INTEGER
from gramps.gen.const import URL_MANUAL_PAGE
from ..display import display_help
from ..dialog import ErrorDialog, QuestionDialog2
import gramps.gui.widgets.progressdialog as progressdlg
from ..uimanager import ActionGroup
from ..managedwindow import ManagedWindow
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
TAG_1 = '''
<section id='TagMenu' groups='RW'>
<submenu>
<attribute name="label" translatable="yes">Tag</attribute>
%s
</submenu>
</section>
'''
TAG_2 = (
''' <placeholder id='TagTool' groups='RW'>
<child groups='RO'>
<object class="GtkToolButton" id="TagButton">
<property name="icon-name">gramps-tag</property>
<property name="action-name">win.TagButton</property>
<property name="tooltip_text" translatable="yes">'''
'''Tag selected rows</property>
<property name="label" translatable="yes">Tag</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
</placeholder>
''')
TAG_3 = '''
<menu id='TagPopup' groups='RW'>
%s
</menu>'''
TAG_MENU = (
'''<section>
<item>
<attribute name="action">win.NewTag</attribute>
<attribute name="label" translatable="yes">'''
'''New Tag...</attribute>
</item>
<item>
<attribute name="action">win.OrganizeTags</attribute>
<attribute name="label" translatable="yes">'''
'''Organize Tags...</attribute>
</item>
</section>
<section>
%s
</section>
''')
WIKI_HELP_PAGE = '%s_-_Filters' % \
URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Organize_Tags_Window')
WIKI_HELP_SEC2 = _('manual|New_Tag_dialog')
#-------------------------------------------------------------------------
#
# Tags
#
#-------------------------------------------------------------------------
class Tags(DbGUIElement):
"""
Provide tagging functionality.
"""
def __init__(self, uistate, dbstate):
self.signal_map = {
'tag-add' : self._tag_add,
'tag-delete' : self._tag_delete,
'tag-update' : self._tag_update,
'tag-rebuild' : self._tag_rebuild
}
DbGUIElement.__init__(self, dbstate.db)
self.dbstate = dbstate
self.db = dbstate.db
self.uistate = uistate
self.tag_id = None
self.tag_ui = None
self.tag_action = None
self.__tag_list = []
dbstate.connect('database-changed', self._db_changed)
dbstate.connect('no-database', self.tag_disable)
self._build_tag_menu()
def tag_enable(self, update_menu=True):
"""
Enables the UI and action groups for the tag menu.
"""
self.uistate.uimanager.insert_action_group(self.tag_action)
self.tag_id = self.uistate.uimanager.add_ui_from_string(self.tag_ui)
if update_menu:
self.uistate.uimanager.update_menu()
def tag_disable(self):
"""
Remove the UI and action groups for the tag menu.
"""
if self.tag_id is not None:
self.uistate.uimanager.remove_ui(self.tag_id)
self.uistate.uimanager.remove_action_group(self.tag_action)
self.tag_id = None
def _db_changed(self, db):
"""
Called when the database is changed.
"""
self.db = db
self._change_db(db)
self._tag_rebuild()
def _connect_db_signals(self):
"""
Connect database signals defined in the signal map.
"""
for sig in self.signal_map:
self.callman.add_db_signal(sig, self.signal_map[sig])
def _tag_add(self, handle_list):
"""
Called when tags are added.
"""
for handle in handle_list:
tag = self.db.get_tag_from_handle(handle)
insort_left(self.__tag_list, (tag.get_name(), handle))
self.update_tag_menu()
def _tag_update(self, handle_list):
"""
Called when tags are updated.
"""
for handle in handle_list:
item = [item for item in self.__tag_list if item[1] == handle][0]
self.__tag_list.remove(item)
tag = self.db.get_tag_from_handle(handle)
insort_left(self.__tag_list, (tag.get_name(), handle))
self.update_tag_menu()
def _tag_delete(self, handle_list):
"""
Called when tags are deleted.
"""
self.__tag_list = [item for item in self.__tag_list
if item[1] not in handle_list]
self.update_tag_menu()
def _tag_rebuild(self):
"""
Called when the tag list needs to be rebuilt.
"""
self.__tag_list = []
if self.dbstate.is_open():
for handle in self.db.get_tag_handles(sort_handles=True):
tag = self.db.get_tag_from_handle(handle)
self.__tag_list.append((tag.get_name(), tag.get_handle()))
self.update_tag_menu()
def update_tag_menu(self):
"""
Re-build the menu when a tag is added or removed.
"""
enabled = self.tag_id is not None
if enabled:
self.tag_disable()
self._build_tag_menu()
if enabled:
self.tag_enable()
def _build_tag_menu(self):
"""
Builds the UI and action group for the tag menu.
"""
actions = []
if not self.dbstate.is_open():
self.tag_ui = ['']
self.tag_action = ActionGroup(name='Tag')
return
tag_menu = ''
menuitem = '''
<item>
<attribute name="action">win.TAG_%s</attribute>
<attribute name="label" translatable="yes">%s</attribute>
</item>'''
for tag_name, handle in self.__tag_list:
tag_menu += menuitem % (handle, tag_name)
actions.append(('TAG_%s' % handle,
make_callback(self.tag_selected_rows, handle)))
tag_menu = TAG_MENU % tag_menu
self.tag_ui = [TAG_1 % tag_menu, TAG_2, TAG_3 % tag_menu]
actions.append(('NewTag', self.cb_new_tag))
actions.append(('OrganizeTags', self.cb_organize_tags))
actions.append(('TagButton', self.cb_tag_button))
self.tag_action = ActionGroup(name='Tag')
self.tag_action.add_actions(actions)
def cb_tag_button(self, *args):
"""
Display the popup menu when the toolbar button is clicked.
"""
menu = self.uistate.uimanager.get_widget('TagPopup')
button = self.uistate.uimanager.get_widget('TagButton')
popup_menu = Gtk.Menu.new_from_model(menu)
popup_menu.attach_to_widget(button, None)
popup_menu.show_all()
if Gtk.MINOR_VERSION < 22:
# ToDo The following is reported to work poorly with Wayland
popup_menu.popup(None, None, cb_menu_position, button, 0, 0)
else:
popup_menu.popup_at_widget(button, Gdk.Gravity.SOUTH,
Gdk.Gravity.NORTH_WEST, None)
def cb_organize_tags(self, *action):
"""
Display the Organize Tags dialog.
"""
OrganizeTagsDialog(self.db, self.uistate, [])
def cb_new_tag(self, *action):
"""
Create a new tag and tag the selected objects.
"""
tag = Tag()
tag.set_priority(self.db.get_number_of_tags())
EditTag(self.db, self.uistate, [], tag)
if tag.get_handle():
self.tag_selected_rows(tag.get_handle())
def tag_selected_rows(self, tag_handle):
"""
Tag the selected rows with the given tag.
"""
view = self.uistate.viewmanager.active_page
selected = view.selected_handles()
# Make the dialog modal so that the user can't start another
# database transaction while the one setting tags is still running.
pmon = progressdlg.ProgressMonitor(progressdlg.GtkProgressDialog,
("", self.uistate.window, Gtk.DialogFlags.MODAL), popup_time=2)
status = progressdlg.LongOpStatus(msg=_("Adding Tags"),
total_steps=len(selected),
interval=len(selected)//20)
pmon.add_op(status)
tag = self.db.get_tag_from_handle(tag_handle)
msg = _('Tag Selection (%s)') % tag.get_name()
with DbTxn(msg, self.db) as trans:
for object_handle in selected:
status.heartbeat()
view.add_tag(trans, object_handle, tag_handle)
status.end()
def cb_menu_position(*args):
"""
Determine the position of the popup menu.
"""
# takes two argument: menu, button
if len(args) == 2:
menu = args[0]
button = args[1]
    # broken introspection can't handle MenuPositionFunc annotations correctly
else:
menu = args[0]
button = args[3]
ret_val, x_pos, y_pos = button.get_window().get_origin()
x_pos += button.get_allocation().x
y_pos += button.get_allocation().y + button.get_allocation().height
return (x_pos, y_pos, False)
def make_callback(func, tag_handle):
"""
Generates a callback function based off the passed arguments
"""
return lambda x, y: func(tag_handle)
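# Illustrative use: Gio action handlers receive (action, parameter); the
# returned lambda discards both and calls func with the captured handle:
#     cb = make_callback(tags.tag_selected_rows, tag_handle)
#     cb(action, parameter)   # -> tags.tag_selected_rows(tag_handle)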
#-------------------------------------------------------------------------
#
# Organize Tags Dialog
#
#-------------------------------------------------------------------------
class OrganizeTagsDialog(ManagedWindow):
"""
A dialog to enable the user to organize tags.
"""
def __init__(self, db, uistate, track):
ManagedWindow.__init__(self, uistate, track, self.__class__, modal=True)
# the self.top.run() below makes Gtk make it modal, so any change to
# the previous line's "modal" would require that line to be changed
self.db = db
self.namelist = None
self.namemodel = None
self.top = self._create_dialog()
self.set_window(self.top, None, _('Organize Tags'))
self.setup_configs('interface.organizetagsdialog', 400, 350)
self.show()
self.run()
# this is meaningless while it's modal, but since this ManagedWindow can
# have an EditTag ManagedWindow child it needs a non-None second argument
def build_menu_names(self, obj):
return (_('Organize Tags'), ' ')
def run(self):
"""
Run the dialog and return the result.
"""
self._populate_model()
while True:
# the self.top.run() makes Gtk make it modal, so any change to that
# line would require the ManagedWindow.__init__ to be changed also
response = self.top.run()
if response == Gtk.ResponseType.HELP:
display_help(webpage=WIKI_HELP_PAGE,
section=WIKI_HELP_SEC)
else:
break
# Save changed priority values
if response == Gtk.ResponseType.CLOSE and self.__priorities_changed():
with DbTxn(_('Change Tag Priority'), self.db) as trans:
self.__change_tag_priority(trans)
if response != Gtk.ResponseType.DELETE_EVENT:
self.close()
def __priorities_changed(self):
"""
Return True if the tag priorities have changed else return False.
"""
priorities = [row[0] for row in self.namemodel.model]
return priorities != list(range(len(self.namemodel.model)))
def __change_tag_priority(self, trans):
"""
Change the priority of the tags. The order of the list corresponds to
the priority of the tags. The top tag in the list is the highest
priority tag.
"""
for new_priority, row in enumerate(self.namemodel.model):
if row[0] != new_priority:
row[0] = new_priority
tag = self.db.get_tag_from_handle(row[1])
if tag:
tag.set_priority(new_priority)
self.db.commit_tag(tag, trans)
def _populate_model(self):
"""
Populate the model.
"""
self.namemodel.clear()
tags = []
for tag in self.db.iter_tags():
tags.append((tag.get_priority(),
tag.get_handle(),
tag.get_name(),
tag.get_color()))
for row in sorted(tags):
self.namemodel.add(row)
def _create_dialog(self):
"""
Create a dialog box to organize tags.
"""
# pylint: disable-msg=E1101
top = Gtk.Dialog(parent=self.parent_window)
top.vbox.set_spacing(5)
label = Gtk.Label(label='<span size="larger" weight="bold">%s</span>'
% _("Organize Tags"))
label.set_use_markup(True)
top.vbox.pack_start(label, 0, 0, 5)
box = Gtk.Box()
top.vbox.pack_start(box, 1, 1, 5)
name_titles = [('', NOSORT, 20, INTEGER), # Priority
('', NOSORT, 100), # Handle
(_('Name'), NOSORT, 200),
(_('Color'), NOSORT, 50, COLOR)]
self.namelist = Gtk.TreeView()
self.namemodel = ListModel(self.namelist, name_titles)
slist = Gtk.ScrolledWindow()
slist.add(self.namelist)
slist.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
box.pack_start(slist, 1, 1, 5)
bbox = Gtk.ButtonBox(orientation=Gtk.Orientation.VERTICAL)
bbox.set_layout(Gtk.ButtonBoxStyle.START)
bbox.set_spacing(6)
up = Gtk.Button.new_with_mnemonic(_('_Up'))
down = Gtk.Button.new_with_mnemonic(_('_Down'))
add = Gtk.Button.new_with_mnemonic(_('_Add'))
edit = Gtk.Button.new_with_mnemonic(_('_Edit'))
remove = Gtk.Button.new_with_mnemonic(_('_Remove'))
up.connect('clicked', self.cb_up_clicked)
down.connect('clicked', self.cb_down_clicked)
add.connect('clicked', self.cb_add_clicked, top)
edit.connect('clicked', self.cb_edit_clicked, top)
remove.connect('clicked', self.cb_remove_clicked, top)
top.add_button(_('_Close'), Gtk.ResponseType.CLOSE)
top.add_button(_('_Help'), Gtk.ResponseType.HELP)
bbox.add(up)
bbox.add(down)
bbox.add(add)
bbox.add(edit)
bbox.add(remove)
box.pack_start(bbox, 0, 0, 5)
return top
def cb_up_clicked(self, obj):
"""
Move the current selection up one row.
"""
row = self.namemodel.get_selected_row()
self.namemodel.move_up(row)
def cb_down_clicked(self, obj):
"""
Move the current selection down one row.
"""
row = self.namemodel.get_selected_row()
self.namemodel.move_down(row)
def cb_add_clicked(self, button, top):
"""
Create a new tag.
"""
tag = Tag()
tag.set_priority(self.db.get_number_of_tags())
EditTag(self.db, self.uistate, self.track, tag)
if tag.get_handle():
self.namemodel.add((tag.get_priority(),
tag.get_handle(),
tag.get_name(),
tag.get_color()))
def cb_edit_clicked(self, button, top):
"""
        Edit the name and color of an existing tag.
"""
store, iter_ = self.namemodel.get_selected()
if iter_ is None:
return
tag = self.db.get_tag_from_handle(store.get_value(iter_, 1))
EditTag(self.db, self.uistate, self.track, tag)
store.set_value(iter_, 2, tag.get_name())
store.set_value(iter_, 3, tag.get_color())
def cb_remove_clicked(self, button, top):
"""
Remove the selected tag.
"""
store, iter_ = self.namemodel.get_selected()
if iter_ is None:
return
tag_handle = store.get_value(iter_, 1)
tag_name = store.get_value(iter_, 2)
yes_no = QuestionDialog2(
_("Remove tag '%s'?") % tag_name,
_("The tag definition will be removed. The tag will be also "
"removed from all objects in the database."),
_("Yes"),
_("No"),
parent=self.window)
prompt = yes_no.run()
if prompt:
fnc = {'Person': (self.db.get_person_from_handle,
self.db.commit_person),
'Family': (self.db.get_family_from_handle,
self.db.commit_family),
'Event': (self.db.get_event_from_handle,
self.db.commit_event),
'Place': (self.db.get_place_from_handle,
self.db.commit_place),
'Source': (self.db.get_source_from_handle,
self.db.commit_source),
'Citation': (self.db.get_citation_from_handle,
self.db.commit_citation),
'Repository': (self.db.get_repository_from_handle,
self.db.commit_repository),
'Media': (self.db.get_media_from_handle,
self.db.commit_media),
'Note': (self.db.get_note_from_handle,
self.db.commit_note)}
links = [link for link in self.db.find_backlink_handles(tag_handle)]
# Make the dialog modal so that the user can't start another
# database transaction while the one removing tags is still running.
pmon = progressdlg.ProgressMonitor(progressdlg.GtkProgressDialog,
("", self.parent_window, Gtk.DialogFlags.MODAL), popup_time=2)
status = progressdlg.LongOpStatus(msg=_("Removing Tags"),
total_steps=len(links),
interval=len(links)//20)
pmon.add_op(status)
msg = _('Delete Tag (%s)') % tag_name
self.namemodel.remove(iter_)
with DbTxn(msg, self.db) as trans:
for classname, handle in links:
status.heartbeat()
obj = fnc[classname][0](handle) # get from handle
obj.remove_tag(tag_handle)
fnc[classname][1](obj, trans) # commit
self.db.remove_tag(tag_handle, trans)
self.__change_tag_priority(trans)
status.end()
#-------------------------------------------------------------------------
#
# Tag editor
#
#-------------------------------------------------------------------------
class EditTag(ManagedWindow):
"""
A dialog to enable the user to create a new tag.
"""
def __init__(self, db, uistate, track, tag):
self.tag = tag
if self.tag.get_handle():
self.title = _('Tag: %s') % self.tag.get_name()
else:
self.title = _('New Tag')
ManagedWindow.__init__(self, uistate, track, self.__class__, modal=True)
# the self.top.run() below makes Gtk make it modal, so any change to
# the previous line's "modal" would require that line to be changed
self.db = db
self.entry = None
self.color = None
self.top = self._create_dialog()
self.set_window(self.top, None, self.title)
self.setup_configs('interface.edittag', 320, 100)
self.show()
self.run()
def build_menu_names(self, obj): # this is meaningless while it's modal
return (self.title, None)
def run(self):
"""
Run the dialog and return the result.
"""
while True:
# the self.top.run() makes Gtk make it modal, so any change to that
# line would require the ManagedWindow.__init__ to be changed also
response = self.top.run()
if response == Gtk.ResponseType.HELP:
display_help(webpage=WIKI_HELP_PAGE,
section=WIKI_HELP_SEC2)
else:
break
if response == Gtk.ResponseType.OK:
self._save()
if response != Gtk.ResponseType.DELETE_EVENT:
self.close()
def _save(self):
"""
Save the changes made to the tag.
"""
self.tag.set_name(str(self.entry.get_text()))
rgba = self.color.get_rgba()
hexval = "#%02x%02x%02x" % (int(rgba.red * 255),
int(rgba.green * 255),
int(rgba.blue * 255))
self.tag.set_color(hexval)
if not self.tag.get_name():
ErrorDialog(_("Cannot save tag"),
_("The tag name cannot be empty"),
parent=self.window)
return
if not self.tag.get_handle():
msg = _("Add Tag (%s)") % self.tag.get_name()
with DbTxn(msg, self.db) as trans:
self.db.add_tag(self.tag, trans)
else:
orig = self.db.get_tag_from_handle(self.tag.get_handle())
if self.tag.serialize() != orig.serialize():
msg = _("Edit Tag (%s)") % self.tag.get_name()
with DbTxn(msg, self.db) as trans:
self.db.commit_tag(self.tag, trans)
def _create_dialog(self):
"""
Create a dialog box to enter a new tag.
"""
# pylint: disable-msg=E1101
top = Gtk.Dialog(parent=self.parent_window)
top.vbox.set_spacing(5)
hbox = Gtk.Box()
top.vbox.pack_start(hbox, False, False, 10)
label = Gtk.Label(label=_('Tag Name:'))
self.entry = Gtk.Entry()
self.entry.set_text(self.tag.get_name())
self.color = Gtk.ColorButton()
rgba = Gdk.RGBA()
rgba.parse(self.tag.get_color())
self.color.set_rgba(rgba)
title = _("%(title)s - Gramps") % {'title': _("Pick a Color")}
self.color.set_title(title)
hbox.pack_start(label, False, False, 5)
hbox.pack_start(self.entry, True, True, 5)
hbox.pack_start(self.color, False, False, 5)
top.add_button(_('_Help'), Gtk.ResponseType.HELP)
top.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
top.add_button(_('_OK'), Gtk.ResponseType.OK)
return top
| dermoth/gramps | gramps/gui/views/tags.py | Python | gpl-2.0 | 24,494 |
from django.conf.urls import patterns
urlpatterns = patterns('', )
| goinnn/deldichoalhecho | promises_instances/urls.py | Python | gpl-3.0 | 68 |
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,json,urlparse,base64,random
from resources.lib.modules import client
from resources.lib.modules import control
class trailer:
def __init__(self):
self.base_link = 'http://www.youtube.com'
self.key_link = random.choice(['QUl6YVN5RDd2aFpDLTYta2habTVuYlVyLTZ0Q0JRQnZWcnFkeHNz', 'QUl6YVN5Q2RiNEFNenZpVG0yaHJhSFY3MXo2Nl9HNXBhM2ZvVXd3'])
self.key_link = '&key=%s' % base64.urlsafe_b64decode(self.key_link)
self.search_link = 'https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&maxResults=5&q=%s'
self.youtube_search = 'https://www.googleapis.com/youtube/v3/search?q='
self.youtube_watch = 'http://www.youtube.com/watch?v=%s'
def play(self, name, url=None):
try:
url = self.worker(name, url)
if url == None: return
title = control.infoLabel('listitem.title')
if title == '': title = control.infoLabel('listitem.label')
icon = control.infoLabel('listitem.icon')
item = control.item(path=url, iconImage=icon, thumbnailImage=icon)
try: item.setArt({'icon': icon})
except: pass
item.setInfo(type='Video', infoLabels = {'title': title})
control.player.play(url, item)
except:
pass
def worker(self, name, url):
try:
if url.startswith(self.base_link):
url = self.resolve(url)
if url == None: raise Exception()
return url
elif not url.startswith('http://'):
url = self.youtube_watch % url
url = self.resolve(url)
if url == None: raise Exception()
return url
else:
raise Exception()
except:
query = name + ' trailer'
query = self.youtube_search + query
url = self.search(query)
if url == None: return
return url
def search(self, url):
try:
query = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
apiLang = control.apiLanguage()['youtube']
if apiLang != 'en':
url += "&relevanceLanguage=%s" % apiLang
url = self.search_link % urllib.quote_plus(query) + self.key_link
result = client.request(url)
items = json.loads(result)['items']
items = [(i['id']['videoId']) for i in items]
for url in items:
url = self.resolve(url)
if not url is None: return url
except:
return
def resolve(self, url):
try:
id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
result = client.request('http://www.youtube.com/watch?v=%s' % id)
message = client.parseDOM(result, 'div', attrs = {'id': 'unavailable-submessage'})
message = ''.join(message)
alert = client.parseDOM(result, 'div', attrs = {'id': 'watch7-notification-area'})
if len(alert) > 0: raise Exception()
if re.search('[a-zA-Z]', message): raise Exception()
url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id
return url
except:
return
| azumimuo/family-xbmc-addon | plugin.video.exodus/resources/lib/modules/trailer.py | Python | gpl-2.0 | 4,049 |
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import difflib
import six
import sys
import time
from mock import Mock, patch
from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor
from cassandra.cluster import Cluster
from cassandra.encoder import Encoder
from cassandra.metadata import (Metadata, KeyspaceMetadata, IndexMetadata,
Token, MD5Token, TokenMap, murmur3, Function, Aggregate, protect_name, protect_names,
get_schema_parser)
from cassandra.policies import SimpleConvictionPolicy
from cassandra.pool import Host
from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \
BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, \
BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION
def setup_module():
use_singledc()
global CASS_SERVER_VERSION
CASS_SERVER_VERSION = get_server_versions()[0]
class HostMetatDataTests(BasicExistingKeyspaceUnitTestCase):
def test_broadcast_listen_address(self):
"""
        Check to ensure that the broadcast and listen addresses are populated correctly
@since 3.3
@jira_ticket PYTHON-332
        @expected_result They are populated for C* > 2.0.16, 2.1.6, 2.2.0
@test_category metadata
"""
# All nodes should have the broadcast_address set
for host in self.cluster.metadata.all_hosts():
self.assertIsNotNone(host.broadcast_address)
con = self.cluster.control_connection.get_connections()[0]
local_host = con.host
# The control connection node should have the listen address set.
listen_addrs = [host.listen_address for host in self.cluster.metadata.all_hosts()]
self.assertTrue(local_host in listen_addrs)
def test_host_release_version(self):
"""
        Checks the host's release version and validates that it is equal to the
Cassandra version we are using in our test harness.
@since 3.3
@jira_ticket PYTHON-301
@expected_result host.release version should match our specified Cassandra version.
@test_category metadata
"""
for host in self.cluster.metadata.all_hosts():
self.assertTrue(host.release_version.startswith(CASSANDRA_VERSION))
class SchemaMetadataTests(BasicSegregatedKeyspaceUnitTestCase):
def test_schema_metadata_disable(self):
"""
        Checks to ensure that the schema_metadata_enabled and token_metadata_enabled
        flags work correctly.
@since 3.3
@jira_ticket PYTHON-327
        @expected_result schema metadata will not be populated when schema_metadata_enabled is false;
        token_metadata will be missing when token_metadata_enabled is set to false
@test_category metadata
"""
# Validate metadata is missing where appropriate
no_schema = Cluster(schema_metadata_enabled=False)
no_schema_session = no_schema.connect()
self.assertEqual(len(no_schema.metadata.keyspaces), 0)
self.assertEqual(no_schema.metadata.export_schema_as_string(), '')
no_token = Cluster(token_metadata_enabled=False)
no_token_session = no_token.connect()
self.assertEqual(len(no_token.metadata.token_map.token_to_host_owner), 0)
# Do a simple query to ensure queries are working
query = "SELECT * FROM system.local"
no_schema_rs = no_schema_session.execute(query)
no_token_rs = no_token_session.execute(query)
self.assertIsNotNone(no_schema_rs[0])
self.assertIsNotNone(no_token_rs[0])
no_schema.shutdown()
no_token.shutdown()
def make_create_statement(self, partition_cols, clustering_cols=None, other_cols=None, compact=False):
clustering_cols = clustering_cols or []
other_cols = other_cols or []
statement = "CREATE TABLE %s.%s (" % (self.keyspace_name, self.function_table_name)
if len(partition_cols) == 1 and not clustering_cols:
statement += "%s text PRIMARY KEY, " % protect_name(partition_cols[0])
else:
statement += ", ".join("%s text" % protect_name(col) for col in partition_cols)
statement += ", "
statement += ", ".join("%s text" % protect_name(col) for col in clustering_cols + other_cols)
if len(partition_cols) != 1 or clustering_cols:
statement += ", PRIMARY KEY ("
if len(partition_cols) > 1:
statement += "(" + ", ".join(protect_names(partition_cols)) + ")"
else:
statement += protect_name(partition_cols[0])
if clustering_cols:
statement += ", "
statement += ", ".join(protect_names(clustering_cols))
statement += ")"
statement += ")"
if compact:
statement += " WITH COMPACT STORAGE"
return statement
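    # Illustrative output (keyspace/table names come from the fixture):
    #     make_create_statement(["a"], ["b"], ["c"]) ->
    #     'CREATE TABLE <keyspace>.<table> (a text, b text, c text, PRIMARY KEY (a, b))'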
def check_create_statement(self, tablemeta, original):
recreate = tablemeta.as_cql_query(formatted=False)
self.assertEqual(original, recreate[:len(original)])
execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name))
execute_until_pass(self.session, recreate)
# create the table again, but with formatting enabled
execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name))
recreate = tablemeta.as_cql_query(formatted=True)
execute_until_pass(self.session, recreate)
def get_table_metadata(self):
self.cluster.refresh_table_metadata(self.keyspace_name, self.function_table_name)
return self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name]
def test_basic_table_meta_properties(self):
create_statement = self.make_create_statement(["a"], [], ["b", "c"])
self.session.execute(create_statement)
self.cluster.refresh_schema_metadata()
meta = self.cluster.metadata
self.assertIsNotNone(meta.cluster_name)
self.assertTrue(self.keyspace_name in meta.keyspaces)
ksmeta = meta.keyspaces[self.keyspace_name]
self.assertEqual(ksmeta.name, self.keyspace_name)
self.assertTrue(ksmeta.durable_writes)
self.assertEqual(ksmeta.replication_strategy.name, 'SimpleStrategy')
self.assertEqual(ksmeta.replication_strategy.replication_factor, 1)
self.assertTrue(self.function_table_name in ksmeta.tables)
tablemeta = ksmeta.tables[self.function_table_name]
self.assertEqual(tablemeta.keyspace_name, ksmeta.name)
self.assertEqual(tablemeta.name, self.function_table_name)
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
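# Every option surfaced in table metadata should be one the version-specific schema parser recognizes.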
cc = self.cluster.control_connection._connection
parser = get_schema_parser(cc, str(CASS_SERVER_VERSION[0]), 1)
for option in tablemeta.options:
self.assertIn(option, parser.recognized_table_options)
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_protected(self):
create_statement = self.make_create_statement(["Aa"], ["Bb"], ["Cc"])
create_statement += ' WITH CLUSTERING ORDER BY ("Bb" ASC)'
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'Aa'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'Bb'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'Aa', u'Bb', u'Cc'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_more_columns(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b', u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual(
[u'a', u'b', u'c', u'd', u'e', u'f'],
sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_primary_key(self):
create_statement = self.make_create_statement(["a", "b"], [], ["c"])
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_in_compound_primary_key(self):
create_statement = self.make_create_statement(["a", "b"], ["c"], ["d", "e"])
create_statement += " WITH CLUSTERING ORDER BY (c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c', u'd', u'e'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_compact(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"], compact=True)
create_statement += " AND CLUSTERING ORDER BY (b ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_cluster_column_ordering_reversed_metadata(self):
"""
Simple test to ensure that the metadata associated with clustering order is surfaced correctly.
Creates a table with a few clustering keys, then checks the clustering order associated with the clustering columns
and ensures it is set correctly.
@since 3.0.0
@jira_ticket PYTHON-402
@expected_result is_reversed is set on DESC order, and is False on ASC
@test_category metadata
"""
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"], compact=True)
create_statement += " AND CLUSTERING ORDER BY (b ASC, c DESC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
b_column = tablemeta.columns['b']
self.assertFalse(b_column.is_reversed)
c_column = tablemeta.columns['c']
self.assertTrue(c_column.is_reversed)
def test_compound_primary_keys_more_columns_compact(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"], compact=True)
create_statement += " AND CLUSTERING ORDER BY (b ASC, c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b', u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c', u'd'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_primary_key_compact(self):
create_statement = self.make_create_statement(["a", "b"], [], ["c"], compact=True)
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_in_compound_primary_key_compact(self):
create_statement = self.make_create_statement(["a", "b"], ["c"], ["d"], compact=True)
create_statement += " AND CLUSTERING ORDER BY (c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c', u'd'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_cql_compatibility(self):
if CASS_SERVER_VERSION >= (3, 0):
raise unittest.SkipTest("cql compatibility does not apply Cassandra 3.0+")
# having more than one non-PK column is okay if there aren't any
# clustering columns
create_statement = self.make_create_statement(["a"], [], ["b", "c", "d"], compact=True)
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c', u'd'], sorted(tablemeta.columns.keys()))
self.assertTrue(tablemeta.is_cql_compatible)
# ... but if there are clustering columns, it's not CQL compatible.
# This is a hacky way to simulate having clustering columns.
tablemeta.clustering_key = ["foo", "bar"]
tablemeta.columns["foo"] = None
tablemeta.columns["bar"] = None
self.assertFalse(tablemeta.is_cql_compatible)
def test_compound_primary_keys_ordering(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"])
create_statement += " WITH CLUSTERING ORDER BY (b DESC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_more_columns_ordering(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"])
create_statement += " WITH CLUSTERING ORDER BY (b DESC, c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.check_create_statement(tablemeta, create_statement)
def test_composite_in_compound_primary_key_ordering(self):
create_statement = self.make_create_statement(["a", "b"], ["c"], ["d", "e"])
create_statement += " WITH CLUSTERING ORDER BY (c DESC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.check_create_statement(tablemeta, create_statement)
def test_indexes(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)"
execute_until_pass(self.session, create_statement)
d_index = "CREATE INDEX d_index ON %s.%s (d)" % (self.keyspace_name, self.function_table_name)
e_index = "CREATE INDEX e_index ON %s.%s (e)" % (self.keyspace_name, self.function_table_name)
execute_until_pass(self.session, d_index)
execute_until_pass(self.session, e_index)
tablemeta = self.get_table_metadata()
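# export_as_string emits the CREATE TABLE plus one CREATE INDEX per index;
# split on ';' and drop empty strings so each statement can be counted and inspected.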
statements = tablemeta.export_as_string().strip()
statements = [s.strip() for s in statements.split(';')]
statements = list(filter(bool, statements))
self.assertEqual(3, len(statements))
self.assertIn(d_index, statements)
self.assertIn(e_index, statements)
# make sure indexes are included in KeyspaceMetadata.export_as_string()
ksmeta = self.cluster.metadata.keyspaces[self.keyspace_name]
statement = ksmeta.export_as_string()
self.assertIn('CREATE INDEX d_index', statement)
self.assertIn('CREATE INDEX e_index', statement)
def test_collection_indexes(self):
if CASS_SERVER_VERSION < (2, 1, 0):
raise unittest.SkipTest("Secondary index on collections were introduced in Cassandra 2.1")
self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b map<text, text>)"
% (self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index1 ON %s.%s (keys(b))"
% (self.keyspace_name, self.function_table_name))
tablemeta = self.get_table_metadata()
self.assertIn('(keys(b))', tablemeta.export_as_string())
self.session.execute("DROP INDEX %s.index1" % (self.keyspace_name,))
self.session.execute("CREATE INDEX index2 ON %s.%s (b)"
% (self.keyspace_name, self.function_table_name))
tablemeta = self.get_table_metadata()
target = ' (b)' if CASS_SERVER_VERSION < (3, 0) else 'values(b))' # explicit values in C* 3+
self.assertIn(target, tablemeta.export_as_string())
# test full indexes on frozen collections, if available
if CASS_SERVER_VERSION >= (2, 1, 3):
self.session.execute("DROP TABLE %s.%s" % (self.keyspace_name, self.function_table_name))
self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b frozen<map<text, text>>)"
% (self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index3 ON %s.%s (full(b))"
% (self.keyspace_name, self.function_table_name))
tablemeta = self.get_table_metadata()
self.assertIn('(full(b))', tablemeta.export_as_string())
def test_compression_disabled(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"])
create_statement += " WITH compression = {}"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
expected = "compression = {}" if CASS_SERVER_VERSION < (3, 0) else "compression = {'enabled': 'false'}"
self.assertIn(expected, tablemeta.export_as_string())
def test_non_size_tiered_compaction(self):
"""
test options for non-size-tiered compaction strategy
Creates a table with LeveledCompactionStrategy, specifying one non-default option. Verifies that the option is
present in generated CQL, and that other legacy table parameters (min_threshold, max_threshold) are not included.
@since 2.6.0
@jira_ticket PYTHON-352
@expected_result the options map for LeveledCompactionStrategy does not contain min_threshold, max_threshold
@test_category metadata
"""
create_statement = self.make_create_statement(["a"], [], ["b", "c"])
create_statement += "WITH COMPACTION = {'class': 'LeveledCompactionStrategy', 'tombstone_threshold': '0.3'}"
self.session.execute(create_statement)
table_meta = self.get_table_metadata()
cql = table_meta.export_as_string()
self.assertIn("'tombstone_threshold': '0.3'", cql)
self.assertIn("LeveledCompactionStrategy", cql)
self.assertNotIn("min_threshold", cql)
self.assertNotIn("max_threshold", cql)
def test_refresh_schema_metadata(self):
"""
test for synchronously refreshing all cluster metadata
test_refresh_schema_metadata tests that all cluster metadata is refreshed when calling refresh_schema_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the cluster, creating a new keyspace, using the first cluster
object, and verifies that the cluster metadata has not changed in the second cluster object. It then calls
refresh_schema_metadata() and verifies that the cluster metadata is updated in the second cluster object.
Similarly, it then proceeds to altering keyspace, table, UDT, UDF, and UDA metadata and subsequently verifies
that this metadata is updated when refresh_schema_metadata() is called.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result Cluster, keyspace, table, UDT, UDF, and UDA metadata should be refreshed when refresh_schema_metadata() is called.
@test_category metadata
"""
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces)
# Cluster metadata modification
self.session.execute("CREATE KEYSPACE new_keyspace WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}")
self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces)
cluster2.refresh_schema_metadata()
self.assertIn("new_keyspace", cluster2.metadata.keyspaces)
# Keyspace metadata modification
self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.keyspace_name))
self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
cluster2.refresh_schema_metadata()
self.assertFalse(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
# Table metadata modification
table_name = "test"
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, table_name))
cluster2.refresh_schema_metadata()
self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.keyspace_name, table_name))
self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
cluster2.refresh_schema_metadata()
self.assertIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
if PROTOCOL_VERSION >= 3:
# UDT metadata modification
self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {})
cluster2.refresh_schema_metadata()
self.assertIn("user", cluster2.metadata.keyspaces[self.keyspace_name].user_types)
if PROTOCOL_VERSION >= 4:
# UDF metadata modification
self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {})
cluster2.refresh_schema_metadata()
self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.keyspace_name].functions)
# UDA metadata modification
self.session.execute("""CREATE AGGREGATE {0}.sum_agg(int)
SFUNC sum_int
STYPE int
INITCOND 0"""
.format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {})
cluster2.refresh_schema_metadata()
self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.keyspace_name].aggregates)
# Cluster metadata modification
self.session.execute("DROP KEYSPACE new_keyspace")
self.assertIn("new_keyspace", cluster2.metadata.keyspaces)
cluster2.refresh_schema_metadata()
self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces)
cluster2.shutdown()
def test_refresh_keyspace_metadata(self):
"""
test for synchronously refreshing keyspace metadata
test_refresh_keyspace_metadata tests that keyspace metadata is refreshed when calling refresh_keyspace_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the keyspace, disabling durable_writes, using the first cluster
object, and verifies that the keyspace metadata has not changed in the second cluster object. Finally, it calls
refresh_keyspace_metadata() and verifies that the keyspace metadata is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result Keyspace metadata should be refreshed when refresh_keyspace_metadata() is called.
@test_category metadata
"""
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.keyspace_name))
self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
cluster2.refresh_keyspace_metadata(self.keyspace_name)
self.assertFalse(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
cluster2.shutdown()
def test_refresh_table_metadata(self):
"""
test for synchronously refreshing table metadata
test_refresh_table_metadata tests that table metadata is refreshed when calling refresh_table_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the table, adding a new column, using the first cluster
object, and verifies that the table metadata has not changed in the second cluster object. Finally, it calls
refresh_table_metadata() and verifies that the table metadata is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result Table metadata should be refreshed when refresh_table_metadata() is called.
@test_category metadata
"""
table_name = "test"
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, table_name))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.keyspace_name, table_name))
self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
cluster2.refresh_table_metadata(self.keyspace_name, table_name)
self.assertIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
cluster2.shutdown()
def test_refresh_metadata_for_mv(self):
"""
test for synchronously refreshing materialized view metadata
test_refresh_metadata_for_mv tests that materialized view metadata is refreshed when calling
refresh_table_metadata() with the materialized view name as the table. It creates a second cluster object
with schema_event_refresh_window=-1 such that schema refreshes are disabled for schema change push events.
It then creates a new materialized view, using the first cluster object, and verifies that the materialized view
metadata has not changed in the second cluster object. Finally, it calls refresh_table_metadata() with the
materialized view name as the table name, and verifies that the materialized view metadata is updated in the
second cluster object.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata should be refreshed when refresh_table_metadata() is called.
@test_category metadata
"""
if CASS_SERVER_VERSION < (3, 0):
raise unittest.SkipTest("Materialized views require Cassandra 3.0+")
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, self.function_table_name))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
try:
self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)"
.format(self.keyspace_name, self.function_table_name))
self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
cluster2.refresh_table_metadata(self.keyspace_name, "mv1")
self.assertIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
finally:
cluster2.shutdown()
original_meta = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']
self.assertIs(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'])
self.cluster.refresh_materialized_view_metadata(self.keyspace_name, 'mv1')
current_meta = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']
self.assertIsNot(current_meta, original_meta)
self.assertIsNot(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'])
self.assertEqual(original_meta.as_cql_query(), current_meta.as_cql_query())
cluster3 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster3.connect()
try:
self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv2 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)"
.format(self.keyspace_name, self.function_table_name))
self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
cluster3.refresh_materialized_view_metadata(self.keyspace_name, 'mv2')
self.assertIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
finally:
cluster3.shutdown()
def test_refresh_user_type_metadata(self):
"""
test for synchronously refreshing UDT metadata in keyspace
test_refresh_user_type_metadata tests that UDT metadata in a keyspace is refreshed when calling refresh_user_type_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the keyspace, creating a new UDT, using the first cluster
object, and verifies that the UDT metadata has not changed in the second cluster object. Finally, it calls
refresh_user_type_metadata() and verifies that the UDT metadata in the keyspace is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result UDT metadata in the keyspace should be refreshed when refresh_user_type_metadata() is called.
@test_category metadata
"""
if PROTOCOL_VERSION < 3:
raise unittest.SkipTest("Protocol 3+ is required for UDTs, currently testing against {0}".format(PROTOCOL_VERSION))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {})
self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {})
cluster2.refresh_user_type_metadata(self.keyspace_name, "user")
self.assertIn("user", cluster2.metadata.keyspaces[self.keyspace_name].user_types)
cluster2.shutdown()
def test_refresh_user_function_metadata(self):
"""
test for synchronously refreshing UDF metadata in keyspace
test_refresh_user_function_metadata tests that UDF metadata in a keyspace is refreshed when calling
refresh_user_function_metadata(). It creates a second cluster object with schema_event_refresh_window=-1 such
that schema refreshes are disabled for schema change push events. It then alters the keyspace, creating a new
UDF, using the first cluster object, and verifies that the UDF metadata has not changed in the second cluster
object. Finally, it calls refresh_user_function_metadata() and verifies that the UDF metadata in the keyspace
is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result UDF metadata in the keyspace should be refreshed when refresh_user_function_metadata() is called.
@test_category metadata
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol 4+ is required for UDFs, currently testing against {0}".format(PROTOCOL_VERSION))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {})
self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {})
cluster2.refresh_user_function_metadata(self.keyspace_name, UserFunctionDescriptor("sum_int", ["int", "int"]))
self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.keyspace_name].functions)
cluster2.shutdown()
def test_refresh_user_aggregate_metadata(self):
"""
test for synchronously refreshing UDA metadata in keyspace
test_refresh_user_aggregate_metadata tests that UDA metadata in a keyspace is refreshed when calling
refresh_user_aggregate_metadata(). It creates a second cluster object with schema_event_refresh_window=-1 such
that schema refreshes are disabled for schema change push events. It then alters the keyspace, creating a new
UDA, using the first cluster object, and verifies that the UDA metadata has not changed in the second cluster
object. Finally, it calls refresh_user_aggregate_metadata() and verifies that the UDA metadata in the keyspace
is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result UDA metadata in the keyspace should be refreshed when refresh_user_aggregate_metadata() is called.
@test_category metadata
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol 4+ is required for UDAs, currently testing against {0}".format(PROTOCOL_VERSION))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {})
self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name))
self.session.execute("""CREATE AGGREGATE {0}.sum_agg(int)
SFUNC sum_int
STYPE int
INITCOND 0"""
.format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {})
cluster2.refresh_user_aggregate_metadata(self.keyspace_name, UserAggregateDescriptor("sum_agg", ["int"]))
self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.keyspace_name].aggregates)
cluster2.shutdown()
def test_multiple_indices(self):
"""
test multiple indexes on the same column.
Creates a table and two indexes. Ensures that the metadata for both indexes is surfaced appropriately.
@since 3.0.0
@jira_ticket PYTHON-276
@expected_result IndexMetadata is appropriately surfaced
@test_category metadata
"""
if CASS_SERVER_VERSION < (3, 0):
raise unittest.SkipTest("Materialized views require Cassandra 3.0+")
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b map<text, int>)".format(self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index_1 ON {0}.{1}(b)".format(self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index_2 ON {0}.{1}(keys(b))".format(self.keyspace_name, self.function_table_name))
indices = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].indexes
self.assertEqual(len(indices), 2)
index_1 = indices["index_1"]
index_2 = indices['index_2']
self.assertEqual(index_1.table_name, "test_multiple_indices")
self.assertEqual(index_1.name, "index_1")
self.assertEqual(index_1.kind, "COMPOSITES")
self.assertEqual(index_1.index_options["target"], "values(b)")
self.assertEqual(index_1.keyspace_name, "schemametadatatests")
self.assertEqual(index_2.table_name, "test_multiple_indices")
self.assertEqual(index_2.name, "index_2")
self.assertEqual(index_2.kind, "COMPOSITES")
self.assertEqual(index_2.index_options["target"], "keys(b)")
self.assertEqual(index_2.keyspace_name, "schemametadatatests")
class TestCodeCoverage(unittest.TestCase):
def test_export_schema(self):
"""
Test export schema functionality
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
self.assertIsInstance(cluster.metadata.export_schema_as_string(), six.string_types)
cluster.shutdown()
def test_export_keyspace_schema(self):
"""
Test export keyspace schema functionality
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
for keyspace in cluster.metadata.keyspaces:
keyspace_metadata = cluster.metadata.keyspaces[keyspace]
self.assertIsInstance(keyspace_metadata.export_as_string(), six.string_types)
self.assertIsInstance(keyspace_metadata.as_cql_query(), six.string_types)
cluster.shutdown()
def assert_equal_diff(self, received, expected):
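# On mismatch, fail with a unified diff so differences in long schema strings are readable.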
if received != expected:
diff_string = '\n'.join(difflib.unified_diff(expected.split('\n'),
received.split('\n'),
'EXPECTED', 'RECEIVED',
lineterm=''))
self.fail(diff_string)
def assert_startswith_diff(self, received, prefix):
if not received.startswith(prefix):
prefix_lines = prefix.split('\n')
diff_string = '\n'.join(difflib.unified_diff(prefix_lines,
received.split('\n')[:len(prefix_lines)],
'EXPECTED', 'RECEIVED',
lineterm=''))
self.fail(diff_string)
def test_export_keyspace_schema_udts(self):
"""
Test udt exports
"""
if CASS_SERVER_VERSION < (2, 1, 0):
raise unittest.SkipTest('UDTs were introduced in Cassandra 2.1')
if PROTOCOL_VERSION < 3:
raise unittest.SkipTest(
"Protocol 3.0+ is required for UDT change events, currently testing against %r"
% (PROTOCOL_VERSION,))
if sys.version_info[0:2] != (2, 7):
raise unittest.SkipTest('This test compares static strings generated from dict items, which may change order. Test with 2.7.')
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
session.execute("""
CREATE KEYSPACE export_udts
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
AND durable_writes = true;
""")
session.execute("""
CREATE TYPE export_udts.street (
street_number int,
street_name text)
""")
session.execute("""
CREATE TYPE export_udts.zip (
zipcode int,
zip_plus_4 int)
""")
session.execute("""
CREATE TYPE export_udts.address (
street_address frozen<street>,
zip_code frozen<zip>)
""")
session.execute("""
CREATE TABLE export_udts.users (
user text PRIMARY KEY,
addresses map<text, frozen<address>>)
""")
expected_prefix = """CREATE KEYSPACE export_udts WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true;
CREATE TYPE export_udts.street (
street_number int,
street_name text
);
CREATE TYPE export_udts.zip (
zipcode int,
zip_plus_4 int
);
CREATE TYPE export_udts.address (
street_address frozen<street>,
zip_code frozen<zip>
);
CREATE TABLE export_udts.users (
user text PRIMARY KEY,
addresses map<text, frozen<address>>"""
self.assert_startswith_diff(cluster.metadata.keyspaces['export_udts'].export_as_string(), expected_prefix)
table_meta = cluster.metadata.keyspaces['export_udts'].tables['users']
expected_prefix = """CREATE TABLE export_udts.users (
user text PRIMARY KEY,
addresses map<text, frozen<address>>"""
self.assert_startswith_diff(table_meta.export_as_string(), expected_prefix)
cluster.shutdown()
def test_case_sensitivity(self):
"""
Test that names that need to be escaped in CREATE statements are escaped in the exported schema
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
ksname = 'AnInterestingKeyspace'
cfname = 'AnInterestingTable'
session.execute("""
CREATE KEYSPACE "%s"
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
""" % (ksname,))
session.execute("""
CREATE TABLE "%s"."%s" (
k int,
"A" int,
"B" int,
"MyColumn" int,
PRIMARY KEY (k, "A"))
WITH CLUSTERING ORDER BY ("A" DESC)
""" % (ksname, cfname))
session.execute("""
CREATE INDEX myindex ON "%s"."%s" ("MyColumn")
""" % (ksname, cfname))
ksmeta = cluster.metadata.keyspaces[ksname]
schema = ksmeta.export_as_string()
self.assertIn('CREATE KEYSPACE "AnInterestingKeyspace"', schema)
self.assertIn('CREATE TABLE "AnInterestingKeyspace"."AnInterestingTable"', schema)
self.assertIn('"A" int', schema)
self.assertIn('"B" int', schema)
self.assertIn('"MyColumn" int', schema)
self.assertIn('PRIMARY KEY (k, "A")', schema)
self.assertIn('WITH CLUSTERING ORDER BY ("A" DESC)', schema)
self.assertIn('CREATE INDEX myindex ON "AnInterestingKeyspace"."AnInterestingTable" ("MyColumn")', schema)
cluster.shutdown()
def test_already_exists_exceptions(self):
"""
Ensure AlreadyExists exception is thrown when hit
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
ksname = 'test3rf'
cfname = 'test'
ddl = '''
CREATE KEYSPACE %s
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}'''
self.assertRaises(AlreadyExists, session.execute, ddl % ksname)
ddl = '''
CREATE TABLE %s.%s (
k int PRIMARY KEY,
v int )'''
self.assertRaises(AlreadyExists, session.execute, ddl % (ksname, cfname))
cluster.shutdown()
def test_replicas(self):
"""
Ensure cluster.metadata.get_replicas returns correctly when not attached to a keyspace
"""
if murmur3 is None:
raise unittest.SkipTest('the murmur3 extension is not available')
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.assertEqual(cluster.metadata.get_replicas('test3rf', 'key'), [])
cluster.connect('test3rf')
self.assertNotEqual(list(cluster.metadata.get_replicas('test3rf', six.b('key'))), [])
host = list(cluster.metadata.get_replicas('test3rf', six.b('key')))[0]
self.assertEqual(host.datacenter, 'dc1')
self.assertEqual(host.rack, 'r1')
cluster.shutdown()
def test_token_map(self):
"""
Test token mappings
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect('test3rf')
ring = cluster.metadata.token_map.ring
owners = list(cluster.metadata.token_map.token_to_host_owner[token] for token in ring)
get_replicas = cluster.metadata.token_map.get_replicas
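# For this three-node ring: RF=3 should cover every owner, while RF=2 and RF=1
# should select the subsequent owner(s) on the ring, as asserted below.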
for ksname in ('test1rf', 'test2rf', 'test3rf'):
self.assertNotEqual(list(get_replicas(ksname, ring[0])), [])
for i, token in enumerate(ring):
self.assertEqual(set(get_replicas('test3rf', token)), set(owners))
self.assertEqual(set(get_replicas('test2rf', token)), set([owners[(i + 1) % 3], owners[(i + 2) % 3]]))
self.assertEqual(set(get_replicas('test1rf', token)), set([owners[(i + 1) % 3]]))
cluster.shutdown()
def test_legacy_tables(self):
if CASS_SERVER_VERSION < (2, 1, 0):
raise unittest.SkipTest('Test schema output assumes 2.1.0+ options')
if CASS_SERVER_VERSION >= (2, 2, 0):
raise unittest.SkipTest('Cannot test cli script on Cassandra 2.2.0+')
if sys.version_info[0:2] != (2, 7):
raise unittest.SkipTest('This test compares static strings generated from dict items, which may change order. Test with 2.7.')
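# Legacy (Thrift-era) schema created through cassandra-cli, used to verify how the driver
# exports tables with constructs that are not directly expressible in CQL.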
cli_script = """CREATE KEYSPACE legacy
WITH placement_strategy = 'SimpleStrategy'
AND strategy_options = {replication_factor:1};
USE legacy;
CREATE COLUMN FAMILY simple_no_col
WITH comparator = UTF8Type
AND key_validation_class = UUIDType
AND default_validation_class = UTF8Type;
CREATE COLUMN FAMILY simple_with_col
WITH comparator = UTF8Type
and key_validation_class = UUIDType
and default_validation_class = UTF8Type
AND column_metadata = [
{column_name: col_with_meta, validation_class: UTF8Type}
];
CREATE COLUMN FAMILY composite_partition_no_col
WITH comparator = UTF8Type
AND key_validation_class = 'CompositeType(UUIDType,UTF8Type)'
AND default_validation_class = UTF8Type;
CREATE COLUMN FAMILY composite_partition_with_col
WITH comparator = UTF8Type
AND key_validation_class = 'CompositeType(UUIDType,UTF8Type)'
AND default_validation_class = UTF8Type
AND column_metadata = [
{column_name: col_with_meta, validation_class: UTF8Type}
];
CREATE COLUMN FAMILY nested_composite_key
WITH comparator = UTF8Type
and key_validation_class = 'CompositeType(CompositeType(UUIDType,UTF8Type), LongType)'
and default_validation_class = UTF8Type
AND column_metadata = [
{column_name: full_name, validation_class: UTF8Type}
];
create column family composite_comp_no_col
with column_type = 'Standard'
and comparator = 'DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType)'
and default_validation_class = 'BytesType'
and key_validation_class = 'BytesType'
and read_repair_chance = 0.0
and dclocal_read_repair_chance = 0.1
and gc_grace = 864000
and min_compaction_threshold = 4
and max_compaction_threshold = 32
and compaction_strategy = 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'
and caching = 'KEYS_ONLY'
and cells_per_row_to_cache = '0'
and default_time_to_live = 0
and speculative_retry = 'NONE'
and comment = 'Stores file meta data';
create column family composite_comp_with_col
with column_type = 'Standard'
and comparator = 'DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType)'
and default_validation_class = 'BytesType'
and key_validation_class = 'BytesType'
and read_repair_chance = 0.0
and dclocal_read_repair_chance = 0.1
and gc_grace = 864000
and min_compaction_threshold = 4
and max_compaction_threshold = 32
and compaction_strategy = 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'
and caching = 'KEYS_ONLY'
and cells_per_row_to_cache = '0'
and default_time_to_live = 0
and speculative_retry = 'NONE'
and comment = 'Stores file meta data'
and column_metadata = [
{column_name : 'b@6d616d6d616a616d6d61',
validation_class : BytesType,
index_name : 'idx_one',
index_type : 0},
{column_name : 'b@6869746d65776974686d75736963',
validation_class : BytesType,
index_name : 'idx_two',
index_type : 0}]
and compression_options = {'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor'};"""
# note: the inner key type for legacy.nested_composite_key
# (org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType, org.apache.cassandra.db.marshal.UTF8Type))
# is a bit strange, but it replays in CQL with desired results
expected_string = """CREATE KEYSPACE legacy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true;
/*
Warning: Table legacy.composite_comp_with_col omitted because it has constructs not compatible with CQL (was created via legacy API).
Approximate structure, for reference:
(this should not be used to reproduce this schema)
CREATE TABLE legacy.composite_comp_with_col (
key blob,
b blob,
s text,
t timeuuid,
"b@6869746d65776974686d75736963" blob,
"b@6d616d6d616a616d6d61" blob,
PRIMARY KEY (key, b, s, t)
) WITH COMPACT STORAGE
AND CLUSTERING ORDER BY (b ASC, s ASC, t ASC)
AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
AND comment = 'Stores file meta data'
AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND dclocal_read_repair_chance = 0.1
AND default_time_to_live = 0
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair_chance = 0.0
AND speculative_retry = 'NONE';
CREATE INDEX idx_two ON legacy.composite_comp_with_col ("b@6869746d65776974686d75736963");
CREATE INDEX idx_one ON legacy.composite_comp_with_col ("b@6d616d6d616a616d6d61");
*/
CREATE TABLE legacy.nested_composite_key (
key 'org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType, org.apache.cassandra.db.marshal.UTF8Type)',
key2 bigint,
full_name text,
PRIMARY KEY ((key, key2))
) WITH COMPACT STORAGE
AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}
AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND dclocal_read_repair_chance = 0.1
AND default_time_to_live = 0
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair_chance = 0.0
AND speculative_retry = 'NONE';
CREATE TABLE legacy.composite_partition_with_col (
key uuid,
key2 text,
col_with_meta text,
PRIMARY KEY ((key, key2))
) WITH COMPACT STORAGE
AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}
AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND dclocal_read_repair_chance = 0.1
AND default_time_to_live = 0
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair_chance = 0.0
AND speculative_retry = 'NONE';
CREATE TABLE legacy.composite_partition_no_col (
key uuid,
key2 text,
column1 text,
value text,
PRIMARY KEY ((key, key2), column1)
) WITH COMPACT STORAGE
AND CLUSTERING ORDER BY (column1 ASC)
AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}
AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND dclocal_read_repair_chance = 0.1
AND default_time_to_live = 0
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair_chance = 0.0
AND speculative_retry = 'NONE';
CREATE TABLE legacy.simple_with_col (
key uuid PRIMARY KEY,
col_with_meta text
) WITH COMPACT STORAGE
AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}
AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND dclocal_read_repair_chance = 0.1
AND default_time_to_live = 0
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair_chance = 0.0
AND speculative_retry = 'NONE';
CREATE TABLE legacy.simple_no_col (
key uuid,
column1 text,
value text,
PRIMARY KEY (key, column1)
) WITH COMPACT STORAGE
AND CLUSTERING ORDER BY (column1 ASC)
AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}
AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND dclocal_read_repair_chance = 0.1
AND default_time_to_live = 0
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair_chance = 0.0
AND speculative_retry = 'NONE';
/*
Warning: Table legacy.composite_comp_no_col omitted because it has constructs not compatible with CQL (was created via legacy API).
Approximate structure, for reference:
(this should not be used to reproduce this schema)
CREATE TABLE legacy.composite_comp_no_col (
key blob,
column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(org.apache.cassandra.db.marshal.BytesType, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.TimeUUIDType)',
column2 timeuuid,
value blob,
PRIMARY KEY (key, column1, column1, column2)
) WITH COMPACT STORAGE
AND CLUSTERING ORDER BY (column1 ASC, column1 ASC, column2 ASC)
AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
AND comment = 'Stores file meta data'
AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND dclocal_read_repair_chance = 0.1
AND default_time_to_live = 0
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair_chance = 0.0
AND speculative_retry = 'NONE';
*/"""
ccm = get_cluster()
ccm.run_cli(cli_script)
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
legacy_meta = cluster.metadata.keyspaces['legacy']
self.assert_equal_diff(legacy_meta.export_as_string(), expected_string)
session.execute('DROP KEYSPACE legacy')
cluster.shutdown()
class TokenMetadataTest(unittest.TestCase):
"""
Test of TokenMap creation and other behavior.
"""
def test_token(self):
expected_node_count = len(get_cluster().nodes)
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
tmap = cluster.metadata.token_map
self.assertTrue(issubclass(tmap.token_class, Token))
self.assertEqual(expected_node_count, len(tmap.ring))
cluster.shutdown()
def test_getting_replicas(self):
tokens = [MD5Token(i) for i in range(0, (2 ** 127 - 1), 2 ** 125)]
hosts = [Host("ip%d" % i, SimpleConvictionPolicy) for i in range(len(tokens))]
token_to_primary_replica = dict(zip(tokens, hosts))
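# Hand-build a TokenMap with one host per token and RF=1 so replica lookups are fully predictable.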
keyspace = KeyspaceMetadata("ks", True, "SimpleStrategy", {"replication_factor": "1"})
metadata = Mock(spec=Metadata, keyspaces={'ks': keyspace})
token_map = TokenMap(MD5Token, token_to_primary_replica, tokens, metadata)
# tokens match node tokens exactly
for i, token in enumerate(tokens):
expected_host = hosts[(i + 1) % len(hosts)]
replicas = token_map.get_replicas("ks", token)
self.assertEqual(set(replicas), set([expected_host]))
# shift the tokens back by one
for token, expected_host in zip(tokens, hosts):
replicas = token_map.get_replicas("ks", MD5Token(token.value - 1))
self.assertEqual(set(replicas), set([expected_host]))
# shift the tokens forward by one
for i, token in enumerate(tokens):
replicas = token_map.get_replicas("ks", MD5Token(token.value + 1))
expected_host = hosts[(i + 1) % len(hosts)]
self.assertEqual(set(replicas), set([expected_host]))
class KeyspaceAlterMetadata(unittest.TestCase):
"""
Test verifies that table metadata is preserved on keyspace alter
"""
def setUp(self):
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
name = self._testMethodName.lower()
crt_ks = '''
CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} AND durable_writes = true''' % name
self.session.execute(crt_ks)
def tearDown(self):
name = self._testMethodName.lower()
self.session.execute('DROP KEYSPACE %s' % name)
self.cluster.shutdown()
def test_keyspace_alter(self):
"""
Table info is preserved upon keyspace alter:
Create table
Verify schema
Alter ks
Verify that table metadata is still present
PYTHON-173
"""
name = self._testMethodName.lower()
self.session.execute('CREATE TABLE %s.d (d INT PRIMARY KEY)' % name)
original_keyspace_meta = self.cluster.metadata.keyspaces[name]
self.assertEqual(original_keyspace_meta.durable_writes, True)
self.assertEqual(len(original_keyspace_meta.tables), 1)
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % name)
new_keyspace_meta = self.cluster.metadata.keyspaces[name]
self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
self.assertEqual(new_keyspace_meta.durable_writes, False)
class IndexMapTests(unittest.TestCase):
keyspace_name = 'index_map_tests'
@property
def table_name(self):
return self._testMethodName.lower()
@classmethod
def setup_class(cls):
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.session = cls.cluster.connect()
try:
if cls.keyspace_name in cls.cluster.metadata.keyspaces:
cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name)
cls.session.execute(
"""
CREATE KEYSPACE %s
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'};
""" % cls.keyspace_name)
cls.session.set_keyspace(cls.keyspace_name)
except Exception:
cls.cluster.shutdown()
raise
@classmethod
def teardown_class(cls):
try:
cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name)
finally:
cls.cluster.shutdown()
def create_basic_table(self):
self.session.execute("CREATE TABLE %s (k int PRIMARY KEY, a int)" % self.table_name)
def drop_basic_table(self):
self.session.execute("DROP TABLE %s" % self.table_name)
def test_index_updates(self):
self.create_basic_table()
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertNotIn('a_idx', ks_meta.indexes)
self.assertNotIn('b_idx', ks_meta.indexes)
self.assertNotIn('a_idx', table_meta.indexes)
self.assertNotIn('b_idx', table_meta.indexes)
self.session.execute("CREATE INDEX a_idx ON %s (a)" % self.table_name)
self.session.execute("ALTER TABLE %s ADD b int" % self.table_name)
self.session.execute("CREATE INDEX b_idx ON %s (b)" % self.table_name)
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertIsInstance(ks_meta.indexes['a_idx'], IndexMetadata)
self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata)
self.assertIsInstance(table_meta.indexes['a_idx'], IndexMetadata)
self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata)
# both indexes updated when index dropped
self.session.execute("DROP INDEX a_idx")
# temporarily synchronously refresh the schema metadata, until CASSANDRA-9391 is merged in
self.cluster.refresh_table_metadata(self.keyspace_name, self.table_name)
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertNotIn('a_idx', ks_meta.indexes)
self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata)
self.assertNotIn('a_idx', table_meta.indexes)
self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata)
# keyspace index updated when table dropped
self.drop_basic_table()
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertNotIn(self.table_name, ks_meta.tables)
self.assertNotIn('a_idx', ks_meta.indexes)
self.assertNotIn('b_idx', ks_meta.indexes)
def test_index_follows_alter(self):
self.create_basic_table()
idx = self.table_name + '_idx'
self.session.execute("CREATE INDEX %s ON %s (a)" % (idx, self.table_name))
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata)
self.assertIsInstance(table_meta.indexes[idx], IndexMetadata)
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name)
old_meta = ks_meta
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIsNot(ks_meta, old_meta)
table_meta = ks_meta.tables[self.table_name]
self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata)
self.assertIsInstance(table_meta.indexes[idx], IndexMetadata)
self.drop_basic_table()
class FunctionTest(unittest.TestCase):
"""
Base functionality for Function and Aggregate metadata test classes
"""
def setUp(self):
"""
Tests are skipped if run with native protocol version < 4
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Function metadata requires native protocol version 4+")
@property
def function_name(self):
return self._testMethodName.lower()
@classmethod
def setup_class(cls):
if PROTOCOL_VERSION >= 4:
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.keyspace_name = cls.__name__.lower()
cls.session = cls.cluster.connect()
cls.session.execute("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name)
cls.session.set_keyspace(cls.keyspace_name)
cls.keyspace_function_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].functions
cls.keyspace_aggregate_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].aggregates
@classmethod
def teardown_class(cls):
if PROTOCOL_VERSION >= 4:
cls.session.execute("DROP KEYSPACE IF EXISTS %s" % cls.keyspace_name)
cls.cluster.shutdown()
class Verified(object):
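"""Context manager: creates the element from its expected metadata's CQL on entry, verifies the driver-surfaced metadata round-trips to the same CQL, and drops it on exit."""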
def __init__(self, test_case, meta_class, element_meta, **function_kwargs):
self.test_case = test_case
self.function_kwargs = dict(function_kwargs)
self.meta_class = meta_class
self.element_meta = element_meta
def __enter__(self):
tc = self.test_case
expected_meta = self.meta_class(**self.function_kwargs)
tc.assertNotIn(expected_meta.signature, self.element_meta)
tc.session.execute(expected_meta.as_cql_query())
tc.assertIn(expected_meta.signature, self.element_meta)
generated_meta = self.element_meta[expected_meta.signature]
self.test_case.assertEqual(generated_meta.as_cql_query(), expected_meta.as_cql_query())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
tc = self.test_case
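# meta_class.__name__ ('Function' or 'Aggregate') doubles as the CQL keyword for the DROP statement.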
tc.session.execute("DROP %s %s.%s" % (self.meta_class.__name__, tc.keyspace_name, self.signature))
tc.assertNotIn(self.signature, self.element_meta)
@property
def signature(self):
return SignatureDescriptor.format_signature(self.function_kwargs['name'],
self.function_kwargs['argument_types'])
class VerifiedFunction(Verified):
def __init__(self, test_case, **kwargs):
super(FunctionTest.VerifiedFunction, self).__init__(test_case, Function, test_case.keyspace_function_meta, **kwargs)
class VerifiedAggregate(Verified):
def __init__(self, test_case, **kwargs):
super(FunctionTest.VerifiedAggregate, self).__init__(test_case, Aggregate, test_case.keyspace_aggregate_meta, **kwargs)
class FunctionMetadata(FunctionTest):
def make_function_kwargs(self, called_on_null=True):
return {'keyspace': self.keyspace_name,
'name': self.function_name,
'argument_types': ['double', 'int'],
'argument_names': ['d', 'i'],
'return_type': 'double',
'language': 'java',
'body': 'return new Double(0.0);',
'called_on_null_input': called_on_null}
def test_functions_after_udt(self):
"""
Test to ensure functions come after UDTs in a keyspace dump
test_functions_after_udt creates a basic function, then dumps the keyspace and makes sure that in the results
UDTs are listed before any corresponding functions.
Ideally we would make a function that takes a udt type, but this presently fails because C* c059a56 requires
the udt to be frozen to create, but does not store meta indicating frozen.
SEE https://issues.apache.org/jira/browse/CASSANDRA-9186
Maybe update this after release:
    kwargs = self.make_function_kwargs()
    kwargs['argument_types'][0] = "frozen<%s>" % udt_name
    expected_meta = Function(**kwargs)
    with self.VerifiedFunction(self, **kwargs):
        ...
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result UDTs should come before any functions
@test_category function
"""
self.assertNotIn(self.function_name, self.keyspace_function_meta)
udt_name = 'udtx'
self.session.execute("CREATE TYPE %s (x int)" % udt_name)
with self.VerifiedFunction(self, **self.make_function_kwargs()):
# udts must come before functions in keyspace dump
keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string()
type_idx = keyspace_cql.rfind("CREATE TYPE")
func_idx = keyspace_cql.find("CREATE FUNCTION")
self.assertNotIn(-1, (type_idx, func_idx), "TYPE or FUNCTION not found in keyspace_cql: " + keyspace_cql)
self.assertGreater(func_idx, type_idx)
def test_function_same_name_diff_types(self):
"""
Test to verify that functions with different signatures are differentiated in metadata
test_function_same_name_diff_types creates two functions with the same name but slightly different
signatures, then ensures that both are surfaced separately in our metadata.
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result functions with the same name but different signatures should be surfaced separately
@test_category function
"""
# Create a function
kwargs = self.make_function_kwargs()
with self.VerifiedFunction(self, **kwargs):
# another function: same name, different type sig.
self.assertGreater(len(kwargs['argument_types']), 1)
self.assertGreater(len(kwargs['argument_names']), 1)
kwargs['argument_types'] = kwargs['argument_types'][:1]
kwargs['argument_names'] = kwargs['argument_names'][:1]
# Ensure they are surfaced separately
with self.VerifiedFunction(self, **kwargs):
functions = [f for f in self.keyspace_function_meta.values() if f.name == self.function_name]
self.assertEqual(len(functions), 2)
self.assertNotEqual(functions[0].argument_types, functions[1].argument_types)
def test_function_no_parameters(self):
"""
Test to verify CQL output for functions with zero parameters
Creates a function with no input parameters, verify that CQL output is correct.
@since 2.7.1
@jira_ticket PYTHON-392
@expected_result function with no parameters should generate proper CQL
@test_category function
"""
kwargs = self.make_function_kwargs()
kwargs['argument_types'] = []
kwargs['argument_names'] = []
kwargs['return_type'] = 'bigint'
kwargs['body'] = 'return System.currentTimeMillis() / 1000L;'
with self.VerifiedFunction(self, **kwargs) as vf:
fn_meta = self.keyspace_function_meta[vf.signature]
self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*%s\(\) .*" % kwargs['name'])
def test_functions_follow_keyspace_alter(self):
"""
Test to verify that functions maintain equality after a keyspace is altered
test_functions_follow_keyspace_alter creates a function, then alters the keyspace associated with that function.
After the alter we validate that the function maintains the same metadata
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result functions are the same after parent keyspace is altered
@test_category function
"""
# Create function
with self.VerifiedFunction(self, **self.make_function_kwargs()):
original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name)
# After keyspace alter ensure that we maintain function equality.
try:
new_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
self.assertIs(original_keyspace_meta.functions, new_keyspace_meta.functions)
finally:
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name)
def test_function_cql_called_on_null(self):
"""
Test to verify that the called-on-null-input argument is honored on function creation.
test_function_cql_called_on_null creates two functions: one with called_on_null_input set to true,
the other with it set to false. We then verify that the metadata constructed from those functions
correctly reflects that flag.
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result functions metadata correctly reflects called_on_null_input flag.
@test_category function
"""
kwargs = self.make_function_kwargs()
kwargs['called_on_null_input'] = True
with self.VerifiedFunction(self, **kwargs) as vf:
fn_meta = self.keyspace_function_meta[vf.signature]
self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*")
kwargs['called_on_null_input'] = False
with self.VerifiedFunction(self, **kwargs) as vf:
fn_meta = self.keyspace_function_meta[vf.signature]
self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*")
class AggregateMetadata(FunctionTest):
@classmethod
def setup_class(cls):
if PROTOCOL_VERSION >= 4:
super(AggregateMetadata, cls).setup_class()
cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int(s int, i int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 's + i';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int_two(s int, i int, j int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 's + i + j';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION "List_As_String"(l list<text>)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS ''''' + l';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION extend_list(s list<text>, i int)
CALLED ON NULL INPUT
RETURNS list<text>
LANGUAGE java AS 'if (i != null) s.add(i.toString()); return s;';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION update_map(s map<int, int>, i int)
RETURNS NULL ON NULL INPUT
RETURNS map<int, int>
LANGUAGE java AS 's.put(new Integer(i), new Integer(i)); return s;';""")
cls.session.execute("""CREATE TABLE IF NOT EXISTS t
(k int PRIMARY KEY, v int)""")
for x in range(4):
cls.session.execute("INSERT INTO t (k,v) VALUES (%s, %s)", (x, x))
cls.session.execute("INSERT INTO t (k) VALUES (%s)", (4,))
def make_aggregate_kwargs(self, state_func, state_type, final_func=None, init_cond=None):
return {'keyspace': self.keyspace_name,
'name': self.function_name + '_aggregate',
'argument_types': ['int'],
'state_func': state_func,
'state_type': state_type,
'final_func': final_func,
'initial_condition': init_cond,
'return_type': "does not matter for creation"}
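# These kwargs map onto CQL of the shape used in BadMetaTest further below, e.g.:
#   CREATE AGGREGATE <name>(int) SFUNC <state_func> STYPE <state_type>
#       [FINALFUNC <final_func>] [INITCOND <initial_condition>]
# return_type is resolved server-side from the state/final functions, hence the placeholder.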
def test_return_type_meta(self):
"""
Test to verify that the return type of an aggregate is honored in the metadata
test_return_type_meta creates an aggregate then ensures the return type of the created
aggregate is correctly surfaced in the metadata
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregate has the correct return type in the metadata
@test_category aggregate
"""
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond='1')) as va:
self.assertEqual(self.keyspace_aggregate_meta[va.signature].return_type, 'int')
def test_init_cond(self):
"""
Test to verify that various initial conditions are correctly surfaced in various aggregate functions
test_init_cond creates several different types of aggregates, and given various initial conditions it verifies that
they correctly impact the aggregate's execution
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result initial conditions are correctly evaluated as part of the aggregates
@test_category aggregate
"""
# This is required until the java driver bundled with C* is updated to support v4
c = Cluster(protocol_version=3)
s = c.connect(self.keyspace_name)
encoder = Encoder()
expected_values = range(4)
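# setup_class inserted rows with v=0..3 plus one row (k=4) whose v is NULL, so each state
# function's NULL handling is exercised along with the initial condition.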
# int32
for init_cond in (-1, 0, 1):
cql_init = encoder.cql_encode_all_types(init_cond)
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond=cql_init)) as va:
sum_res = s.execute("SELECT %s(v) AS sum FROM t" % va.function_kwargs['name'])[0].sum
self.assertEqual(sum_res, int(init_cond) + sum(expected_values))
# list<text>
for init_cond in ([], ['1', '2']):
cql_init = encoder.cql_encode_all_types(init_cond)
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list<text>', init_cond=cql_init)) as va:
list_res = s.execute("SELECT %s(v) AS list_res FROM t" % va.function_kwargs['name'])[0].list_res
self.assertListEqual(list_res[:len(init_cond)], init_cond)
self.assertEqual(set(i for i in list_res[len(init_cond):]),
set(str(i) for i in expected_values))
# map<int,int>
expected_map_values = dict((i, i) for i in expected_values)
expected_key_set = set(expected_values)
for init_cond in ({}, {1: 2, 3: 4}, {5: 5}):
cql_init = encoder.cql_encode_all_types(init_cond)
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('update_map', 'map<int, int>', init_cond=cql_init)) as va:
map_res = s.execute("SELECT %s(v) AS map_res FROM t" % va.function_kwargs['name'])[0].map_res
self.assertDictContainsSubset(expected_map_values, map_res)
init_not_updated = dict((k, init_cond[k]) for k in set(init_cond) - expected_key_set)
self.assertDictContainsSubset(init_not_updated, map_res)
c.shutdown()
def test_aggregates_after_functions(self):
"""
Test to verify that aggregates are listed after functions in metadata
test_aggregates_after_functions creates an aggregate, and then verifies that it is listed
after any function creations when the keyspace dump is performed
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregates are declared after any functions
@test_category aggregate
"""
# functions must come before aggregates in the keyspace dump
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list<text>')):
keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string()
func_idx = keyspace_cql.find("CREATE FUNCTION")
aggregate_idx = keyspace_cql.rfind("CREATE AGGREGATE")
self.assertNotIn(-1, (aggregate_idx, func_idx), "AGGREGATE or FUNCTION not found in keyspace_cql: " + keyspace_cql)
self.assertGreater(aggregate_idx, func_idx)
def test_same_name_diff_types(self):
"""
Test to verify that aggregates with different signatures are differentiated in metadata
test_same_name_diff_types creates two aggregates with the same name but slightly different
signatures, then ensures that both are surfaced separately in our metadata.
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregates with the same name but different signatures should be surfaced separately
@test_category function
"""
kwargs = self.make_aggregate_kwargs('sum_int', 'int', init_cond='0')
with self.VerifiedAggregate(self, **kwargs):
kwargs['state_func'] = 'sum_int_two'
kwargs['argument_types'] = ['int', 'int']
with self.VerifiedAggregate(self, **kwargs):
aggregates = [a for a in self.keyspace_aggregate_meta.values() if a.name == kwargs['name']]
self.assertEqual(len(aggregates), 2)
self.assertNotEqual(aggregates[0].argument_types, aggregates[1].argument_types)
def test_aggregates_follow_keyspace_alter(self):
"""
Test to verify that aggregates maintain equality after a keyspace is altered
test_aggregates_follow_keyspace_alter creates an aggregate, then alters the keyspace associated with that
aggregate. After the alter we validate that the aggregate maintains the same metadata
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregates are the same after parent keyspace is altered
@test_category function
"""
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond='0')):
original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name)
try:
new_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
self.assertIs(original_keyspace_meta.aggregates, new_keyspace_meta.aggregates)
finally:
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name)
def test_cql_optional_params(self):
"""
Test to verify that the initial_condition and final_func parameters are correctly honored
test_cql_optional_params creates various aggregates with different combinations of the initial_condition
and final_func parameters set. It then ensures they are correctly honored.
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result initial_condition and final_func parameters are honored correctly
@test_category function
"""
kwargs = self.make_aggregate_kwargs('extend_list', 'list<text>')
encoder = Encoder()
# no initial condition, no final func
self.assertIsNone(kwargs['initial_condition'])
self.assertIsNone(kwargs['final_func'])
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertIsNone(meta.initial_condition)
self.assertIsNone(meta.final_func)
cql = meta.as_cql_query()
self.assertEqual(cql.find('INITCOND'), -1)
self.assertEqual(cql.find('FINALFUNC'), -1)
# initial condition, no final func
kwargs['initial_condition'] = encoder.cql_encode_all_types(['init', 'cond'])
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertEqual(meta.initial_condition, kwargs['initial_condition'])
self.assertIsNone(meta.final_func)
cql = meta.as_cql_query()
search_string = "INITCOND %s" % kwargs['initial_condition']
self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql))
self.assertEqual(cql.find('FINALFUNC'), -1)
# no initial condition, final func
kwargs['initial_condition'] = None
kwargs['final_func'] = 'List_As_String'
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertIsNone(meta.initial_condition)
self.assertEqual(meta.final_func, kwargs['final_func'])
cql = meta.as_cql_query()
self.assertEqual(cql.find('INITCOND'), -1)
search_string = 'FINALFUNC "%s"' % kwargs['final_func']
self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql))
# both
kwargs['initial_condition'] = encoder.cql_encode_all_types(['init', 'cond'])
kwargs['final_func'] = 'List_As_String'
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertEqual(meta.initial_condition, kwargs['initial_condition'])
self.assertEqual(meta.final_func, kwargs['final_func'])
cql = meta.as_cql_query()
init_cond_idx = cql.find("INITCOND %s" % kwargs['initial_condition'])
final_func_idx = cql.find('FINALFUNC "%s"' % kwargs['final_func'])
self.assertNotIn(-1, (init_cond_idx, final_func_idx))
self.assertGreater(init_cond_idx, final_func_idx)
class BadMetaTest(unittest.TestCase):
"""
Test behavior when metadata has unexpected form
Verify that new cluster/session can still connect, and the CQL output indicates the exception with a warning.
PYTHON-370
"""
class BadMetaException(Exception):
pass
@property
def function_name(self):
return self._testMethodName.lower()
@classmethod
def setup_class(cls):
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.keyspace_name = cls.__name__.lower()
cls.session = cls.cluster.connect()
cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name)
cls.session.set_keyspace(cls.keyspace_name)
connection = cls.cluster.control_connection._connection
cls.parser_class = get_schema_parser(connection, str(CASS_SERVER_VERSION[0]), timeout=20).__class__
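# Grab the live control connection so get_schema_parser resolves the server-version-specific
# schema parser class; the Bad* tests below patch internals of exactly that class.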
@classmethod
def teardown_class(cls):
drop_keyspace_shutdown_cluster(cls.keyspace_name, cls.session, cls.cluster)
def _skip_if_not_version(self, version):
if CASS_SERVER_VERSION < version:
raise unittest.SkipTest("Requires server version >= %s" % (version,))
def test_bad_keyspace(self):
with patch.object(self.parser_class, '_build_keyspace_metadata_internal', side_effect=self.BadMetaException):
self.cluster.refresh_keyspace_metadata(self.keyspace_name)
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
def test_bad_table(self):
self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name)
with patch.object(self.parser_class, '_build_column_metadata', side_effect=self.BadMetaException):
self.cluster.refresh_table_metadata(self.keyspace_name, self.function_name)
m = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
def test_bad_index(self):
self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name)
self.session.execute('CREATE INDEX ON %s(v)' % self.function_name)
with patch.object(self.parser_class, '_build_index_metadata', side_effect=self.BadMetaException):
self.cluster.refresh_table_metadata(self.keyspace_name, self.function_name)
m = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
def test_bad_user_type(self):
self._skip_if_not_version((2, 1, 0))
self.session.execute('CREATE TYPE %s (i int, d double)' % self.function_name)
with patch.object(self.parser_class, '_build_user_type', side_effect=self.BadMetaException):
self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
def test_bad_user_function(self):
self._skip_if_not_version((2, 2, 0))
self.session.execute("""CREATE FUNCTION IF NOT EXISTS %s (key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""" % self.function_name)
with patch.object(self.parser_class, '_build_function', side_effect=self.BadMetaException):
self.cluster.refresh_schema_metadata() # presently do not capture these errors on function direct refresh -- make sure it's contained during full refresh
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
def test_bad_user_aggregate(self):
self._skip_if_not_version((2, 2, 0))
self.session.execute("""CREATE FUNCTION IF NOT EXISTS sum_int (key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""")
self.session.execute("""CREATE AGGREGATE %s(int)
SFUNC sum_int
STYPE int
INITCOND 0""" % self.function_name)
with patch.object(self.parser_class, '_build_aggregate', side_effect=self.BadMetaException):
self.cluster.refresh_schema_metadata() # presently do not capture these errors on aggregate direct refresh -- make sure it's contained during full refresh
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
class MaterializedViewMetadataTestSimple(BasicSharedKeyspaceUnitTestCase):
def setUp(self):
if CASS_SERVER_VERSION < (3, 0):
raise unittest.SkipTest("Materialized views require Cassandra 3.0+")
self.session.execute("CREATE TABLE {0}.{1} (pk int PRIMARY KEY, c int)".format(self.keyspace_name, self.function_table_name))
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name))
def tearDown(self):
self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.keyspace_name))
self.session.execute("DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name))
def test_materialized_view_metadata_creation(self):
"""
test for materialized view metadata creation
test_materialized_view_metadata_creation tests that materialized view metadata is properly created implicitly in
both keyspace and table metadata under "views". It creates a simple base table and then creates a view based
on that table. It then checks that the materialized view metadata is contained in the keyspace and table
metadata. Finally, it checks that the keyspace_name and the base_table_name in the view metadata are properly set.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata in both the ks and table should be created when a new view is created.
@test_category metadata
"""
self.assertIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].views)
self.assertIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.assertEqual(self.keyspace_name, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].keyspace_name)
self.assertEqual(self.function_table_name, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].base_table_name)
def test_materialized_view_metadata_alter(self):
"""
test for materialized view metadata alteration
test_materialized_view_metadata_alter tests that materialized view metadata is properly updated implicitly in the
table metadata once that view is updated. It creates a simple base table and then creates a view based
on that table. It then alters that materialized view and checks that the materialized view metadata is altered in
the table metadata.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata should be updated when the view is altered.
@test_category metadata
"""
self.assertIn("SizeTieredCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"] )
self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.keyspace_name))
self.assertIn("LeveledCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"])
def test_materialized_view_metadata_drop(self):
"""
test for materialized view metadata dropping
test_materialized_view_metadata_drop tests that materialized view metadata is properly removed implicitly in
both keyspace and table metadata once that view is dropped. It creates a simple base table and then creates a view
based on that table. It then drops that materialized view and checks that the materialized view metadata is removed
from the keyspace and table metadata.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata in both the ks and table should be removed when the view is dropped.
@test_category metadata
"""
self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.keyspace_name))
self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].views)
self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].views)
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name))
class MaterializedViewMetadataTestComplex(BasicSegregatedKeyspaceUnitTestCase):
def setUp(self):
if CASS_SERVER_VERSION < (3, 0):
raise unittest.SkipTest("Materialized views require Cassandra 3.0+")
super(MaterializedViewMetadataTestComplex, self).setUp()
def test_create_view_metadata(self):
"""
test to ensure that materialized view metadata is properly constructed
test_create_view_metadata tests that materialized view metadata is properly constructed. It runs a simple
query to construct a materialized view, then proceeds to inspect the metadata associated with that MV.
Columns are inspected to ensure that all are of the proper type, and in the proper order.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata should be constructed appropriately.
@test_category metadata
"""
create_table = """CREATE TABLE {0}.scores(
user TEXT,
game TEXT,
year INT,
month INT,
day INT,
score INT,
PRIMARY KEY (user, game, year, month, day)
)""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
SELECT game, year, month, score, user, day FROM {0}.scores
WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
PRIMARY KEY ((game, year, month), score, user, day)
WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name)
self.session.execute(create_mv)
score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
mv = self.cluster.metadata.keyspaces[self.keyspace_name].views['monthlyhigh']
self.assertIsNotNone(score_table.views["monthlyhigh"])
self.assertEqual(len(score_table.views), 1)
# Make sure user is a partition key, and not null
self.assertEqual(len(score_table.partition_key), 1)
self.assertIsNotNone(score_table.columns['user'])
self.assertEqual(score_table.columns['user'], score_table.partition_key[0])
# Validate clustering keys
self.assertEqual(len(score_table.clustering_key), 4)
self.assertIsNotNone(score_table.columns['game'])
self.assertEqual(score_table.columns['game'], score_table.clustering_key[0])
self.assertIsNotNone(score_table.columns['year'])
self.assertEqual(score_table.columns['year'], score_table.clustering_key[1])
self.assertIsNotNone(score_table.columns['month'])
self.assertEqual(score_table.columns['month'], score_table.clustering_key[2])
self.assertIsNotNone(score_table.columns['day'])
self.assertEqual(score_table.columns['day'], score_table.clustering_key[3])
self.assertIsNotNone(score_table.columns['score'])
# Validate basic mv information
self.assertEquals(mv.keyspace_name, self.keyspace_name)
self.assertEquals(mv.name, "monthlyhigh")
self.assertEquals(mv.base_table_name, "scores")
self.assertFalse(mv.include_all_columns)
# Validate that all columns are preset and correct
mv_columns = list(mv.columns.values())
self.assertEquals(len(mv_columns), 6)
game_column = mv_columns[0]
self.assertIsNotNone(game_column)
self.assertEquals(game_column.name, 'game')
self.assertEquals(game_column, mv.partition_key[0])
year_column = mv_columns[1]
self.assertIsNotNone(year_column)
self.assertEquals(year_column.name, 'year')
self.assertEquals(year_column, mv.partition_key[1])
month_column = mv_columns[2]
self.assertIsNotNone(month_column)
self.assertEquals(month_column.name, 'month')
self.assertEquals(month_column, mv.partition_key[2])
def compare_columns(a, b, name):
self.assertEquals(a.name, name)
self.assertEquals(a.name, b.name)
self.assertEquals(a.table, b.table)
self.assertEquals(a.cql_type, b.cql_type)
self.assertEquals(a.is_static, b.is_static)
self.assertEquals(a.is_reversed, b.is_reversed)
score_column = mv_columns[3]
compare_columns(score_column, mv.clustering_key[0], 'score')
user_column = mv_columns[4]
compare_columns(user_column, mv.clustering_key[1], 'user')
day_column = mv_columns[5]
compare_columns(day_column, mv.clustering_key[2], 'day')
def test_base_table_column_addition_mv(self):
"""
test to ensure that materialized view metadata is properly updated when base columns are added
test_base_table_column_addition_mv tests that materialized view metadata is properly updated when columns are
added to the base table.
@since 3.0.0
@jira_ticket PYTHON-419
@expected_result Materialized view metadata should be updated correctly
@test_category metadata
"""
create_table = """CREATE TABLE {0}.scores(
user TEXT,
game TEXT,
year INT,
month INT,
day INT,
score TEXT,
PRIMARY KEY (user, game, year, month, day)
)""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
SELECT game, year, month, score, user, day FROM {0}.scores
WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
PRIMARY KEY ((game, year, month), score, user, day)
WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name)
create_mv_alltime = """CREATE MATERIALIZED VIEW {0}.alltimehigh AS
SELECT * FROM {0}.scores
WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL
PRIMARY KEY (game, score, user, year, month, day)
WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name)
self.session.execute(create_mv)
self.session.execute(create_mv_alltime)
score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
self.assertIsNotNone(score_table.views["monthlyhigh"])
self.assertIsNotNone(score_table.views["alltimehigh"])
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2)
alter_fouls = """ALTER TABLE {0}.scores ADD fouls INT""".format(self.keyspace_name)
self.session.execute(alter_fouls)
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2)
score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
self.assertIn("fouls", score_table.columns)
# This is a workaround for mv notifications being separate from base table schema responses.
# This may be fixed with future protocol changes
for i in range(10):
mv_alltime = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"]
if "fouls" in mv_alltime.columns:
break
time.sleep(.2)
self.assertIn("fouls", mv_alltime.columns)
mv_alltime_fouls_column = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"].columns['fouls']
self.assertEquals(mv_alltime_fouls_column.cql_type, 'int')
def test_base_table_type_alter_mv(self):
"""
test to ensure that materialized view metadata is properly updated when a type in the base table
is updated.
test_base_table_type_alter_mv tests that materialized view metadata is properly updated when the type of a base
table column is changed.
@since 3.0.0
@jira_ticket CASSANDRA-10424
@expected_result Materialized view metadata should be updated correctly
@test_category metadata
"""
create_table = """CREATE TABLE {0}.scores(
user TEXT,
game TEXT,
year INT,
month INT,
day INT,
score TEXT,
PRIMARY KEY (user, game, year, month, day)
)""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
SELECT game, year, month, score, user, day FROM {0}.scores
WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
PRIMARY KEY ((game, year, month), score, user, day)
WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name)
self.session.execute(create_mv)
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1)
alter_scores = """ALTER TABLE {0}.scores ALTER score TYPE blob""".format(self.keyspace_name)
self.session.execute(alter_scores)
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1)
score_column = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'].columns['score']
self.assertEquals(score_column.cql_type, 'blob')
# until CASSANDRA-9920+CASSANDRA-10500 MV updates are only available later with an async event
for i in range(10):
score_mv_column = self.cluster.metadata.keyspaces[self.keyspace_name].views["monthlyhigh"].columns['score']
if "blob" == score_mv_column.cql_type:
break
time.sleep(.2)
self.assertEquals(score_mv_column.cql_type, 'blob')
def test_metadata_with_quoted_identifiers(self):
"""
test to ensure that materialized view metadata is properly constructed when quoted identifiers are used
test_metadata_with_quoted_identifiers tests that materialized view metadata is properly constructed.
It runs a simple query to construct a materialized view, then proceeds to inspect the metadata associated with
that MV. The caveat here is that the tables and the materialized view both have quoted identifiers
Columns are inspected to ensure that all are of the proper type, and in the proper order.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata should be constructed appropriately even with quoted identifiers.
@test_category metadata
"""
create_table = """CREATE TABLE {0}.t1 (
"theKey" int,
"the;Clustering" int,
"the Value" int,
PRIMARY KEY ("theKey", "the;Clustering"))""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.mv1 AS
SELECT "theKey", "the;Clustering", "the Value"
FROM {0}.t1
WHERE "theKey" IS NOT NULL AND "the;Clustering" IS NOT NULL AND "the Value" IS NOT NULL
PRIMARY KEY ("theKey", "the;Clustering")""".format(self.keyspace_name)
self.session.execute(create_mv)
t1_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['t1']
mv = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']
self.assertIsNotNone(t1_table.views["mv1"])
self.assertEqual(len(t1_table.views), 1)
# Validate partition key, and not null
self.assertEqual(len(t1_table.partition_key), 1)
self.assertIsNotNone(t1_table.columns['theKey'])
self.assertEqual(t1_table.columns['theKey'], t1_table.partition_key[0])
# Validate clustering key column
self.assertEqual(len(t1_table.clustering_key), 1)
self.assertIsNotNone(t1_table.columns['the;Clustering'])
self.assertEqual(t1_table.columns['the;Clustering'], t1_table.clustering_key[0])
# Validate regular column
self.assertIsNotNone(t1_table.columns['the Value'])
# Validate basic mv information
self.assertEquals(mv.keyspace_name, self.keyspace_name)
self.assertEquals(mv.name, "mv1")
self.assertEquals(mv.base_table_name, "t1")
self.assertFalse(mv.include_all_columns)
# Validate that all columns are preset and correct
mv_columns = list(mv.columns.values())
self.assertEquals(len(mv_columns), 3)
theKey_column = mv_columns[0]
self.assertIsNotNone(theKey_column)
self.assertEquals(theKey_column.name, 'theKey')
self.assertEquals(theKey_column, mv.partition_key[0])
cluster_column = mv_columns[1]
self.assertIsNotNone(cluster_column)
self.assertEquals(cluster_column.name, 'the;Clustering')
self.assertEquals(cluster_column.name, mv.clustering_key[0].name)
self.assertEquals(cluster_column.table, mv.clustering_key[0].table)
self.assertEquals(cluster_column.is_static, mv.clustering_key[0].is_static)
self.assertEquals(cluster_column.is_reversed, mv.clustering_key[0].is_reversed)
value_column = mv_columns[2]
self.assertIsNotNone(value_column)
self.assertEquals(value_column.name, 'the Value')
@dseonly
class DSEMetadataTest(BasicExistingSegregatedKeyspaceUnitTestCase):
def test_dse_specific_meta(self):
"""
Test to ensure DSE metadata is populated appropriately.
@since 3.4
@jira_ticket PYTHON-555
@expected_result metadata for dse_version, and dse_workload should be populated on dse clusters
@test_category metadata
"""
for host in self.cluster.metadata.all_hosts():
self.assertIsNotNone(host.dse_version, "Dse version not populated as expected")
self.assertEqual(host.dse_version, DSE_VERSION)
self.assertTrue("Cassandra" in host.dse_workload)
| kishkaru/python-driver | tests/integration/standard/test_metadata.py | Python | apache-2.0 | 113,783 |
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1+
# systemd-networkd tests
import os
import re
import shutil
import signal
import socket
import subprocess
import sys
import time
import unittest
from shutil import copytree
network_unit_file_path='/run/systemd/network'
networkd_runtime_directory='/run/systemd/netif'
networkd_ci_path='/run/networkd-ci'
network_sysctl_ipv6_path='/proc/sys/net/ipv6/conf'
network_sysctl_ipv4_path='/proc/sys/net/ipv4/conf'
dnsmasq_pid_file='/run/networkd-ci/test-test-dnsmasq.pid'
dnsmasq_log_file='/run/networkd-ci/test-dnsmasq-log-file'
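# Module availability is probed in two steps: consult lsmod for already-loaded modules, then
# fall back to modprobe (which exits 0 on success, so `not subprocess.call(...)` is True when
# loading works).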
def is_module_available(module_name):
lsmod_output = subprocess.check_output('lsmod', universal_newlines=True)
module_re = re.compile(r'^{0}\b'.format(re.escape(module_name)), re.MULTILINE)
return module_re.search(lsmod_output) or not subprocess.call(["modprobe", module_name])
def expectedFailureIfModuleIsNotAvailable(module_name):
def f(func):
if not is_module_available(module_name):
return unittest.expectedFailure(func)
return func
return f
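# The next three decorators probe for kernel support by attempting the operation once with
# ip(8); on success the probe artifact is deleted and the test runs normally, otherwise the
# test is marked as an expected failure rather than an error.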
def expectedFailureIfERSPANModuleIsNotAvailable():
def f(func):
rc = subprocess.call(['ip', 'link', 'add', 'dev', 'erspan99', 'type', 'erspan', 'seq', 'key', '30', 'local', '192.168.1.4', 'remote', '192.168.1.1', 'erspan_ver', '1', 'erspan', '123'])
if rc == 0:
subprocess.call(['ip', 'link', 'del', 'erspan99'])
return func
else:
return unittest.expectedFailure(func)
return f
def expectedFailureIfRoutingPolicyPortRangeIsNotAvailable():
def f(func):
rc = subprocess.call(['ip', 'rule', 'add', 'from', '192.168.100.19', 'sport', '1123-1150', 'dport', '3224-3290', 'table', '7'])
if rc == 0:
subprocess.call(['ip', 'rule', 'del', 'from', '192.168.100.19', 'sport', '1123-1150', 'dport', '3224-3290', 'table', '7'])
return func
else:
return unittest.expectedFailure(func)
return f
def expectedFailureIfRoutingPolicyIPProtoIsNotAvailable():
def f(func):
rc = subprocess.call(['ip', 'rule', 'add', 'not', 'from', '192.168.100.19', 'ipproto', 'tcp', 'table', '7'])
if rc == 0:
subprocess.call(['ip', 'rule', 'del', 'not', 'from', '192.168.100.19', 'ipproto', 'tcp', 'table', '7'])
return func
else:
return unittest.expectedFailure(func)
return f
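# Module-level fixture: recreate /run/networkd-ci from the bundled conf/ directory (rmtree
# first, since copytree requires a nonexistent destination) and stop the networkd socket so
# the tests fully control when the daemon starts.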
def setUpModule():
os.makedirs(network_unit_file_path, exist_ok=True)
os.makedirs(networkd_ci_path, exist_ok=True)
shutil.rmtree(networkd_ci_path)
copytree(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'conf'), networkd_ci_path)
subprocess.check_call('systemctl stop systemd-networkd.socket', shell=True)
def tearDownModule():
shutil.rmtree(networkd_ci_path)
subprocess.check_call('systemctl stop systemd-networkd.service', shell=True)
subprocess.check_call('systemctl start systemd-networkd.socket', shell=True)
subprocess.check_call('systemctl start systemd-networkd.service', shell=True)
class Utilities():
dhcp_server_data = []
def read_link_attr(self, link, dev, attribute):
with open(os.path.join('/sys/class/net', link, dev, attribute)) as f:
return f.readline().strip()
def read_bridge_port_attr(self, bridge, link, attribute):
path_bridge = os.path.join('/sys/devices/virtual/net', bridge)
path_port = 'lower_' + link + '/brport'
path = os.path.join(path_bridge, path_port)
with open(os.path.join(path, attribute)) as f:
return f.readline().strip()
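# Returns True when the named network interface is present under /sys/class/net.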
def link_exits(self, link):
return os.path.exists(os.path.join('/sys/class/net', link))
def link_remove(self, links):
for link in links:
if os.path.exists(os.path.join('/sys/class/net', link)):
subprocess.call(['ip', 'link', 'del', 'dev', link])
time.sleep(1)
def read_ipv6_sysctl_attr(self, link, attribute):
with open(os.path.join(network_sysctl_ipv6_path, link, attribute)) as f:
return f.readline().strip()
def read_ipv4_sysctl_attr(self, link, attribute):
with open(os.path.join(network_sysctl_ipv4_path, link, attribute)) as f:
return f.readline().strip()
def copy_unit_to_networkd_unit_path(self, *units):
for unit in units:
shutil.copy(os.path.join(networkd_ci_path, unit), network_unit_file_path)
if (os.path.exists(os.path.join(networkd_ci_path, unit + '.d'))):
copytree(os.path.join(networkd_ci_path, unit + '.d'), os.path.join(network_unit_file_path, unit + '.d'))
def remove_unit_from_networkd_path(self, units):
for unit in units:
if (os.path.exists(os.path.join(network_unit_file_path, unit))):
os.remove(os.path.join(network_unit_file_path, unit))
if (os.path.exists(os.path.join(network_unit_file_path, unit + '.d'))):
shutil.rmtree(os.path.join(network_unit_file_path, unit + '.d'))
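# start_dnsmasq runs dnsmasq as the DHCPv4/DHCPv6/RA server on veth-peer: -8 directs the log
# to a file, -R skips /etc/resolv.conf, --dhcp-option=26,1492 advertises an MTU of 1492,
# --dhcp-option=33,... pushes static routes, and --port=0 disables the DNS service entirely.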
def start_dnsmasq(self, additional_options=''):
dnsmasq_command = 'dnsmasq -8 /var/run/networkd-ci/test-dnsmasq-log-file --log-queries=extra --log-dhcp --pid-file=/var/run/networkd-ci/test-test-dnsmasq.pid --conf-file=/dev/null --interface=veth-peer --enable-ra --dhcp-range=2600::10,2600::20 --dhcp-range=192.168.5.10,192.168.5.200 -R --dhcp-leasefile=/var/run/networkd-ci/lease --dhcp-option=26,1492 --dhcp-option=option:router,192.168.5.1 --dhcp-option=33,192.168.5.4,192.168.5.5 --port=0 ' + additional_options
subprocess.check_call(dnsmasq_command, shell=True)
time.sleep(10)
def stop_dnsmasq(self, pid_file):
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
pid = f.read().rstrip(' \t\r\n\0')
os.kill(int(pid), signal.SIGTERM)
os.remove(pid_file)
def search_words_in_dnsmasq_log(self, words, show_all=False):
if os.path.exists(dnsmasq_log_file):
with open(dnsmasq_log_file) as in_file:
contents = in_file.read()
if show_all:
print(contents)
for line in contents.split('\n'):
if words in line:
print("%s, %s" % (words, line))
return True
return False
def remove_lease_file(self):
if os.path.exists(os.path.join(networkd_ci_path, 'lease')):
os.remove(os.path.join(networkd_ci_path, 'lease'))
def remove_log_file(self):
if os.path.exists(dnsmasq_log_file):
os.remove(dnsmasq_log_file)
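# start_networkd removes any stale /run/systemd/netif/state first so every test observes a
# clean daemon start; the sleep gives networkd time to process the freshly copied units.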
def start_networkd(self):
if (os.path.exists(os.path.join(networkd_runtime_directory, 'state'))):
subprocess.check_call('systemctl stop systemd-networkd', shell=True)
os.remove(os.path.join(networkd_runtime_directory, 'state'))
subprocess.check_call('systemctl start systemd-networkd', shell=True)
else:
subprocess.check_call('systemctl restart systemd-networkd', shell=True)
time.sleep(5)
print()
class NetworkdNetDevTests(unittest.TestCase, Utilities):
links =[
'6rdtun99',
'bond99',
'bridge99',
'dropin-test',
'dummy98',
'erspan-test',
'geneve99',
'gretap99',
'gretun99',
'ip6gretap99',
'ip6tnl99',
'ipiptun99',
'ipvlan99',
'isataptun99',
'macvlan99',
'macvtap99',
'sittun99',
'tap99',
'test1',
'tun99',
'vcan99',
'veth99',
'vlan99',
'vrf99',
'vti6tun99',
'vtitun99',
'vxlan99',
'wg98',
'wg99']
units = [
'10-dropin-test.netdev',
'11-dummy.netdev',
'12-dummy.netdev',
'21-macvlan.netdev',
'21-macvtap.netdev',
'21-vlan.netdev',
'21-vlan.network',
'25-6rd-tunnel.netdev',
'25-bond.netdev',
'25-bond-balanced-tlb.netdev',
'25-bridge.netdev',
'25-erspan-tunnel.netdev',
'25-geneve.netdev',
'25-gretap-tunnel.netdev',
'25-gre-tunnel.netdev',
'25-ip6gre-tunnel.netdev',
'25-ip6tnl-tunnel.netdev',
'25-ipip-tunnel-independent.netdev',
'25-ipip-tunnel.netdev',
'25-ipvlan.netdev',
'25-isatap-tunnel.netdev',
'25-sit-tunnel.netdev',
'25-tap.netdev',
'25-tun.netdev',
'25-vcan.netdev',
'25-veth.netdev',
'25-vrf.netdev',
'25-vti6-tunnel.netdev',
'25-vti-tunnel.netdev',
'25-vxlan.netdev',
'25-wireguard-23-peers.netdev',
'25-wireguard-23-peers.network',
'25-wireguard.netdev',
'6rd.network',
'gre.network',
'gretap.network',
'gretun.network',
'ip6gretap.network',
'ip6tnl.network',
'ipip.network',
'ipvlan.network',
'isatap.network',
'macvlan.network',
'macvtap.network',
'sit.network',
'vti6.network',
'vti.network',
'vxlan.network']
def setUp(self):
self.link_remove(self.links)
def tearDown(self):
self.link_remove(self.links)
self.remove_unit_from_networkd_path(self.units)
def test_dropin(self):
self.copy_unit_to_networkd_unit_path('10-dropin-test.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dropin-test'))
output = subprocess.check_output(['ip', 'link', 'show', 'dropin-test']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '00:50:56:c0:00:28')
output = subprocess.check_output(['networkctl', 'list']).rstrip().decode('utf-8')
self.assertRegex(output, '1 lo ')
self.assertRegex(output, 'dropin-test')
output = subprocess.check_output(['networkctl', 'list', 'dropin-test']).rstrip().decode('utf-8')
self.assertNotRegex(output, '1 lo ')
self.assertRegex(output, 'dropin-test')
output = subprocess.check_output(['networkctl', 'list', 'dropin-*']).rstrip().decode('utf-8')
self.assertNotRegex(output, '1 lo ')
self.assertRegex(output, 'dropin-test')
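# For reference, a unit pair matching the assertions above might look like the sketch below
# (an illustration only -- the real files ship in the conf/ directory, and Kind=dummy is an
# assumption):
#   10-dropin-test.netdev:           [NetDev] Name=dropin-test  Kind=dummy
#   10-dropin-test.netdev.d/*.conf:  [NetDev] MACAddress=00:50:56:c0:00:28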
def test_bridge(self):
self.copy_unit_to_networkd_unit_path('25-bridge.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('bridge99'))
self.assertEqual('900', self.read_link_attr('bridge99', 'bridge', 'hello_time'))
self.assertEqual('900', self.read_link_attr('bridge99', 'bridge', 'max_age'))
self.assertEqual('900', self.read_link_attr('bridge99', 'bridge','forward_delay'))
self.assertEqual('900', self.read_link_attr('bridge99', 'bridge','ageing_time'))
self.assertEqual('9', self.read_link_attr('bridge99', 'bridge','priority'))
self.assertEqual('1', self.read_link_attr('bridge99', 'bridge','multicast_querier'))
self.assertEqual('1', self.read_link_attr('bridge99', 'bridge','multicast_snooping'))
self.assertEqual('1', self.read_link_attr('bridge99', 'bridge','stp_state'))
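# The sysfs bridge attributes above are expressed in centiseconds (or raw units), so a
# '25-bridge.netdev' producing these values would look roughly like the sketch below
# (key names from systemd.netdev(5); the exact shipped unit may differ):
#   [Bridge]
#   HelloTimeSec=9
#   MaxAgeSec=9
#   ForwardDelaySec=9
#   AgeingTimeSec=9
#   Priority=9
#   MulticastQuerier=true
#   MulticastSnooping=true
#   STP=true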
def test_bond(self):
self.copy_unit_to_networkd_unit_path('25-bond.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('bond99'))
self.assertEqual('802.3ad 4', self.read_link_attr('bond99', 'bonding', 'mode'))
self.assertEqual('layer3+4 1', self.read_link_attr('bond99', 'bonding', 'xmit_hash_policy'))
self.assertEqual('1000', self.read_link_attr('bond99', 'bonding', 'miimon'))
self.assertEqual('fast 1', self.read_link_attr('bond99', 'bonding', 'lacp_rate'))
self.assertEqual('2000', self.read_link_attr('bond99', 'bonding', 'updelay'))
self.assertEqual('2000', self.read_link_attr('bond99', 'bonding', 'downdelay'))
self.assertEqual('4', self.read_link_attr('bond99', 'bonding', 'resend_igmp'))
self.assertEqual('1', self.read_link_attr('bond99', 'bonding', 'min_links'))
self.assertEqual('1218', self.read_link_attr('bond99', 'bonding', 'ad_actor_sys_prio'))
self.assertEqual('811', self.read_link_attr('bond99', 'bonding', 'ad_user_port_key'))
self.assertEqual('00:11:22:33:44:55', self.read_link_attr('bond99', 'bonding', 'ad_actor_system'))
def test_bond_balanced_tlb(self):
self.copy_unit_to_networkd_unit_path('25-bond-balanced-tlb.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('bond99'))
self.assertEqual('balance-tlb 5', self.read_link_attr('bond99', 'bonding', 'mode'))
self.assertEqual('1', self.read_link_attr('bond99', 'bonding', 'tlb_dynamic_lb'))
def test_vlan(self):
self.copy_unit_to_networkd_unit_path('21-vlan.netdev', '11-dummy.netdev', '21-vlan.network')
self.start_networkd()
self.assertTrue(self.link_exits('vlan99'))
output = subprocess.check_output(['ip', '-d', 'link', 'show', 'vlan99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'REORDER_HDR')
self.assertRegex(output, 'LOOSE_BINDING')
self.assertRegex(output, 'GVRP')
self.assertRegex(output, 'MVRP')
self.assertRegex(output, '99')
def test_macvtap(self):
self.copy_unit_to_networkd_unit_path('21-macvtap.netdev', '11-dummy.netdev', 'macvtap.network')
self.start_networkd()
self.assertTrue(self.link_exits('macvtap99'))
def test_macvlan(self):
self.copy_unit_to_networkd_unit_path('21-macvlan.netdev', '11-dummy.netdev', 'macvlan.network')
self.start_networkd()
self.assertTrue(self.link_exits('macvlan99'))
@expectedFailureIfModuleIsNotAvailable('ipvlan')
def test_ipvlan(self):
self.copy_unit_to_networkd_unit_path('25-ipvlan.netdev', '11-dummy.netdev', 'ipvlan.network')
self.start_networkd()
self.assertTrue(self.link_exits('ipvlan99'))
def test_veth(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
def test_dummy(self):
self.copy_unit_to_networkd_unit_path('11-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('test1'))
def test_tun(self):
self.copy_unit_to_networkd_unit_path('25-tun.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('tun99'))
def test_tap(self):
self.copy_unit_to_networkd_unit_path('25-tap.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('tap99'))
@expectedFailureIfModuleIsNotAvailable('vrf')
def test_vrf(self):
self.copy_unit_to_networkd_unit_path('25-vrf.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('vrf99'))
@expectedFailureIfModuleIsNotAvailable('vcan')
def test_vcan(self):
self.copy_unit_to_networkd_unit_path('25-vcan.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('vcan99'))
@expectedFailureIfModuleIsNotAvailable('wireguard')
def test_wireguard(self):
self.copy_unit_to_networkd_unit_path('25-wireguard.netdev')
self.start_networkd()
if shutil.which('wg'):
subprocess.call('wg')
output = subprocess.check_output(['wg', 'show', 'wg99', 'listen-port']).rstrip().decode('utf-8')
self.assertTrue(output, '51820')
output = subprocess.check_output(['wg', 'show', 'wg99', 'fwmark']).rstrip().decode('utf-8')
self.assertTrue(output, '0x4d2')
output = subprocess.check_output(['wg', 'show', 'wg99', 'allowed-ips']).rstrip().decode('utf-8')
self.assertTrue(output, 'RDf+LSpeEre7YEIKaxg+wbpsNV7du+ktR99uBEtIiCA=\t192.168.26.0/24 fd31:bf08:57cb::/48')
output = subprocess.check_output(['wg', 'show', 'wg99', 'persistent-keepalive']).rstrip().decode('utf-8')
self.assertTrue(output, 'RDf+LSpeEre7YEIKaxg+wbpsNV7du+ktR99uBEtIiCA=\t20')
output = subprocess.check_output(['wg', 'show', 'wg99', 'endpoints']).rstrip().decode('utf-8')
self.assertTrue(output, 'RDf+LSpeEre7YEIKaxg+wbpsNV7du+ktR99uBEtIiCA=\t192.168.27.3:51820')
self.assertTrue(self.link_exits('wg99'))
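# A '25-wireguard.netdev' consistent with the values asserted above would look roughly like
# the sketch below (key names from systemd.netdev(5); the private key is omitted and the
# exact shipped contents may differ):
#   [WireGuard]
#   ListenPort=51820
#   FirewallMark=0x4d2
#   [WireGuardPeer]
#   PublicKey=RDf+LSpeEre7YEIKaxg+wbpsNV7du+ktR99uBEtIiCA=
#   AllowedIPs=192.168.26.0/24,fd31:bf08:57cb::/48
#   Endpoint=192.168.27.3:51820
#   PersistentKeepalive=20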
@expectedFailureIfModuleIsNotAvailable('wireguard')
def test_wireguard_23_peers(self):
self.copy_unit_to_networkd_unit_path('25-wireguard-23-peers.netdev', '25-wireguard-23-peers.network')
self.start_networkd()
if shutil.which('wg'):
subprocess.call('wg')
self.assertTrue(self.link_exits('wg98'))
def test_geneve(self):
self.copy_unit_to_networkd_unit_path('25-geneve.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('geneve99'))
output = subprocess.check_output(['ip', '-d', 'link', 'show', 'geneve99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.22.1')
self.assertRegex(output, '6082')
self.assertRegex(output, 'udpcsum')
self.assertRegex(output, 'udp6zerocsumrx')
def test_ipip_tunnel(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-ipip-tunnel.netdev', 'ipip.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('ipiptun99'))
def test_gre_tunnel(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-gre-tunnel.netdev', 'gretun.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('gretun99'))
def test_gretap_tunnel(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-gretap-tunnel.netdev', 'gretap.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('gretap99'))
def test_ip6gretap_tunnel(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-ip6gre-tunnel.netdev', 'ip6gretap.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('ip6gretap99'))
def test_vti_tunnel(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-vti-tunnel.netdev', 'vti.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('vtitun99'))
def test_vti6_tunnel(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-vti6-tunnel.netdev', 'vti6.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('vti6tun99'))
def test_ip6tnl_tunnel(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-ip6tnl-tunnel.netdev', 'ip6tnl.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('ip6tnl99'))
def test_sit_tunnel(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-sit-tunnel.netdev', 'sit.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('sittun99'))
def test_isatap_tunnel(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-isatap-tunnel.netdev', 'isatap.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('isataptun99'))
output = subprocess.check_output(['ip', '-d', 'link', 'show', 'isataptun99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, "isatap ")
def test_6rd_tunnel(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-6rd-tunnel.netdev', '6rd.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('sittun99'))
@expectedFailureIfERSPANModuleIsNotAvailable()
def test_erspan_tunnel(self):
self.copy_unit_to_networkd_unit_path('25-erspan-tunnel.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('erspan-test'))
output = subprocess.check_output(['ip', '-d', 'link', 'show', 'erspan-test']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '172.16.1.200')
self.assertRegex(output, '172.16.1.100')
self.assertRegex(output, '101')
def test_tunnel_independent(self):
self.copy_unit_to_networkd_unit_path('25-ipip-tunnel-independent.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('ipiptun99'))
def test_vxlan(self):
self.copy_unit_to_networkd_unit_path('25-vxlan.netdev', 'vxlan.network','11-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('vxlan99'))
output = subprocess.check_output(['ip', '-d', 'link', 'show', 'vxlan99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, "999")
self.assertRegex(output, '5555')
self.assertRegex(output, 'l2miss')
self.assertRegex(output, 'l3miss')
self.assertRegex(output, 'udpcsum')
self.assertRegex(output, 'udp6zerocsumtx')
self.assertRegex(output, 'udp6zerocsumrx')
self.assertRegex(output, 'remcsumtx')
self.assertRegex(output, 'remcsumrx')
self.assertRegex(output, 'gbp')
class NetworkdNetWorkTests(unittest.TestCase, Utilities):
links = [
'bond199',
'dummy98',
'dummy99',
'test1']
units = [
'11-dummy.netdev',
'12-dummy.netdev',
'23-active-slave.network',
'23-bond199.network',
'23-primary-slave.network',
'23-test1-bond199.network',
'25-address-link-section.network',
'25-address-section-miscellaneous.network',
'25-address-section.network',
'25-bind-carrier.network',
'25-bond-active-backup-slave.netdev',
'25-fibrule-invert.network',
'25-fibrule-port-range.network',
'25-ipv6-address-label-section.network',
'25-neighbor-section.network',
'25-link-local-addressing-no.network',
'25-link-local-addressing-yes.network',
'25-link-section-unmanaged.network',
'25-route-gateway.network',
'25-route-gateway-on-link.network',
'25-route-ipv6-src.network',
'25-route-reverse-order.network',
'25-route-section.network',
'25-route-tcp-window-settings.network',
'25-route-type.network',
'25-sysctl.network',
'configure-without-carrier.network',
'routing-policy-rule.network',
'test-static.network']
def setUp(self):
self.link_remove(self.links)
def tearDown(self):
self.link_remove(self.links)
self.remove_unit_from_networkd_path(self.units)
def test_static_address(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', 'test-static.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['networkctl', 'status', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.0.15')
self.assertRegex(output, '192.168.0.1')
self.assertRegex(output, 'routable')
def test_configure_without_carrier(self):
self.copy_unit_to_networkd_unit_path('configure-without-carrier.network', '11-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('test1'))
output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.0.15')
self.assertRegex(output, '192.168.0.1')
self.assertRegex(output, 'routable')
def test_bond_active_slave(self):
self.copy_unit_to_networkd_unit_path('23-active-slave.network', '23-bond199.network', '25-bond-active-backup-slave.netdev', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('bond199'))
output = subprocess.check_output(['ip', '-d', 'link', 'show', 'bond199']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'active_slave dummy98')
def test_bond_primary_slave(self):
self.copy_unit_to_networkd_unit_path('23-primary-slave.network', '23-test1-bond199.network', '25-bond-active-backup-slave.netdev', '11-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('test1'))
self.assertTrue(self.link_exits('bond199'))
output = subprocess.check_output(['ip', '-d', 'link', 'show', 'bond199']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'primary test1')
def test_routing_policy_rule(self):
self.copy_unit_to_networkd_unit_path('routing-policy-rule.network', '11-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('test1'))
output = subprocess.check_output(['ip', 'rule']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '111')
self.assertRegex(output, 'from 192.168.100.18')
self.assertRegex(output, r'tos (?:0x08|throughput)\s')
self.assertRegex(output, 'iif test1')
self.assertRegex(output, 'oif test1')
self.assertRegex(output, 'lookup 7')
subprocess.call(['ip', 'rule', 'del', 'table', '7'])
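# A [RoutingPolicyRule] section consistent with the rule asserted above would read roughly
# as follows (a sketch; key names from systemd.network(5), the shipped file may differ):
#   [RoutingPolicyRule]
#   TypeOfService=0x08
#   Table=7
#   From=192.168.100.18
#   Priority=111
#   IncomingInterface=test1
#   OutgoingInterface=test1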
@expectedFailureIfRoutingPolicyPortRangeIsNotAvailable()
def test_routing_policy_rule_port_range(self):
self.copy_unit_to_networkd_unit_path('25-fibrule-port-range.network', '11-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('test1'))
output = subprocess.check_output(['ip', 'rule']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '111')
self.assertRegex(output, 'from 192.168.100.18')
self.assertRegex(output, '1123-1150')
self.assertRegex(output, '3224-3290')
self.assertRegex(output, 'tcp')
self.assertRegex(output, 'lookup 7')
subprocess.call(['ip', 'rule', 'del', 'table', '7'])
@expectedFailureIfRoutingPolicyIPProtoIsNotAvailable()
def test_routing_policy_rule_invert(self):
self.copy_unit_to_networkd_unit_path('25-fibrule-invert.network', '11-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('test1'))
output = subprocess.check_output(['ip', 'rule']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '111')
self.assertRegex(output, 'not.*?from.*?192.168.100.18')
self.assertRegex(output, 'tcp')
self.assertRegex(output, 'lookup 7')
subprocess.call(['ip', 'rule', 'del', 'table', '7'])
def test_address_peer(self):
self.copy_unit_to_networkd_unit_path('25-address-section.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['ip', 'address', 'show', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'inet 10.2.3.4 peer 10.2.3.5/16 scope global 32')
self.assertRegex(output, 'inet 10.6.7.8/16 brd 10.6.255.255 scope global 33')
self.assertRegex(output, 'inet6 2001:db8::20 peer 2001:db8::10/128 scope global')
output = subprocess.check_output(['networkctl', 'status', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'State: routable \(configured\)')
def test_address_preferred_lifetime_zero_ipv6(self):
self.copy_unit_to_networkd_unit_path('25-address-section-miscellaneous.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['ip', 'address', 'show', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'inet 10.2.3.4/16 brd 10.2.255.255 scope link deprecated dummy98')
self.assertRegex(output, 'inet6 2001:db8:0:f101::1/64 scope global')
def test_ip_route(self):
self.copy_unit_to_networkd_unit_path('25-route-section.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['ip', 'route', 'list', 'dev', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.0.1')
self.assertRegex(output, 'static')
self.assertRegex(output, '192.168.0.0/24')
def test_ip_route_reverse(self):
self.copy_unit_to_networkd_unit_path('25-route-reverse-order.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['ip', '-6', 'route', 'show', 'dev', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '2001:1234:5:8fff:ff:ff:ff:ff')
self.assertRegex(output, '2001:1234:5:8f63::1')
def test_ip_route_blackhole_unreachable_prohibit(self):
self.copy_unit_to_networkd_unit_path('25-route-type.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['ip', 'route', 'list']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'blackhole')
self.assertRegex(output, 'unreachable')
self.assertRegex(output, 'prohibit')
subprocess.call(['ip', 'route', 'del', 'blackhole', '202.54.1.2'])
subprocess.call(['ip', 'route', 'del', 'unreachable', '202.54.1.3'])
subprocess.call(['ip', 'route', 'del', 'prohibit', '202.54.1.4'])
def test_ip_route_tcp_window(self):
self.copy_unit_to_networkd_unit_path('25-route-tcp-window-settings.network', '11-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('test1'))
output = subprocess.check_output(['ip', 'route', 'list']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'initcwnd 20')
self.assertRegex(output, 'initrwnd 30')
def test_ip_route_gateway(self):
self.copy_unit_to_networkd_unit_path('25-route-gateway.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['ip', 'route', 'list', 'dev', 'dummy98', 'default']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'default')
self.assertRegex(output, 'via')
self.assertRegex(output, '149.10.124.64')
self.assertRegex(output, 'proto')
self.assertRegex(output, 'static')
output = subprocess.check_output(['ip', 'route', 'list', 'dev', 'dummy98', 'src', '149.10.124.58']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '149.10.124.48/28')
self.assertRegex(output, 'proto')
self.assertRegex(output, 'kernel')
self.assertRegex(output, 'scope')
self.assertRegex(output, 'link')
def test_ip_route_gateway_on_link(self):
self.copy_unit_to_networkd_unit_path('25-route-gateway-on-link.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['ip', 'route', 'list', 'dev', 'dummy98', 'default']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'default')
self.assertRegex(output, 'via')
self.assertRegex(output, '149.10.125.65')
self.assertRegex(output, 'proto')
self.assertRegex(output, 'static')
self.assertRegex(output, 'onlink')
output = subprocess.check_output(['ip', 'route', 'list', 'dev', 'dummy98', 'src', '149.10.124.58']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '149.10.124.48/28')
self.assertRegex(output, 'proto')
self.assertRegex(output, 'kernel')
self.assertRegex(output, 'scope')
self.assertRegex(output, 'link')
def test_ip_route_ipv6_src_route(self):
# a dummy device does not make the addresses go through tentative state, so we
# reuse a bond from an earlier test, which does make the addresses go through
# tentative state, and do our test on that
self.copy_unit_to_networkd_unit_path('23-active-slave.network', '25-route-ipv6-src.network', '25-bond-active-backup-slave.netdev', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('bond199'))
output = subprocess.check_output(['ip', '-6', 'route', 'list', 'dev', 'bond199']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'abcd::/16')
self.assertRegex(output, 'src')
self.assertRegex(output, '2001:1234:56:8f63::2')
def test_ip_link_mac_address(self):
self.copy_unit_to_networkd_unit_path('25-address-link-section.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['ip', 'link', 'show', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '00:01:02:aa:bb:cc')
def test_ip_link_unmanaged(self):
self.copy_unit_to_networkd_unit_path('25-link-section-unmanaged.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['networkctl', 'status', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'unmanaged')
def test_ipv6_address_label(self):
self.copy_unit_to_networkd_unit_path('25-ipv6-address-label-section.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['ip', 'addrlabel', 'list']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '2004:da8:1::/64')
def test_ipv6_neighbor(self):
self.copy_unit_to_networkd_unit_path('25-neighbor-section.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['ip', 'neigh', 'list']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.10.1.*00:00:5e:00:02:65.*PERMANENT')
self.assertRegex(output, '2004:da8:1::1.*00:00:5e:00:02:66.*PERMANENT')
def test_link_local_addressing(self):
self.copy_unit_to_networkd_unit_path('25-link-local-addressing-yes.network', '11-dummy.netdev',
'25-link-local-addressing-no.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('test1'))
self.assertTrue(self.link_exits('dummy98'))
time.sleep(10)
output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'test1']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'inet .* scope link')
self.assertRegex(output, 'inet6 .* scope link')
output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertNotRegex(output, 'inet6* .* scope link')
output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8')
print(output)
        self.assertRegex(output, r'State: degraded \(configured\)')
output = subprocess.check_output(['networkctl', 'status', 'dummy98']).rstrip().decode('utf-8')
print(output)
        self.assertRegex(output, r'State: carrier \(configured\)')
'''
Documentation/networking/ip-sysctl.txt
addr_gen_mode - INTEGER
Defines how link-local and autoconf addresses are generated.
0: generate address based on EUI64 (default)
        1: do not generate a link-local address, use EUI64 for addresses generated
from autoconf
2: generate stable privacy addresses, using the secret from
stable_secret (RFC7217)
3: generate stable privacy addresses, using a random secret if unset
'''
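        # The expected addr_gen_mode for test1 therefore depends on whether a
        # readable stable_secret is configured: EIO on read -> mode 0 (EUI64),
        # readable secret -> mode 2 (stable privacy, RFC 7217).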
test1_addr_gen_mode = ''
if os.path.exists(os.path.join(os.path.join(network_sysctl_ipv6_path, 'test1'), 'stable_secret')):
with open(os.path.join(os.path.join(network_sysctl_ipv6_path, 'test1'), 'stable_secret')) as f:
try:
f.readline()
except IOError:
# if stable_secret is unset, then EIO is returned
test1_addr_gen_mode = '0'
else:
test1_addr_gen_mode = '2'
else:
test1_addr_gen_mode = '0'
if os.path.exists(os.path.join(os.path.join(network_sysctl_ipv6_path, 'test1'), 'addr_gen_mode')):
            self.assertEqual(self.read_ipv6_sysctl_attr('test1', 'addr_gen_mode'), test1_addr_gen_mode)
if os.path.exists(os.path.join(os.path.join(network_sysctl_ipv6_path, 'dummy98'), 'addr_gen_mode')):
self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'addr_gen_mode'), '1')
def test_sysctl(self):
self.copy_unit_to_networkd_unit_path('25-sysctl.network', '12-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'forwarding'), '1')
self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'use_tempaddr'), '2')
self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'dad_transmits'), '3')
self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'hop_limit'), '5')
self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'proxy_ndp'), '1')
        self.assertEqual(self.read_ipv4_sysctl_attr('dummy98', 'forwarding'), '1')
self.assertEqual(self.read_ipv4_sysctl_attr('dummy98', 'proxy_arp'), '1')
def test_bind_carrier(self):
self.copy_unit_to_networkd_unit_path('25-bind-carrier.network', '11-dummy.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('test1'))
self.assertEqual(subprocess.call(['ip', 'link', 'add', 'dummy98', 'type', 'dummy']), 0)
self.assertEqual(subprocess.call(['ip', 'link', 'set', 'dummy98', 'up']), 0)
time.sleep(4)
output = subprocess.check_output(['ip', 'address', 'show', 'test1']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'UP,LOWER_UP')
self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1')
output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8')
        self.assertRegex(output, r'State: routable \(configured\)')
self.assertEqual(subprocess.call(['ip', 'link', 'add', 'dummy99', 'type', 'dummy']), 0)
self.assertEqual(subprocess.call(['ip', 'link', 'set', 'dummy99', 'up']), 0)
time.sleep(4)
output = subprocess.check_output(['ip', 'address', 'show', 'test1']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'UP,LOWER_UP')
self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1')
output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8')
        self.assertRegex(output, r'State: routable \(configured\)')
self.assertEqual(subprocess.call(['ip', 'link', 'del', 'dummy98']), 0)
time.sleep(4)
output = subprocess.check_output(['ip', 'address', 'show', 'test1']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'UP,LOWER_UP')
self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1')
output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8')
        self.assertRegex(output, r'State: routable \(configured\)')
self.assertEqual(subprocess.call(['ip', 'link', 'del', 'dummy99']), 0)
time.sleep(4)
output = subprocess.check_output(['ip', 'address', 'show', 'test1']).rstrip().decode('utf-8')
print(output)
self.assertNotRegex(output, 'UP,LOWER_UP')
self.assertRegex(output, 'DOWN')
self.assertNotRegex(output, '192.168.10')
output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8')
        self.assertRegex(output, r'State: off \(configured\)')
self.assertEqual(subprocess.call(['ip', 'link', 'add', 'dummy98', 'type', 'dummy']), 0)
self.assertEqual(subprocess.call(['ip', 'link', 'set', 'dummy98', 'up']), 0)
time.sleep(4)
output = subprocess.check_output(['ip', 'address', 'show', 'test1']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'UP,LOWER_UP')
self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1')
output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8')
        self.assertRegex(output, r'State: routable \(configured\)')
class NetworkdNetWorkBridgeTests(unittest.TestCase, Utilities):
links = [
'bridge99',
'dummy98',
'test1']
units = [
'11-dummy.netdev',
'12-dummy.netdev',
'26-bridge.netdev',
'26-bridge-slave-interface-1.network',
'26-bridge-slave-interface-2.network',
'bridge99-ignore-carrier-loss.network',
'bridge99.network']
def setUp(self):
self.link_remove(self.links)
def tearDown(self):
self.link_remove(self.links)
self.remove_unit_from_networkd_path(self.units)
def test_bridge_property(self):
self.copy_unit_to_networkd_unit_path('11-dummy.netdev', '12-dummy.netdev', '26-bridge.netdev',
'26-bridge-slave-interface-1.network', '26-bridge-slave-interface-2.network',
'bridge99.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('test1'))
self.assertTrue(self.link_exits('bridge99'))
output = subprocess.check_output(['ip', '-d', 'link', 'show', 'test1']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'master')
self.assertRegex(output, 'bridge')
output = subprocess.check_output(['ip', '-d', 'link', 'show', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'master')
self.assertRegex(output, 'bridge')
output = subprocess.check_output(['ip', 'addr', 'show', 'bridge99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.0.15')
self.assertRegex(output, '192.168.0.1')
output = subprocess.check_output(['bridge', '-d', 'link', 'show', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertEqual(self.read_bridge_port_attr('bridge99', 'dummy98', 'hairpin_mode'), '1')
self.assertEqual(self.read_bridge_port_attr('bridge99', 'dummy98', 'path_cost'), '400')
self.assertEqual(self.read_bridge_port_attr('bridge99', 'dummy98', 'unicast_flood'), '1')
self.assertEqual(self.read_bridge_port_attr('bridge99', 'dummy98', 'multicast_fast_leave'), '1')
# CONFIG_BRIDGE_IGMP_SNOOPING=y
        if (os.path.exists('/sys/devices/virtual/net/bridge99/lower_dummy98/brport/multicast_to_unicast')):
self.assertEqual(self.read_bridge_port_attr('bridge99', 'dummy98', 'multicast_to_unicast'), '1')
self.assertEqual(subprocess.call(['ip', 'address', 'add', '192.168.0.16/24', 'dev', 'bridge99']), 0)
time.sleep(1)
self.assertEqual(subprocess.call(['ip', 'link', 'del', 'test1']), 0)
self.assertEqual(subprocess.call(['ip', 'link', 'del', 'dummy98']), 0)
time.sleep(3)
output = subprocess.check_output(['ip', 'address', 'show', 'bridge99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'NO-CARRIER')
self.assertNotRegex(output, '192.168.0.15/24')
self.assertNotRegex(output, '192.168.0.16/24')
def test_bridge_ignore_carrier_loss(self):
self.copy_unit_to_networkd_unit_path('11-dummy.netdev', '12-dummy.netdev', '26-bridge.netdev',
'26-bridge-slave-interface-1.network', '26-bridge-slave-interface-2.network',
'bridge99-ignore-carrier-loss.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
self.assertTrue(self.link_exits('test1'))
self.assertTrue(self.link_exits('bridge99'))
self.assertEqual(subprocess.call(['ip', 'address', 'add', '192.168.0.16/24', 'dev', 'bridge99']), 0)
time.sleep(1)
self.assertEqual(subprocess.call(['ip', 'link', 'del', 'test1']), 0)
self.assertEqual(subprocess.call(['ip', 'link', 'del', 'dummy98']), 0)
time.sleep(3)
output = subprocess.check_output(['ip', 'address', 'show', 'bridge99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'NO-CARRIER')
self.assertRegex(output, 'inet 192.168.0.15/24 brd 192.168.0.255 scope global bridge99')
self.assertRegex(output, 'inet 192.168.0.16/24 scope global secondary bridge99')
class NetworkdNetWorkLLDPTests(unittest.TestCase, Utilities):
links = ['veth99']
units = [
'23-emit-lldp.network',
'24-lldp.network',
'25-veth.netdev']
def setUp(self):
self.link_remove(self.links)
def tearDown(self):
self.link_remove(self.links)
self.remove_unit_from_networkd_path(self.units)
def test_lldp(self):
self.copy_unit_to_networkd_unit_path('23-emit-lldp.network', '24-lldp.network', '25-veth.netdev')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
output = subprocess.check_output(['networkctl', 'lldp']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'veth-peer')
self.assertRegex(output, 'veth99')
class NetworkdNetworkRATests(unittest.TestCase, Utilities):
links = ['veth99']
units = [
'25-veth.netdev',
'ipv6-prefix.network',
'ipv6-prefix-veth.network']
def setUp(self):
self.link_remove(self.links)
def tearDown(self):
self.link_remove(self.links)
self.remove_unit_from_networkd_path(self.units)
def test_ipv6_prefix_delegation(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'ipv6-prefix.network', 'ipv6-prefix-veth.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '2002:da8:1:0')
class NetworkdNetworkDHCPServerTests(unittest.TestCase, Utilities):
links = [
'dummy98',
'veth99']
units = [
'12-dummy.netdev',
'24-search-domain.network',
'25-veth.netdev',
'dhcp-client.network',
'dhcp-client-timezone-router.network',
'dhcp-server.network',
'dhcp-server-timezone-router.network']
def setUp(self):
self.link_remove(self.links)
def tearDown(self):
self.link_remove(self.links)
self.remove_unit_from_networkd_path(self.units)
def test_dhcp_server(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-client.network', 'dhcp-server.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.5.*')
self.assertRegex(output, 'Gateway: 192.168.5.1')
self.assertRegex(output, 'DNS: 192.168.5.1')
self.assertRegex(output, 'NTP: 192.168.5.1')
def test_domain(self):
self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '24-search-domain.network')
self.start_networkd()
self.assertTrue(self.link_exits('dummy98'))
output = subprocess.check_output(['networkctl', 'status', 'dummy98']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'Address: 192.168.42.100')
self.assertRegex(output, 'DNS: 192.168.42.1')
self.assertRegex(output, 'Search Domains: one')
def test_emit_router_timezone(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-client-timezone-router.network', 'dhcp-server-timezone-router.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'Gateway: 192.168.5.*')
self.assertRegex(output, '192.168.5.*')
self.assertRegex(output, 'Europe/Berlin')
class NetworkdNetworkDHCPClientTests(unittest.TestCase, Utilities):
links = [
'dummy98',
'veth99']
units = [
'25-veth.netdev',
'dhcp-client-anonymize.network',
'dhcp-client-critical-connection.network',
'dhcp-client-ipv4-dhcp-settings.network',
'dhcp-client-ipv4-only-ipv6-disabled.network',
'dhcp-client-ipv4-only.network',
'dhcp-client-ipv6-only.network',
'dhcp-client-ipv6-rapid-commit.network',
'dhcp-client-listen-port.network',
'dhcp-client-route-metric.network',
'dhcp-client-route-table.network',
'dhcp-client.network',
'dhcp-server-veth-peer.network',
'dhcp-v4-server-veth-peer.network',
'static.network']
def setUp(self):
self.link_remove(self.links)
self.stop_dnsmasq(dnsmasq_pid_file)
def tearDown(self):
self.link_remove(self.links)
self.remove_unit_from_networkd_path(self.units)
self.stop_dnsmasq(dnsmasq_pid_file)
self.remove_lease_file()
self.remove_log_file()
def test_dhcp_client_ipv6_only(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-only.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq()
output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '2600::')
self.assertNotRegex(output, '192.168.5')
def test_dhcp_client_ipv4_only(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv4-only-ipv6-disabled.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq()
output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertNotRegex(output, '2600::')
self.assertRegex(output, '192.168.5')
def test_dhcp_client_ipv4_ipv6(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-only.network',
'dhcp-client-ipv4-only.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq()
output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '2600::')
self.assertRegex(output, '192.168.5')
def test_dhcp_client_settings(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv4-dhcp-settings.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq()
output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '12:34:56:78:9a:bc')
self.assertRegex(output, '192.168.5')
self.assertRegex(output, '1492')
output = subprocess.check_output(['ip', 'route']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'default.*dev veth99 proto dhcp')
self.assertTrue(self.search_words_in_dnsmasq_log('vendor class: SusantVendorTest', True))
self.assertTrue(self.search_words_in_dnsmasq_log('DHCPDISCOVER(veth-peer) 12:34:56:78:9a:bc'))
self.assertTrue(self.search_words_in_dnsmasq_log('client provides name: test-hostname'))
self.assertTrue(self.search_words_in_dnsmasq_log('26:mtu'))
def test_dhcp6_client_settings_rapidcommit_true(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-only.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq()
output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '12:34:56:78:9a:bc')
self.assertTrue(self.search_words_in_dnsmasq_log('14:rapid-commit', True))
def test_dhcp6_client_settings_rapidcommit_false(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-rapid-commit.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq()
output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '12:34:56:78:9a:bc')
self.assertFalse(self.search_words_in_dnsmasq_log('14:rapid-commit', True))
def test_dhcp_client_settings_anonymize(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-anonymize.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq()
self.assertFalse(self.search_words_in_dnsmasq_log('VendorClassIdentifier=SusantVendorTest', True))
self.assertFalse(self.search_words_in_dnsmasq_log('test-hostname'))
self.assertFalse(self.search_words_in_dnsmasq_log('26:mtu'))
def test_dhcp_client_listen_port(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-listen-port.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq('--dhcp-alternate-port=67,5555')
output = subprocess.check_output(['ip', '-4', 'address', 'show', 'dev', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.5.* dynamic')
def test_dhcp_route_table_id(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-v4-server-veth-peer.network', 'dhcp-client-route-table.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq()
output = subprocess.check_output(['ip', 'route', 'show', 'table', '12']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'veth99 proto dhcp')
self.assertRegex(output, '192.168.5.1')
def test_dhcp_route_metric(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-v4-server-veth-peer.network', 'dhcp-client-route-metric.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq()
output = subprocess.check_output(['ip', 'route', 'show', 'dev', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, 'metric 24')
def test_dhcp_route_criticalconnection_true(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-v4-server-veth-peer.network', 'dhcp-client-critical-connection.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq()
output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.5.*')
        # Stopping dnsmasq so networkd won't be allowed to renew the DHCP lease.
self.stop_dnsmasq(dnsmasq_pid_file)
        # Sleep for 125 sec: the minimum lease time dnsmasq allows is 120 sec, so wait until the lease has expired.
time.sleep(125)
output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.5.*')
def test_dhcp_client_reuse_address_as_static(self):
self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client.network')
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
self.start_dnsmasq()
output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'veth99', 'scope', 'global']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.5')
self.assertRegex(output, '2600::')
        ipv4_address = re.search(r'192\.168\.5\.[0-9]*/24', output)
        ipv6_address = re.search(r'2600::[0-9a-f:]*/128', output)
static_network = '\n'.join(['[Match]', 'Name=veth99', '[Network]', 'IPv6AcceptRA=no', 'Address=' + ipv4_address.group(), 'Address=' + ipv6_address.group()])
print(static_network)
self.remove_unit_from_networkd_path(['dhcp-client.network'])
with open(os.path.join(network_unit_file_path, 'static.network'), mode='w') as f:
f.write(static_network)
self.start_networkd()
self.assertTrue(self.link_exits('veth99'))
output = subprocess.check_output(['ip', '-4', 'address', 'show', 'dev', 'veth99', 'scope', 'global']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '192.168.5')
self.assertRegex(output, 'valid_lft forever preferred_lft forever')
output = subprocess.check_output(['ip', '-6', 'address', 'show', 'dev', 'veth99', 'scope', 'global']).rstrip().decode('utf-8')
print(output)
self.assertRegex(output, '2600::')
self.assertRegex(output, 'valid_lft forever preferred_lft forever')
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout,
verbosity=3))
| bengal/systemd | test/test-network/systemd-networkd-tests.py | Python | gpl-2.0 | 59,885 |
# Get the configuration
import os
import json
spath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "settings.json")
with open(spath, "r") as settings_file:
    configuration = json.load(settings_file)
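# The keys in settings.json are project-specific; a hypothetical example of its
# contents (for illustration only): {"host": "localhost", "port": 8080}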
| jonwedell/homeautomation | modules/__init__.py | Python | gpl-3.0 | 190 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_split_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringSplitOpTest(test.TestCase):
def testStringSplit(self):
strings = ["pigs on the wing", "animals"]
with self.cached_session() as sess:
tokens = string_ops.string_split(strings)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [1, 0]])
self.assertAllEqual(values, [b"pigs", b"on", b"the", b"wing", b"animals"])
self.assertAllEqual(shape, [2, 4])
@test_util.run_deprecated_v1
def testStringSplitEmptyDelimiter(self):
strings = ["hello", "hola", b"\xF0\x9F\x98\x8E"] # Last string is U+1F60E
with self.cached_session() as sess:
tokens = string_ops.string_split(strings, delimiter="")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[1, 0], [1, 1], [1, 2], [1, 3], [2, 0],
[2, 1], [2, 2], [2, 3]])
expected = np.array(
[
"h", "e", "l", "l", "o", "h", "o", "l", "a", b"\xf0", b"\x9f",
b"\x98", b"\x8e"
],
dtype="|S1")
self.assertAllEqual(values.tolist(), expected)
self.assertAllEqual(shape, [3, 5])
def testStringSplitEmptyToken(self):
strings = ["", " a", "b ", " c", " ", " d ", " e", "f ", " g ", " "]
with self.cached_session() as sess:
tokens = string_ops.string_split(strings)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(
indices,
[[1, 0], [2, 0], [3, 0], [5, 0], [6, 0], [7, 0], [8, 0]])
self.assertAllEqual(values, [b"a", b"b", b"c", b"d", b"e", b"f", b"g"])
self.assertAllEqual(shape, [10, 1])
def testStringSplitOnSetEmptyToken(self):
strings = ["", " a", "b ", " c", " ", " d ", ". e", "f .", " .g. ", " ."]
with self.cached_session() as sess:
tokens = string_ops.string_split(strings, delimiter=" .")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(
indices,
[[1, 0], [2, 0], [3, 0], [5, 0], [6, 0], [7, 0], [8, 0]])
self.assertAllEqual(values, [b"a", b"b", b"c", b"d", b"e", b"f", b"g"])
self.assertAllEqual(shape, [10, 1])
@test_util.run_deprecated_v1
def testStringSplitWithDelimiter(self):
strings = ["hello|world", "hello world"]
with self.cached_session() as sess:
self.assertRaises(
ValueError, string_ops.string_split, strings, delimiter=["|", ""])
self.assertRaises(
ValueError, string_ops.string_split, strings, delimiter=["a"])
tokens = string_ops.string_split(strings, delimiter="|")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
self.assertAllEqual(values, [b"hello", b"world", b"hello world"])
self.assertAllEqual(shape, [2, 2])
tokens = string_ops.string_split(strings, delimiter="| ")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0], [1, 1]])
self.assertAllEqual(values, [b"hello", b"world", b"hello", b"world"])
self.assertAllEqual(shape, [2, 2])
@test_util.run_deprecated_v1
def testStringSplitWithDelimiterTensor(self):
strings = ["hello|world", "hello world"]
with self.cached_session() as sess:
delimiter = array_ops.placeholder(dtypes.string)
tokens = string_ops.string_split(strings, delimiter=delimiter)
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: "|"})
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
self.assertAllEqual(values, [b"hello", b"world", b"hello world"])
self.assertAllEqual(shape, [2, 2])
@test_util.run_deprecated_v1
def testStringSplitWithDelimitersTensor(self):
strings = ["hello.cruel,world", "hello cruel world"]
with self.cached_session() as sess:
delimiter = array_ops.placeholder(dtypes.string)
tokens = string_ops.string_split(strings, delimiter=delimiter)
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: ".,"})
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [1, 0]])
self.assertAllEqual(values,
[b"hello", b"cruel", b"world", b"hello cruel world"])
self.assertAllEqual(shape, [2, 3])
def testStringSplitWithNoSkipEmpty(self):
strings = ["#a", "b#", "#c#"]
with self.cached_session() as sess:
tokens = string_ops.string_split(strings, "#", skip_empty=False)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1],
[1, 0], [1, 1],
[2, 0], [2, 1], [2, 2]])
self.assertAllEqual(values, [b"", b"a", b"b", b"", b"", b"c", b""])
self.assertAllEqual(shape, [3, 3])
with self.cached_session() as sess:
tokens = string_ops.string_split(strings, "#")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(values, [b"a", b"b", b"c"])
self.assertAllEqual(indices, [[0, 0], [1, 0], [2, 0]])
self.assertAllEqual(shape, [3, 1])
class StringSplitV2OpTest(test.TestCase):
def testSplitV2(self):
strings = ["pigs on the wing", "animals"]
with self.cached_session() as sess:
tokens = string_ops.string_split_v2(strings)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [1, 0]])
self.assertAllEqual(values, [b"pigs", b"on", b"the", b"wing", b"animals"])
self.assertAllEqual(shape, [2, 4])
def testSplitV2MultiCharSeparator(self):
# Match Python behavior:
# >>> '1<>2<>3'.split('<>')
# ['1', '2', '3']
# >>> "<><>4<>5<><>6<>".split("<>")
# ['', '', '4', '5', '', '6', '']
strings = ["1<>2<>3", "<><>4<>5<><>6<>"]
with self.cached_session() as sess:
tokens = string_ops.string_split_v2(strings, sep="<>")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(
indices, [[0, 0], [0, 1], [0, 2],
[1, 0], [1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6]])
self.assertAllEqual(values, [b"1", b"2", b"3",
b"", b"", b"4", b"5", b"", b"6", b""])
self.assertAllEqual(shape, [2, 7])
def testSplitV2SimpleSeparator(self):
# Match Python behavior:
# >>> '1,2,3'.split(',')
# ['1', '2', '3']
# >>> '1,2,,3,'.split(',')
# ['1', '2', '', '3', '']
strings = ["1,2,3", "4,5,,6,"]
with self.cached_session() as sess:
tokens = string_ops.string_split_v2(strings, sep=',')
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2],
[1, 0], [1, 1], [1, 2], [1, 3], [1, 4]])
self.assertAllEqual(values, [b"1", b"2", b"3",
b"4", b"5", b"", b"6", b""])
self.assertAllEqual(shape, [2, 5])
def testSplitV2EmptySeparator(self):
# Match Python behavior:
# >>> '1 2 3'.split()
# ['1', '2', '3']
    # >>> ' 1 2 3 '.split()
    # ['1', '2', '3']
strings = ["1 2 3", " 4 5 6 "]
with self.cached_session() as sess:
tokens = string_ops.string_split_v2(strings)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2],
[1, 0], [1, 1], [1, 2]])
self.assertAllEqual(values, [b"1", b"2", b"3", b"4", b"5", b"6"])
self.assertAllEqual(shape, [2, 3])
def testSplitV2SimpleSeparatorMaxSplit(self):
# Match Python behavior:
# >>> '1,2,3'.split(',', maxsplit=1)
# ['1', '2,3']
# >>> '4,5,,6,'.split(',', maxsplit=1)
# ['4', '5,,6,']
strings = ["1,2,3", "4,5,,6,"]
with self.cached_session() as sess:
tokens = string_ops.string_split_v2(strings, sep=',', maxsplit=1)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1],
[1, 0], [1, 1]])
self.assertAllEqual(values, [b"1", b"2,3", b"4", b"5,,6,"])
self.assertAllEqual(shape, [2, 2])
def testSplitV2EmptySeparatorMaxSplit(self):
# Match Python behavior:
# '1 2 3'.split(maxsplit=1)
# ['1', '2 3']
# >>> " 4 5 6 ".split(maxsplit=1)
# ['4', '5 6 ']
strings = ["1 2 3", " 4 5 6 "]
with self.cached_session() as sess:
tokens = string_ops.string_split_v2(strings, maxsplit=1)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1],
[1, 0], [1, 1]])
self.assertAllEqual(values, [b"1", b"2 3", b"4", b"5 6 "])
self.assertAllEqual(shape, [2, 2])
if __name__ == "__main__":
test.main()
| jbedorf/tensorflow | tensorflow/python/kernel_tests/string_split_op_test.py | Python | apache-2.0 | 10,529 |
from __future__ import print_function
import numpy as np
import tensorflow as tf
import argparse
import time
import os
from six.moves import cPickle
from utils import TextLoader
from model import Model
from six import text_type
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', type=str, default='save',
help='model directory to store checkpointed models')
parser.add_argument('-n', type=int, default=500,
help='number of characters to sample')
parser.add_argument('--prime', type=text_type, default=u' ',
help='prime text')
parser.add_argument('--sample', type=int, default=1,
help='0 to use max at each timestep, 1 to sample at each timestep, 2 to sample on spaces')
args = parser.parse_args()
sample(args)
def sample(args):
with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
saved_args = cPickle.load(f)
with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
chars, vocab = cPickle.load(f)
model = Model(saved_args, True)
with tf.Session() as sess:
tf.initialize_all_variables().run()
saver = tf.train.Saver(tf.all_variables())
ckpt = tf.train.get_checkpoint_state(args.save_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print(model.sample(sess, chars, vocab, args.n, args.prime, args.sample))
if __name__ == '__main__':
main()
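# Typical invocation (paths and prime text are illustrative; defaults match the
# argparse definitions above):
#   python sample.py --save_dir save -n 500 --prime "The "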
| hpssjellis/char-rnn-tensorflow-music-3dprinting | sample.py | Python | mit | 1,557 |
# -*- encoding: utf-8 -*-
from . import FixtureTest
class BikeShopTest(FixtureTest):
def test_bicycle_shop_node(self):
import dsl
z, x, y = (16, 10479, 25331)
self.generate_fixtures(
# https://www.openstreetmap.org/node/2484293076
dsl.point(2484293076, (-122.432202, 37.771036), {
'addr:city': 'San Francisco',
'addr:housenumber': '520',
'addr:street': 'Waller Street',
'name': 'Wiggle Bicycles',
'opening_hours': 'Tu-Fr 10:00-18:30; Sa-Su 10:00-17:00',
'service:bicycle:diy': 'no',
'service:bicycle:pump': 'yes',
'service:bicycle:repair': 'yes',
'shop': 'bicycle',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 2484293076,
'kind': 'bicycle',
'min_zoom': 17,
})
def test_bicycle_shop_large_way(self):
import dsl
z, x, y = (15, 5242, 12665)
self.generate_fixtures(
# https://www.openstreetmap.org/way/260354461
dsl.way(260354461, dsl.box_area(z, x, y, 723), {
'addr:city': 'San Francisco',
'addr:housenumber': '1090',
'addr:postcode': '94103',
'addr:state': 'CA',
'addr:street': 'Folsom Street',
'building': 'yes',
'height': '7',
'name': 'SF Bike Connection',
'shop': 'bicycle',
'source': 'openstreetmap.org',
'website': 'http://bikeconnection.net/',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 260354461,
'kind': 'bicycle',
# should be in z15 tile, so min_zoom between 15 and 16
'min_zoom': lambda z: 15 <= z < 16,
})
def test_bicycle_shop_small_way(self):
import dsl
z, x, y = (16, 10476, 25332)
self.generate_fixtures(
# https://www.openstreetmap.org/way/264534357
dsl.way(264534357, dsl.box_area(z, x, y, 362), {
'addr:city': 'San Francisco',
'addr:housenumber': '858',
'addr:postcode': '94117',
'addr:street': 'Stanyan Street',
'building': 'yes',
'height': '5',
'name': 'American Cyclery Too',
'operator': 'American Cyclery',
'service:bicycle:pump': 'yes',
'service:bicycle:repair': 'yes',
'service:bicycle:retail': 'yes',
'service:bicycle:second_hand': 'yes',
'shop': 'bicycle',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 264534357,
'kind': 'bicycle',
# min_zoom between 16 and 17, less than the node at z17
'min_zoom': lambda z: 16 <= z < 17,
})
class TheatreTest(FixtureTest):
def test_theatre_node(self):
import dsl
z, x, y = (16, 10483, 25329)
self.generate_fixtures(
# https://www.openstreetmap.org/node/358805392
dsl.point(358805392, (-122.411371, 37.782168), {
'amenity': 'theatre',
'ele': '14',
'gnis:county_id': '075',
'gnis:created': '01/01/1995',
'gnis:feature_id': '1657186',
'gnis:state_id': '06',
'name': 'Market Street Theatre',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 358805392,
'kind': 'theatre',
'min_zoom': 17,
})
def test_theatre_medium_way(self):
import dsl
z, x, y = (16, 10483, 25330)
self.generate_fixtures(
# https://www.openstreetmap.org/way/35115840
dsl.way(35115840, dsl.box_area(z, x, y, 4782), {
'amenity': 'theatre',
'building': 'yes',
'height': '46 m',
'name': 'Orpheum Theatre',
'source': 'openstreetmap.org',
'wikidata': 'Q7103971',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 35115840,
'kind': 'theatre',
'min_zoom': lambda z: 16 <= z < 17,
})
def test_theatre_large_way(self):
import dsl
z, x, y = (15, 9650, 12314)
self.generate_fixtures(
# https://www.openstreetmap.org/way/266170808
dsl.way(266170808, dsl.box_area(z, x, y, 7492), {
'amenity': 'theatre',
'building': 'yes',
'building:colour': '#CAC3A9',
'building:part': 'yes',
'height': '30',
'name': 'Radio City Music Hall',
'nycdoitt:bin': '1083862',
'opening_hours': '09:30-17:00',
'roof:colour': '#956C66',
'roof:material': 'concrete',
'roof:shape': 'flat',
'source': 'openstreetmap.org',
'tourism': 'yes',
'website': 'http://www.radiocity.com/',
'wikidata': 'Q753437',
'wikipedia': 'en:Radio City Music Hall',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 266170808,
'kind': 'theatre',
'min_zoom': lambda z: 15 <= z < 16,
})
class WaterTowerTest(FixtureTest):
def test_water_tower_no_height(self):
import dsl
z, x, y = (16, 10477, 25334)
self.generate_fixtures(
# https://www.openstreetmap.org/way/247759532
dsl.way(247759532, dsl.box_area(z, x, y, 448), {
'man_made': 'water_tower',
'name': 'Ashbury tank',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 247759532,
'kind': 'water_tower',
'min_zoom': 17,
})
def test_water_tower_tall(self):
import dsl
z, x, y = (15, 5240, 12671)
self.generate_fixtures(
# https://www.openstreetmap.org/way/424957085
dsl.way(424957085, dsl.box_area(z, x, y, 146), {
'building': 'yes',
'height': '23',
'man_made': 'water_tower',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 424957085,
'kind': 'water_tower',
'min_zoom': 15,
})
def test_water_tower_short(self):
import dsl
z, x, y = (16, 11310, 26168)
self.generate_fixtures(
# https://www.openstreetmap.org/way/470968538
dsl.way(470968538, dsl.box_area(z, x, y, 198), {
'ele': '300.5',
'height': '10.9',
'lacounty:ain': '8277036900',
'lacounty:bld_id': '600581840682',
'man_made': 'water_tower',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 470968538,
'kind': 'water_tower',
'min_zoom': 16,
})
| mapzen/vector-datasource | integration-test/1627-modify-zoom-bicycle-theatre-water-tower.py | Python | mit | 7,856 |
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:thenault@gmail.com
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
from __future__ import generators
from data.module import YO, YOUPI
import data
class Specialization(YOUPI, YO): pass
class Metaclass(type): pass
class Interface: pass
class MyIFace(Interface): pass
class AnotherIFace(Interface): pass
class MyException(Exception): pass
class MyError(MyException): pass
class AbstractClass(object):
def to_override(self, whatever):
raise NotImplementedError()
def return_something(self, param):
if param:
return 'toto'
return
class Concrete0:
__implements__ = MyIFace
class Concrete1:
__implements__ = MyIFace, AnotherIFace
class Concrete2:
__implements__ = (MyIFace,
AnotherIFace)
class Concrete23(Concrete1): pass
del YO.member
del YO
[SYN1, SYN2] = Concrete0, Concrete1
assert `1`
b = 1 | 2 & 3 ^ 8
bb = 1 | two | 6
ccc = one & two & three
dddd = x ^ o ^ r
exec 'c = 3'
exec 'c = 3' in {}, {}
def raise_string(a=2, *args, **kwargs):
raise 'pas glop'
raise Exception, 'yo'
yield 'coucou'
a = b + 2
c = b * 2
c = b / 2
c = b // 2
c = b - 2
c = b % 2
c = b ** 2
c = b << 2
c = b >> 2
c = ~b
c = not b
d = [c]
e = d[:]
e = d[a:b:c]
raise_string(*args, **kwargs)
print >> stream, 'bonjour'
print >> stream, 'salut',
def make_class(any, base=data.module.YO, *args, **kwargs):
"""check base is correctly resolved to Concrete0"""
class Aaaa(base):
"""dynamic class"""
return Aaaa
from os.path import abspath
import os as myos
class A:
pass
class A(A):
pass
| dbbhattacharya/kitsune | vendor/packages/logilab-astng/test/data/module2.py | Python | bsd-3-clause | 2,470 |
# -*- coding: utf-8 -*-
import six
from jenkinsapi_utils.compat import (
to_string,
needs_encoding,
)
def test_needs_encoding_py2():
if six.PY2:
unicode_str = u'юникод'
assert needs_encoding(unicode_str)
assert not needs_encoding('string')
assert not needs_encoding(5)
assert not needs_encoding(['list', 'of', 'strings'])
def test_to_string():
assert isinstance(to_string(5), str)
assert isinstance(to_string('string'), str)
assert isinstance(to_string(['list', 'of', 'strings']), str)
assert isinstance(to_string(u'unicode'), str)
| salimfadhley/jenkinsapi | jenkinsapi_tests/unittests/test_compat.py | Python | mit | 599 |
from django import forms
from stall.forms.bases import BaseForm
class SellerForm(BaseForm):
circle_name = forms.CharField(max_length=40)
circle_description = forms.CharField(required=False, widget=forms.Textarea)
proposer_name = forms.CharField(max_length=20)
proposer_sex = forms.CharField(max_length=20)
proposer_qq = forms.CharField(required=False, max_length=11)
proposer_phone = forms.CharField(max_length=20)
proposer_id = forms.CharField(required=False, max_length=18)
| sunoru/pokemon_only | stall/forms/seller.py | Python | gpl-2.0 | 507 |
# -*- coding: utf-8 -*-
def app(environ, start_response):
    data = b'Hello, World!\n'  # bytes, so the response body is valid WSGI under both Python 2 and 3
status = '200 OK'
response_headers = [
('Content-type','text/plain'),
('Content-Length', str(len(data)))
]
start_response(status, response_headers)
return iter([data])
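# A typical way to serve this app (gunicorn's module:callable form):
#   gunicorn app:app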
| tanacasino/gunicorn-ssl | app.py | Python | apache-2.0 | 290 |
# -*- coding: utf-8 -*-
#
# Jansson documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 5 21:47:20 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('ext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['refcounting']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Jansson'
copyright = u'2009-2012, Petri Lehtinen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.4'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'c:func'
primary_domain = 'c'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Janssondoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Jansson.tex', u'Jansson Documentation',
u'Petri Lehtinen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jansson', u'Jansson Documentation',
[u'Petri Lehtinen'], 1)
]
| andrei14vl/cubrid | external/jansson-2.4/doc/conf.py | Python | gpl-3.0 | 7,025 |
"""
Test_collection_measures
------------------------
Test the module functions.
"""
import numpy as np
from corrs_measures import measure_corrs_sumstd, measure_corrs_std,\
measure_corrs_mean
def test():
### Measures testing
corrs = np.random.random((10, 10))
    measure_corrs_sumstd(corrs)
    measure_corrs_std(corrs)
    measure_corrs_mean(corrs)
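
if __name__ == '__main__':
    # Allow running the smoke test directly (assumes corrs_measures is importable).
    test()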
| tgquintela/pythonUtils | pythonUtils/CollectionMeasures/test_collectionmeasures.py | Python | mit | 386 |
"""
NeuroTools.parameters
=====================
A module for dealing with model parameters.
Classes
-------
Parameter
ParameterRange - for specifying a list of possible values for a given parameter.
ParameterReference - specify a parameter in terms of the value of another parameter.
ParameterSet - for representing/managing hierarchical parameter sets.
ParameterTable - a sub-class of ParameterSet that can represent a table of parameters.
ParameterSpace - a collection of ParameterSets, representing multiple points in
parameter space.
**Imported from NeuroTools.parameters.validators**
ParameterSchema - A sub-class of ParameterSet against which other ParameterSets
                  can be validated, using a Validator as found in the sub-package
                  NeuroTools.parameters.validators
CongruencyValidator - A CongruencyValidator validates a ParameterSet against a ParameterSchema
via member "validate(parameter_set,parameter_schema)".
ValidationError - The Exception raised when validation fails
SchemaBase - The base class of all "active" Schema objects to be placed in a ParameterSchema.
             -> Subclass - Validates the same-path ParameterSet value if it is of the specified type.
-> Eval - Validates the same-path ParameterSet value if the provided expression
evaluates ("eval") to True.
Functions
---------
nesteddictwalk - Walk a nested dict structure, using a generator.
nesteddictflatten - Return a flattened version of a nested dict structure.
string_table - Convert a table written as a multi-line string into a dict of dicts.
Sub-Packages
------------
validators - A module implementing validation of ParameterSets against ParameterSchema.
"""
# import copy
import warnings
import math
import numpy
import operator
from functools import wraps
try:
    from urllib2 import build_opener, install_opener, urlopen, ProxyHandler, HTTPHandler  # Python 2
    from urlparse import urlparse
except ImportError:
    from urllib.request import build_opener, install_opener, urlopen, ProxyHandler, HTTPHandler  # Python 3
    from urllib.parse import urlparse
from NeuroTools.random import ParameterDist, GammaDist, UniformDist, NormalDist
from os import environ, path
import random
from copy import copy
try:
basestring
except NameError:
basestring = str
try:
next # Python 3
except NameError:
def next(obj): # Python 2
return obj.next()
__version__ = '0.2.1'
if 'HTTP_PROXY' in environ:
HTTP_PROXY = environ['HTTP_PROXY'] # user has to define it
    # pass the proxy information on to urllib
proxy_support = ProxyHandler({"https": HTTP_PROXY})
opener = build_opener(proxy_support, HTTPHandler)
install_opener(opener)
def isiterable(x):
return (hasattr(x, '__iter__') and not isinstance(x, basestring))
def contains_instance(collection, cls):
return any(isinstance(o, cls) for o in collection)
def nesteddictwalk(d, separator='.'):
"""
Walk a nested dict structure, using a generator.
Composite keys are created by joining each key to the key of the parent dict
using `separator`.
"""
for key1, value1 in d.items():
if isinstance(value1, dict):
for key2, value2 in nesteddictwalk(value1, separator): # recurse into subdict
yield "%s%s%s" % (key1, separator, key2), value2
else:
yield key1, value1
def nesteddictflatten(d, separator='.'):
"""
Return a flattened version of a nested dict structure.
Composite keys are created by joining each key to the key of the parent dict
using `separator`.
"""
flatd = {}
for k, v in nesteddictwalk(d, separator):
flatd[k] = v
return flatd
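# Quick self-check (illustration only, not part of the original API) of the
# two helpers above on a small hand-made nested dict:
def _nesteddict_example():
    d = {'a': {'b': 1, 'c': {'d': 2}}}
    assert dict(nesteddictwalk(d)) == {'a.b': 1, 'a.c.d': 2}
    assert nesteddictflatten(d) == {'a.b': 1, 'a.c.d': 2}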
# --- Parameters, and ranges and distributions of them -------------------
class Parameter(object):
def __init__(self, value, units=None, name=""):
self.name = name
self.value = value
self.units = units
self.type = type(value)
def __repr__(self):
s = "%s = %s" % (self.name, self.value)
if self.units is not None:
s += " %s" % self.units
return s
class ParameterRange(Parameter):
"""
A class for specifying a list of possible values for a given parameter.
The value must be an iterable. It acts like a Parameter, but .next() can be
called to iterate through the values
"""
def __init__(self, value, units=None, name="", shuffle=False):
if not isiterable(value):
raise TypeError("A ParameterRange value must be iterable")
Parameter.__init__(self, next(value.__iter__()), units, name)
self._values = copy(value)
self._iter_values = self._values.__iter__()
if shuffle:
random.shuffle(self._values)
def __repr__(self):
units_str = ''
if self.units:
units_str = ', units="%s"' % self.units
return 'ParameterRange(%s%s)' % (self._values.__repr__(), units_str)
def __iter__(self):
self._iter_values = self._values.__iter__()
return self._iter_values
def __next__(self):
self._value = next(self._iter_values)
return self._value
def next(self):
return self.__next__()
def __len__(self):
return len(self._values)
def __eq__(self, o):
if (type(self) == type(o) and
self.name == o.name and
self._values == o._values and
self.units == o.units):
return True
else:
return False
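# Minimal usage sketch for ParameterRange (values are illustrative):
#     tau_m = ParameterRange([10.0, 15.0, 20.0], units="ms", name="tau_m")
#     list(tau_m)  # -> [10.0, 15.0, 20.0]
# Iterating again restarts from the beginning, since __iter__ resets the
# internal iterator.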
# --- ReferenceParameter
def reverse(func):
"""Given a function f(a, b), returns f(b, a)"""
@wraps(func)
def reversed_func(a, b):
return func(b, a)
reversed_func.__doc__ = "Reversed argument form of %s" % func.__doc__
reversed_func.__name__ = "reversed %s" % func.__name__
return reversed_func
def lazy_operation(name, reversed=False):
def op(self, val):
f = getattr(operator, name)
if reversed:
f = reverse(f)
self.operations.append((f, val))
return self
return op
class ParameterReference(object):
"""
This class provides a place-holder for a reference parameter that will
later be replaced with the value of the parameter pointed to by the
reference. This class also allows for lazy application of operations,
meaning that one can use the reference in simple formulas that will get
evaluated at the moment the reference is replaced.
Check below which operations are supported.
"""
def __init__(self,reference):
object.__init__(self)
self.reference_path = reference
self.operations = []
def _apply_operations(self, x):
for f, arg in self.operations:
try:
if arg is None:
x = f(x)
else:
x = f(x, arg)
except TypeError:
raise TypeError("ParameterReference: error applying operation " + str(f) + " with argument " + str(arg) + " to " + str(x))
return x
def evaluate(self,parameter_set):
"""
        This function evaluates the reference, using the ParameterSet in parameter_set as the source.
"""
ref_value = parameter_set[self.reference_path]
if isinstance(ref_value,ParameterSet):
if self.operations == []:
return ref_value.tree_copy()
else:
raise ValueError("ParameterReference: lazy operations cannot be applied to argument of type ParameterSet> %s" % self.reference_path)
elif isinstance(ref_value,ParameterReference):
            # the target is itself an unresolved reference; keep this reference
            # as-is until the target has been resolved
return self
else:
return self._apply_operations(ref_value)
def copy(self):
pr = ParameterReference(self.reference_path)
for f, arg in self.operations:
if isinstance(arg,ParameterReference):
pr.operations.append((f,arg.copy()))
else:
pr.operations.append((f,arg))
return pr
__add__ = lazy_operation('add')
__radd__ = __add__
__sub__ = lazy_operation('sub')
__rsub__ = lazy_operation('sub', reversed=True)
__mul__ = lazy_operation('mul')
__rmul__ = __mul__
__div__ = lazy_operation('div')
__rdiv__ = lazy_operation('div', reversed=True)
__truediv__ = lazy_operation('truediv')
__rtruediv__ = lazy_operation('truediv', reversed=True)
__pow__ = lazy_operation('pow')
def load_parameters(parameter_url, modified_parameters):
"""
This is a function that should be used to load a ParameterSet from a url.
`modified_parameters` should be a dictionary of parameters and their values.
These will be replaced in the loaded parameter set before the references are
expanded.
"""
parameters = ParameterSet(parameter_url)
parameters.replace_values(**modified_parameters)
parameters.replace_references()
return parameters
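# Typical call (the URL and dotted parameter name below are hypothetical):
#     params = load_parameters('file://./sim.params', {'sim.dt': 0.05})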
class ParameterSet(dict):
"""
A class to manage hierarchical parameter sets.
Usage example::
>>> sim_params = ParameterSet({'dt': 0.1, 'tstop': 1000.0})
>>> exc_cell_params = ParameterSet("http://neuralensemble.org/svn/NeuroTools/example.params")
>>> inh_cell_params = ParameterSet({'tau_m': 15.0, 'cm': 0.5})
>>> network_params = ParameterSet({'excitatory_cells': exc_cell_params, 'inhibitory_cells': inh_cell_params})
>>> P = ParameterSet({'sim': sim_params, 'network': network_params})
>>> P.sim.dt
0.1
>>> P.network.inhibitory_cells.tau_m
15.0
    >>> print(P.pretty())
"""
non_parameter_attributes = ['_url', 'label', 'names', 'parameters', 'flat',
'flatten', 'non_parameter_attributes']
invalid_names = ['parameters', 'names'] # should probably add dir(dict)
@staticmethod
def read_from_str(s, update_namespace=None):
"""
`ParameterSet` definition `s` should be a Python dict definition
string, containing objects of types `int`, `float`, `str`, `list`,
`dict` plus the classes defined in this module, `Parameter`,
`ParameterRange`, etc. No other object types are allowed,
except the function `url('some_url')` or `ref('point.delimited.path')`,
e.g.::
{ 'a' : {'A': 3, 'B': 4},
'b' : [1,2,3],
'c' : 'hello world',
'd' : url('http://example.com/my_cool_parameter_set')
'e' : ref('level1_param_name.level2_param_name.level3_param_name') }
This is largely the JSON (www.json.org) format, but with
extra keywords in the namespace such as `ParameterRange`, `GammaDist`, etc.
"""
global_dict = dict(ref=ParameterReference,
url=ParameterSet,
ParameterSet=ParameterSet,
ParameterRange=ParameterRange,
ParameterTable=ParameterTable,
GammaDist=GammaDist,
UniformDist=UniformDist,
NormalDist=NormalDist,
pi=math.pi,
true=True, # these are for reading JSON
false=False, # files
)
if update_namespace:
global_dict.update(update_namespace)
D = None
try:
if 'file://' in s:
path = s.split('file://')[1]
ifile = open(path, 'r')
content = ifile.read()
ifile.close()
D = eval(content, global_dict)
else:
D = eval(s, global_dict)
except SyntaxError as e:
raise SyntaxError(
"Invalid string for ParameterSet definition: %s\n%s" % (s, e))
except TypeError as e:
raise SyntaxError(
"Invalid string for ParameterSet definition: %s" % e)
return D or {}
@staticmethod
def check_validity(k):
"""docstring missing"""
if k in ParameterSet.invalid_names:
raise Exception("'%s' is not allowed as a parameter name." % k)
def __init__(self, initialiser, label=None, update_namespace=None):
def walk(d, label):
# Iterate through the dictionary `d`, replacing `dict`s by
# `ParameterSet` objects.
for k, v in d.items():
ParameterSet.check_validity(k)
if isinstance(v, ParameterSet):
d[k] = v
elif isinstance(v, dict):
d[k] = walk(v, k)
else:
d[k] = v
return ParameterSet(d, label)
self._url = None
if isinstance(initialiser, basestring): # url or str
if path.exists(initialiser):
f = open(initialiser, 'r')
pstr = f.read()
self._url = initialiser
f.close()
else:
try:
f = urlopen(initialiser)
pstr = f.read().decode()
self._url = initialiser
except IOError as e:
pstr = initialiser
self._url = None
else:
f.close()
# is it a yaml url?
if self._url:
o = urlparse(self._url)
base, ext = path.splitext(o.path)
if ext in ['.yaml', '.yml']:
import yaml
                    initialiser = yaml.safe_load(pstr)  # safe_load is sufficient for plain parameter files
else:
initialiser = ParameterSet.read_from_str(pstr,
update_namespace)
else:
initialiser = ParameterSet.read_from_str(pstr,
update_namespace)
# By this stage, `initialiser` should be a dict. Iterate through it,
# copying its contents into the current instance, and replacing dicts by
# ParameterSet objects.
if isinstance(initialiser, dict):
for k, v in initialiser.items():
ParameterSet.check_validity(k)
if isinstance(v, ParameterSet):
self[k] = v
elif isinstance(v, dict):
self[k] = walk(v, k)
else:
self[k] = v
else:
raise TypeError(
"`initialiser` must be a `dict`, a `ParameterSet` object, a string, or a valid URL")
# Set the label
if hasattr(initialiser, 'label'):
self.label = label or initialiser.label # if initialiser was a ParameterSet, keep the existing label if the label arg is None
else:
self.label = label
# Define some aliases, allowing, e.g.:
# for name, value in P.parameters():
# for name in P.names():
self.names = self.keys
self.parameters = self.items
    def flat(self):
        """Walk the ParameterSet, yielding (dotted_key, value) pairs; see `nesteddictwalk`."""
        return nesteddictwalk(self)
    def flatten(self):
        """Return a flat dict keyed by dotted paths; see `nesteddictflatten`."""
        return nesteddictflatten(self)
def __getattr__(self, name):
"""Allow accessing parameters using dot notation."""
try:
return self[name]
except KeyError:
return self.__getattribute__(name)
def __setattr__(self, name, value):
"""Allow setting parameters using dot notation."""
if name in self.non_parameter_attributes:
object.__setattr__(self, name, value)
else:
# should we check the parameter type hasn't changed?
self[name] = value
def __getitem__(self, name):
""" Modified get that detects dots '.' in the names and goes down the
nested tree to find it"""
split = name.split('.', 1)
if len(split) == 1:
return dict.__getitem__(self, name)
# nested get
return dict.__getitem__(self, split[0])[split[1]]
def flat_add(self, name, value):
""" Like `__setitem__`, but it will add `ParameterSet({})` objects
into the namespace tree if needed. """
split = name.split('.', 1)
if len(split) == 1:
dict.__setitem__(self, name, value)
else:
# nested set
try:
ps = dict.__getitem__(self, split[0])
except KeyError:
# setting nested name without parent existing
# create parent
ps = ParameterSet({})
dict.__setitem__(self, split[0], ps)
# and try again
ps.flat_add(split[1], value)
def __setitem__(self, name, value):
""" Modified set that detects dots '.' in the names and goes down the
nested tree to set it """
split = name.split('.', 1)
if len(split) == 1:
dict.__setitem__(self, name, value)
else:
# nested set
dict.__getitem__(self, split[0])[split[1]] = value
def update(self, E, **F):
"""docstring missing"""
if hasattr(E, "has_key"):
for k in E:
self[k] = E[k]
else:
for (k, v) in E:
self[k] = v
for k in F:
self[k] = F[k]
# should __len__() be the usual dict length, or the flattened length? Probably the former for consistency with dicts
# can always use len(ps.flatten())
# what about __contains__()? Should we drill down to lower levels in the
# hierarchy? I think so.
def __getstate__(self):
"""For pickling."""
return self
def save(self, url=None, expand_urls=False):
"""
Write the parameter set to a text file.
The text file syntax is open to discussion. My idea is that it should be
valid Python code, preferably importable as a module.
If `url` is `None`, try to save to `self._url` (if it is not `None`),
otherwise save to `url`.
"""
# possible solution for HTTP PUT: http://inamidst.com/proj/put/put.py
if not url:
url = self._url
assert url != ''
if not self._url:
self._url = url
scheme, netloc, path, parameters, query, fragment = urlparse(url)
if scheme == 'file' or (scheme == '' and netloc == ''):
f = open(path, 'w')
f.write(self.pretty(expand_urls=expand_urls))
f.close()
else:
if scheme:
raise Exception(
"Saving using the %s protocol is not implemented" % scheme)
else:
raise Exception("No protocol (http, ftp, etc) specified.")
def pretty(self, indent=' ', expand_urls=False):
"""
Return a unicode string representing the structure of the `ParameterSet`.
        Evaluating the string should recreate the object.
"""
def walk(d, indent, ind_incr):
s = []
for k, v in d.items():
if hasattr(v, 'items'):
if expand_urls is False and hasattr(v, '_url') and v._url:
s.append('%s"%s": url("%s"),' % (indent, k, v._url))
else:
s.append('%s"%s": {' % (indent, k))
s.append(walk(v, indent+ind_incr, ind_incr))
s.append('%s},' % indent)
elif isinstance(v, basestring):
s.append('%s"%s": "%s",' % (indent, k, v))
else: # what if we have a dict or ParameterSet inside a list? currently they are not expanded. Should they be?
s.append('%s"%s": %s,' % (indent, k, v))
return '\n'.join(s)
return '{\n' + walk(self, indent, indent) + '\n}'
def tree_copy(self):
"""Return a copy of the `ParameterSet` tree structure.
Nodes are not copied, but re-referenced."""
tmp = ParameterSet({})
for key in self:
value = self[key]
if isinstance(value, ParameterSet):
tmp[key] = value.tree_copy()
elif isinstance(value,ParameterReference):
tmp[key] = value.copy()
else:
tmp[key] = value
if tmp._is_space():
tmp = ParameterSpace(tmp)
return tmp
def as_dict(self):
"""Return a copy of the `ParameterSet` tree structure
as a nested dictionary"""
tmp = {}
for key in self:
value = self[key]
if isinstance(value, ParameterSet):
# recurse
tmp[key] = value.as_dict()
else:
tmp[key] = value
return tmp
def __sub__(self, other):
"""
Return the difference between this `ParameterSet` and another.
Not yet properly implemented.
"""
self_keys = set(self)
other_keys = set(other)
intersection = self_keys.intersection(other_keys)
difference1 = self_keys.difference(other_keys)
difference2 = other_keys.difference(self_keys)
result1 = dict([(key, self[key]) for key in difference1])
result2 = dict([(key, other[key]) for key in difference2])
# Now need to check values for intersection....
for item in intersection:
if isinstance(self[item], ParameterSet):
d1, d2 = self[item] - other[item]
if d1:
result1[item] = d1
if d2:
result2[item] = d2
elif self[item] != other[item]:
result1[item] = self[item]
result2[item] = other[item]
if len(result1) + len(result2) == 0:
assert self == other, "Error in ParameterSet.diff()"
return result1, result2
def _is_space(self):
"""
Check for the presence of `ParameterRanges` or `ParameterDists` to
determine if this is a `ParameterSet` or a `ParameterSpace`.
"""
for k, v in self.flat():
if isinstance(v, ParameterRange) or isinstance(v, ParameterDist):
return True
return False
def export(self, filename, format='latex', **kwargs):
"""
docstring missing
"""
if format == 'latex':
from .export import parameters_to_latex
parameters_to_latex(filename, self, **kwargs)
def replace_references(self):
while True:
refs = self.find_references()
if len(refs) == 0:
break
for s, k, v in refs:
s[k] = v.evaluate(self)
def find_references(self):
l = []
for k, v in self.items():
if isinstance(v, ParameterReference):
l += [(self, k, v)]
elif isinstance(v, ParameterSet):
l += v.find_references()
return l
def replace_values(self,**args):
"""
This expects its arguments to be in the form path=value, where path is a
. (dot) delimited path to a parameter in the parameter tree rooted in
this ParameterSet instance.
This function replaces the values of each parameter in the args with the
corresponding values supplied in the arguments.
"""
for k in args.keys():
self[k] = args[k]
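# Small self-contained sketch (not part of the original API) of how a
# ParameterReference with a lazy operation is resolved by replace_references():
def _reference_example():
    p = ParameterSet({'a': 2.0, 'b': ParameterReference('a') * 3})
    p.replace_references()
    assert p.b == 6.0
    return p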
class ParameterSpace(ParameterSet):
"""
A collection of `ParameterSets`, representing multiple points in
parameter space. Created by putting `ParameterRange` and/or `ParameterDist`
objects within a `ParameterSet`.
"""
def iter_range_key(self, range_key):
""" An iterator of the `ParameterSpace` which yields the
`ParameterSet` with the `ParameterRange` given by `range_key` replaced with
each of its values"""
tmp = self.tree_copy()
for val in self[range_key]:
tmp[range_key] = val
yield tmp
def iter_inner_range_keys(self, keys, copy=False):
""" An iterator of the `ParameterSpace` which yields
`ParameterSets` with all combinations of `ParameterRange` elements
which are given by the `keys` list.
Note: each newly yielded value is one and the same object
so storing the returned values results in a collection
of many of the lastly yielded object.
`copy=True` causes each yielded object to be a newly
created object, but be careful because this is
spawning many dictionaries!
"""
if len(keys) == 0:
# return an iterator over 1 copy for modifying
yield self.tree_copy()
return
if not copy:
# recursively iterate over remaining keys
for tmp in self.iter_inner_range_keys(keys[1:]):
# iterator over range of our present attention
for val in self[keys[0]]:
tmp[keys[0]] = val
if not tmp._is_space():
tmp = ParameterSet(tmp)
yield tmp
else:
# Each yielded ParameterSet is a tree_copy of self
# recursively iterate over remaining keys
for tmp in self.iter_inner_range_keys(keys[1:]):
# iterator over range of our present attention
for val in self[keys[0]]:
tmp_copy = tmp.tree_copy()
tmp_copy[keys[0]] = val
if not tmp_copy._is_space():
                        tmp_copy = ParameterSet(tmp_copy)
yield tmp_copy
def range_keys(self):
"""Return the list of keys for those elements which are `ParameterRanges`."""
return [key for key, value in self.flat() if isinstance(value, ParameterRange)]
def iter_inner(self, copy=False):
"""An iterator of the `ParameterSpace` which yields
`ParameterSets` with all combinations of `ParameterRange` elements"""
return self.iter_inner_range_keys(self.range_keys(), copy)
def num_conditions(self):
"""Return the number of `ParameterSets` that will be returned by the
`iter_inner()` method."""
# Not properly tested
n = 1
for key in self.range_keys():
n *= len(self[key])
return n
def dist_keys(self):
"""Return the list of keys for those elements which are `ParameterDists`."""
def is_or_contains_dist(value):
return isinstance(value, ParameterDist) or (
isiterable(value) and contains_instance(value, ParameterDist))
return [key for key, value in self.flat() if is_or_contains_dist(value)]
def realize_dists(self, n=1, copy=False):
"""For each `ParameterDist`, realize the distribution and yield the result.
If `copy==True`, causes each yielded object to be a newly
created object, but be careful because this is
spawning many dictionaries!"""
def next(item, n):
if isinstance(item, ParameterDist):
return item.next(n)
else:
return [item]*n
# pre-generate random numbers
rngs = {}
for key in self.dist_keys():
if isiterable(self[key]):
rngs[key] = [next(item, n) for item in self[key]]
else:
rngs[key] = self[key].next(n)
# get a copy to fill in the rngs
if copy:
tmp = self.tree_copy()
for i in range(n):
for key in rngs:
if isiterable(self[key]):
tmp[key] = [rngs[key][j][i]
for j in range(len(rngs[key]))]
else:
tmp[key] = rngs[key][i]
yield tmp.tree_copy()
else:
tmp = self.tree_copy()
for i in range(n):
for key in rngs:
if isiterable(self[key]):
tmp[key] = [rngs[key][j][i]
for j in range(len(rngs[key]))]
else:
tmp[key] = rngs[key][i]
yield tmp
def parameter_space_dimension_labels(self):
"""
Return the dimensions and labels of the keys for those elements which are `ParameterRanges`.
`range_keys` are sorted to ensure the same ordering each time.
"""
range_keys = self.range_keys()
range_keys.sort()
dim = []
label = []
for key in range_keys:
label.append(key)
            dim.append(len(self[key]))
return dim, label
def parameter_space_index(self, current_experiment):
"""
Return the index of the current experiment in the dimension of the parameter space
i.e. parameter space dimension: [2,3]
i.e. index: (1,0)
Example::
p = ParameterSet({})
p.b = ParameterRange([1,2,3])
p.a = ParameterRange(['p','y','t','h','o','n'])
results_dim, results_label = p.parameter_space_dimension_labels()
results = numpy.empty(results_dim)
for experiment in p.iter_inner():
index = p.parameter_space_index(experiment)
results[index] = 2.
"""
index = []
range_keys = self.range_keys()
range_keys.sort()
for key in range_keys:
            value = current_experiment[key]
try:
                value_index = list(self[key]._values).index(value)
except ValueError:
raise ValueError(
"The ParameterSet provided is not within the ParameterSpace")
index.append(value_index)
return tuple(index)
def get_ranges_values(self):
"""
Return a dict with the keys and values of the parameters with `ParameterRanges`
Example::
>>> p = ParameterSpace({})
>>> p.b = ParameterRange([1,2,3])
>>> p.a = ParameterRange(['p','y','t','h','o','n'])
>>> data = p.get_ranges_values()
>>> data
{'a': ['p', 'y', 't', 'h', 'o', 'n'], 'b': [1, 2, 3]}
"""
data = {}
range_keys = self.range_keys()
range_keys.sort()
for key in range_keys:
            data[key] = self[key]._values
return data
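# Sketch (illustration only): expanding a ParameterSpace into the concrete
# ParameterSets it contains.
def _parameter_space_example():
    space = ParameterSpace({'a': ParameterRange([1, 2]), 'b': ParameterRange([10, 20])})
    assert space.num_conditions() == 4
    return [(ps.a, ps.b) for ps in space.iter_inner(copy=True)]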
def string_table(tablestring):
"""Convert a table written as a multi-line string into a dict of dicts."""
tabledict = {}
rows = tablestring.strip().split('\n')
column_headers = rows[0].split()
for row in rows[1:]:
row = row.split()
row_header = row[0]
tabledict[row_header] = {}
for col_header, item in zip(column_headers[1:], row[1:]):
tabledict[row_header][col_header] = float(item)
return tabledict
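# Example (illustration only): a small table string becomes a dict of dicts
# with float values, keyed by row label then column label.
def _string_table_example():
    t = string_table("""
    #     col1  col2
    row1  1     2
    row2  3     4
    """)
    assert t == {'row1': {'col1': 1.0, 'col2': 2.0},
                 'row2': {'col1': 3.0, 'col2': 4.0}}
    return t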
class ParameterTable(ParameterSet):
"""
A sub-class of `ParameterSet` that can represent a table of parameters.
i.e., it is limited to one-level of nesting, and each sub-dict must have
the same keys. In addition to the possible initialisers for ParameterSet,
a ParameterTable can be initialised from a multi-line string, e.g.::
>>> pt = ParameterTable('''
... # col1 col2 col3
... row1 1 2 3
... row2 4 5 6
... row3 7 8 9
... ''')
>>> pt.row2.col3
6.0
>>> pt.column('col1')
{'row1': 1.0, 'row2': 4.0, 'row3': 7.0}
>>> pt.transpose().col3.row2
6.0
"""
non_parameter_attributes = ParameterSet.non_parameter_attributes + \
['row', 'rows', 'row_labels',
'column', 'columns', 'column_labels']
def __init__(self, initialiser, label=None):
if isinstance(initialiser, basestring): # url or table string
tabledict = string_table(initialiser)
# if initialiser is a URL, string_table() should return an empty dict
# since URLs do not contain spaces.
if tabledict: # string table
initialiser = tabledict
ParameterSet.__init__(self, initialiser, label)
# Now need to check that the contents actually define a table, i.e.
        # one level of nesting (a dict of dicts) and each sub-dict has the same keys
self._check_is_table()
self.rows = self.items
# self.rows.__doc__ = "Return a list of (row_label, row) pairs, as 2-tuples."""
self.row_labels = self.keys
# self.row_labels.__doc__ = "Return a list of row labels."
def _check_is_table(self):
"""
Checks that the contents actually define a table, i.e.
one level of nesting and each sub-dict has the same keys.
Raises an `Exception` if these requirements are violated.
"""
        column_labels = None
        for row_label, row in self.items():
            if not hasattr(row, 'keys'):
                raise Exception("Row '%s' is not a dict: a ParameterTable must be a dict of dicts" % row_label)
            if column_labels is None:
                column_labels = set(row.keys())
            elif set(row.keys()) != column_labels:
                raise Exception("Row '%s' does not have the same column labels as the other rows" % row_label)
def row(self, row_label):
"""Return a `ParameterSet` object containing the requested row."""
return self[row_label]
def column(self, column_label):
"""Return a `ParameterSet` object containing the requested column."""
col = {}
for row_label, row in self.rows():
col[row_label] = row[column_label]
return ParameterSet(col)
def columns(self):
"""Return a list of `(column_label, column)` pairs, as 2-tuples."""
return [(column_label, self.column(column_label)) for column_label in self.column_labels()]
def column_labels(self):
"""Return a list of column labels."""
sample_row = self[list(self.row_labels())[0]]
return sample_row.keys()
def transpose(self):
"""
Return a new `ParameterTable` object with the same data as the current
one but with rows and columns swapped.
"""
new_table = ParameterTable({})
for column_label, column in self.columns():
new_table[column_label] = column
return new_table
def table_string(self):
"""
Returns the table as a string, suitable for being used as the
initialiser for a new `ParameterTable`.
"""
# formatting could definitely be improved
column_labels = self.column_labels()
lines = ["#\t " + "\t".join(column_labels)]
for row_label, row in self.rows():
lines.append(
row_label + "\t" + "\t".join(["%s" % row[col]
for col in column_labels]))
return "\n".join(lines)
| meduz/NeuroTools | src/parameters/__init__.py | Python | gpl-2.0 | 35,171 |
#!/usr/bin/env python
import re
from setuptools import setup, find_packages
def read_version():
with open("pandas_td/version.py") as f:
m = re.match(r'__version__ = "([^\"]*)"', f.read())
return m.group(1)
requires = [
"certifi",
"pytz",
"tzlocal",
"pandas>=0.16.0",
"requests>=2.21.0",
"td-client>=0.4.0",
]
setup(
name="pandas-td",
version=read_version(),
description="Pandas extension for Treasure Data",
author="Treasure Data, Inc.",
author_email="support@treasure-data.com",
url="https://github.com/treasure-data/pandas-td",
install_requires=requires,
extras_require={
"testing": ["pytest>=3.6", "pytest-cov"],
"dev": ["black==19.3b0", "isort"],
},
packages=find_packages(),
license="Apache License 2.0",
platforms="Posix; MacOS X; Windows",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Framework :: IPython",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Software Development",
],
)
| treasure-data/pandas-td | setup.py | Python | apache-2.0 | 1,186 |
#!/usr/bin/env python
"""
Utils to interact with the redis DB
"""
import csv
from datetime import datetime
import json
import os
import redis
LOCAL_REDIS = 'redis://localhost:6379/0'
REDIS_URL = os.environ.get('REDISCLOUD_URL', LOCAL_REDIS)
CALLEE_COUNTER_KEY = 'callee_counter'
EVENTS_KEY = 'events'
CALLED_NUMBERS_SET_KEY = 'called_numbers_set'
redis = redis.from_url(REDIS_URL)
def store_event(event_name, data):
event = dict(
name=event_name,
timestamp=datetime.utcnow().isoformat(),
data=data,
)
redis.rpush(EVENTS_KEY, json.dumps(event))
def count_calls():
return redis.get(CALLEE_COUNTER_KEY)
CALLEES = list(csv.DictReader(open('data/callees.csv')))
def get_next_callee():
index = redis.incr(CALLEE_COUNTER_KEY) - 1
callee = CALLEES[index]
if redis.sismember(CALLED_NUMBERS_SET_KEY, callee['phone']):
store_event('skipped_repeat_number', callee)
return get_next_callee()
else:
redis.sadd(CALLED_NUMBERS_SET_KEY, callee['phone'])
return index, callee
def get_events():
events = {}
for e in redis.lrange("events", 0, -1):
e = json.loads(e)
events.setdefault(e['name'], []).append(e)
return events
def coalesce_dicts(signins):
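    """Merge a list of dicts into one; for each key, the last truthy value wins."""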
user = {}
keys = set()
keys.update(*signins)
for k in keys:
for s in signins:
if s.get(k):
user[k] = s.get(k)
return user
def sort_dicts_by_key(items, sort_key, mutate=lambda k, v: k):
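    """Group `items` into {key: [items]}, keyed by each item's `sort_key` value, optionally transformed by `mutate(value, item)`."""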
retval = {}
for i in items:
key = mutate(i.get(sort_key), i)
retval.setdefault(key, []).append(i)
return retval
def get_calls_by_phone():
events = get_events()
call_data = [e['data']['raw_data'] for e in events['save_call']]
caller_data = [e['data']['raw_data']['caller'] for e in events['save_call']]
def remove_dashes(k, v):
if k:
return k.replace('-', '')
else:
return k
return sort_dicts_by_key(caller_data, 'phoneNumber', mutate=remove_dashes)
def get_full_leaderboard():
calls_by_phone = get_calls_by_phone()
leaders = sorted([(len(v), k) for k,v in calls_by_phone.items()], reverse=True)
users = [coalesce_dicts(calls_by_phone[k]) for v,k in leaders]
full_leaderboard = [dict(calls=v, **u) for u, (v,k) in zip(users, leaders)]
for l in full_leaderboard: del l['sessionId']
return full_leaderboard
def get_leaderboard():
users = get_full_leaderboard()
names = ['{} {}.'.format(u.get('firstName', 'Anonymous').title(), u.get('lastName', 'Badger')[:1].upper()) for u in users]
return [{'name': n, 'calls': u['calls']} for n, u in zip(names, users)]
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| gabrielgrant/callaborate | db.py | Python | apache-2.0 | 2,734 |
import maya.cmds as cmds
try:
from MetacubeScripts import MetacubeFileNameConvention
except ImportError:
pass
class genericRigStructure(object):
def __init__(self):
self.groups = {
"mesh": {"group": "mesh_grp",
"subGroup": ["body_grp", "cloth_grp", "accesories_grp", "hair_grp", "trackers_grp",
"collision_grp", "pxycloth_grp", "pxyhair_grp", "dynspline_grp"]},
"rig": {"group": "rig_grp", "subGroup": None},
"controls": {"group": "control_grp", "subGroup": None}
}
try:
self.FileNameConv = MetacubeFileNameConvention.MetacubeFileNameConvention()
        except Exception:
self.FileNameConv = None
self.CreateStructure()
def CreateStructure(self):
MainGroupName = None
        if self.FileNameConv is not None:
if self.FileNameConv.nameInFormat:
MainGroupName = self.FileNameConv.AssetType + "_" + self.FileNameConv.AssetName + "_rig"
else:
MainGroupName = "MainCharacter"
else:
MainGroupName = "MainCharacter"
if cmds.objExists(MainGroupName):
MainGroup = MainGroupName
else:
MainGroup = cmds.group(empty=True, name=MainGroupName)
for genericGroup in self.groups:
if cmds.objExists(self.groups[genericGroup]["group"]):
if not cmds.listRelatives(self.groups[genericGroup]["group"], parent=True)[0] == MainGroupName:
cmds.parent(self.groups[genericGroup]["group"], MainGroupName)
else:
cmds.group(empty=True, name=self.groups[genericGroup]["group"])
cmds.parent(self.groups[genericGroup]["group"], MainGroupName)
if self.groups[genericGroup]["subGroup"]:
for eachgroup in self.groups[genericGroup]["subGroup"]:
if not cmds.objExists(eachgroup):
cmds.group(empty=True, name=eachgroup)
cmds.parent(eachgroup, self.groups[genericGroup]["group"])
| rendermotion/RMPY | AutoRig/RMGenericRigStructure.py | Python | lgpl-3.0 | 2,106 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
# This script prints information about the build system, the operating
# system and the iOS or Mac SDK (depending on the platform "iphonesimulator",
# "iphoneos" or "macosx" generally).
#
# In the GYP build, this is done inside GYP itself based on the SDKROOT
# variable.
def FormatVersion(version):
"""Converts Xcode version to a format required for Info.plist."""
version = version.replace('.', '')
version = version + '0' * (3 - len(version))
return version.zfill(4)
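# For example (illustrative): FormatVersion('7.2') -> '0720', since '7.2'
# becomes '72', is right-padded with zeros to '720', then zero-filled to
# four digits.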
def FillXcodeVersion(settings):
"""Fills the Xcode version and build number into |settings|."""
lines = subprocess.check_output(['xcodebuild', '-version']).splitlines()
settings['xcode_version'] = FormatVersion(lines[0].split()[-1])
settings['xcode_build'] = lines[-1].split()[-1]
def FillMachineOSBuild(settings):
"""Fills OS build number into |settings|."""
settings['machine_os_build'] = subprocess.check_output(
['sw_vers', '-buildVersion']).strip()
def FillSDKPathAndVersion(settings, platform, xcode_version):
"""Fills the SDK path and version for |platform| into |settings|."""
settings['sdk_path'] = subprocess.check_output([
'xcrun', '-sdk', platform, '--show-sdk-path']).strip()
settings['sdk_version'] = subprocess.check_output([
'xcrun', '-sdk', platform, '--show-sdk-version']).strip()
# TODO: unconditionally use --show-sdk-build-version once Xcode 7.2 or
# higher is required to build Chrome for iOS or OS X.
if xcode_version >= '0720':
settings['sdk_build'] = subprocess.check_output([
'xcrun', '-sdk', platform, '--show-sdk-build-version']).strip()
else:
settings['sdk_build'] = settings['sdk_version']
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.stderr.write(
'usage: %s [iphoneos|iphonesimulator|macosx]\n' %
os.path.basename(sys.argv[0]))
sys.exit(1)
settings = {}
FillMachineOSBuild(settings)
FillXcodeVersion(settings)
FillSDKPathAndVersion(settings, sys.argv[1], settings['xcode_version'])
for key in sorted(settings):
print '%s="%s"' % (key, settings[key])
| axinging/chromium-crosswalk | build/config/mac/sdk_info.py | Python | bsd-3-clause | 2,283 |
# encoding: UTF-8
from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(
name = 'crrCython',
ext_modules = cythonize("crrCython.pyx"),
include_dirs = [numpy.get_include()]
)
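# Typical build command (illustrative; adjust to your environment):
#     python setup.py build_ext --inplace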
| mumuwoyou/vnpy-master | vnpy/pricing/crrCython/setup.py | Python | mit | 215 |
#!/usr/bin/env python3
#-------------------------------------------------------------------------------
# Optimal 2 Heap O(1) Solution
#-------------------------------------------------------------------------------
from heapq import *
class MedianFinder:
def __init__(self):
"""
initialize your data structure here.
"""
# Create two heaps.
# One to keep track of the smaller half and one to keep track of the larger half
self.small = [] # Max Heap
self.large = [] # Min Heap
def addNum(self, num):
"""
:type num: int
:rtype: void
"""
# If heaps are equal size add appropriate element to large heap
# To determine what element to put in large heap, first we must compare and extract
# the greatest element in the small heap
#
# Note: Negative values in small heap because we want it to mimic the behavior of a max heap
if len(self.small) == len(self.large):
heappush(self.large, -heappushpop(self.small, -num))
else:
heappush(self.small, -heappushpop(self.large, num))
def findMedian(self):
"""
:rtype: float
"""
        # If the heaps are equal in size, the median is the mean of the two middle elements.
        # Otherwise it is the top of the larger heap, which holds the extra element.
if len(self.small) == len(self.large):
return float(self.large[0] - self.small[0]) / 2.0
else:
return float(self.large[0])
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
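# e.g. (illustrative): addNum(1), addNum(2) -> findMedian() == 1.5,
# then addNum(3) -> findMedian() == 2.0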
#-------------------------------------------------------------------------------
# NAIVE SORTING SOLUTION O(nlogn)
#-------------------------------------------------------------------------------
class MedianFinder:
def __init__(self):
"""
initialize your data structure here.
"""
self.nums = []
def addNum(self, num):
"""
:type num: int
:rtype: void
"""
self.nums.append(num)
def findMedian(self):
"""
:rtype: float
"""
self.nums.sort()
if len(self.nums) == 0:
return None
elif len(self.nums) % 2 == 1:
return float(self.nums[len(self.nums)//2])
else:
return (self.nums[len(self.nums)//2] + self.nums[(len(self.nums)//2)-1]) / 2
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
#-------------------------------------------------------------------------------
| kyle8998/Practice-Coding-Questions | leetcode/295-Hard-Find-Median-From-Data-Stream/answer.py | Python | unlicense | 2,682 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# python-simple-rest-client documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 15 17:57:28 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "python-simple-rest-client"
copyright = "2017, Allisson Azevedo"
author = "Allisson Azevedo"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "python-simple-rest-clientdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"python-simple-rest-client.tex",
"python-simple-rest-client Documentation",
"Allisson Azevedo",
"manual",
)
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "python-simple-rest-client", "python-simple-rest-client Documentation", [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"python-simple-rest-client",
"python-simple-rest-client Documentation",
author,
"python-simple-rest-client",
"One line description of project.",
"Miscellaneous",
)
]
| allisson/python-simple-rest-client | docs/conf.py | Python | mit | 4,990 |
# !/usr/bin/env python
import unittest
from xor_string import xor_string
class EqualityTest(unittest.TestCase):
def testEqual(self):
ori = 'attack at dawn'
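        # XOR with the same key is its own inverse, so decrypting the
        # ciphertext with the same key should recover the original plaintext.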
enc = xor_string(ori, 's3cr3t')
dec = xor_string(enc, 's3cr3t')
        self.assertEqual(ori, dec)
if __name__ == '__main__':
unittest.main()
| Akagi201/xor_string | tests/test_xor_string.py | Python | mit | 338 |
import json
from rest_framework import serializers
from rest_framework_hstore.serializers import HStoreSerializer
from nodeshot.core.nodes.models import Node
from nodeshot.core.nodes.serializers import NodeListSerializer
from .models import Layer
from .settings import ADDITIONAL_LAYER_FIELDS
__all__ = [
'LayerDetailSerializer',
'LayerListSerializer',
'LayerNodeListSerializer',
'CustomNodeListSerializer',
]
class LayerListSerializer(HStoreSerializer):
"""
Layer list
"""
details = serializers.HyperlinkedIdentityField(view_name='api_layer_detail', lookup_field='slug')
nodes = serializers.HyperlinkedIdentityField(view_name='api_layer_nodes_list', lookup_field='slug')
geojson = serializers.HyperlinkedIdentityField(view_name='api_layer_nodes_geojson', lookup_field='slug')
center = serializers.SerializerMethodField()
has_contact = serializers.SerializerMethodField()
def get_center(self, obj):
return json.loads(obj.center.geojson)
def get_has_contact(self, obj):
return bool(obj.email)
class Meta:
model = Layer
fields = [
'id', 'slug', 'name', 'center', 'area', 'organization',
'nodes_minimum_distance', 'new_nodes_allowed', 'is_external',
'has_contact', 'details', 'nodes', 'geojson'
] + ADDITIONAL_LAYER_FIELDS
class LayerDetailSerializer(LayerListSerializer):
"""
Layer details
"""
class Meta:
model = Layer
fields = ['name', 'slug', 'center', 'area', 'organization', 'is_external',
'nodes_minimum_distance', 'new_nodes_allowed',
'description', 'text', 'has_contact',
'website', 'nodes', 'geojson'] + ADDITIONAL_LAYER_FIELDS
class CustomNodeListSerializer(NodeListSerializer):
class Meta:
model = Node
fields = [
'name', 'slug', 'user',
'geometry', 'elev', 'address', 'description',
'updated', 'added', 'details'
]
read_only_fields = ['added', 'updated']
geo_field = 'geometry'
class LayerNodeListSerializer(LayerDetailSerializer):
"""
Nodes of a Layer
"""
class Meta:
model = Layer
fields = ['name', 'description', 'text', 'organization', 'website'] + ADDITIONAL_LAYER_FIELDS
| ninuxorg/nodeshot | nodeshot/core/layers/serializers.py | Python | gpl-3.0 | 2,337 |
# -*- coding: utf-8 -*-
from functools import wraps
from http.client import BadStatusLine
from socket import error as socket_error
from socket import gaierror
from ssl import SSLError
from tweepy import API as TwitterAPI # NOQA: N811
from tweepy import OAuthHandler as TwitterOAuthHandler
from tweepy import TweepError
from baseframe import _
from lastuser_core.registry import LoginCallbackError, LoginInitError, LoginProvider
from .flask_oauth import OAuth, OAuthException # OAuth 1.0a
__all__ = ['TwitterProvider']
def twitter_exception_handler(f):
@wraps(f)
def decorated_function(*args, **kwargs):
try:
return f(*args, **kwargs)
except (
OAuthException,
BadStatusLine,
AttributeError,
socket_error,
gaierror,
) as e:
raise LoginCallbackError(e)
except KeyError:
# XXX: Twitter sometimes returns a 404 with no Content-Type header. This causes a
# KeyError in the Flask-OAuth library. Catching the KeyError here is a kludge.
# We need to get Flask-OAuth fixed or stop using it.
raise LoginCallbackError(
_("Twitter had an intermittent error. Please try again")
)
return decorated_function
class TwitterProvider(LoginProvider):
at_username = True
def __init__(
self,
name,
title,
key,
secret,
access_key,
access_secret,
at_login=True,
priority=True,
icon=None,
):
self.name = name
self.title = title
self.at_login = at_login
self.priority = priority
self.icon = icon
self.consumer_key = key
self.consumer_secret = secret
self.access_key = access_key
self.access_secret = access_secret
oauth = OAuth()
twitter = oauth.remote_app(
'twitter',
base_url='https://api.twitter.com/1/',
request_token_url='https://api.twitter.com/oauth/request_token',
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authenticate',
consumer_key=key,
consumer_secret=secret,
)
twitter.tokengetter(lambda token=None: None) # We have no use for tokengetter
self.callback = twitter_exception_handler(
twitter.authorized_handler(self.unwrapped_callback)
)
self.twitter = twitter
def do(self, callback_url):
try:
return self.twitter.authorize(callback=callback_url)
except (OAuthException, BadStatusLine, SSLError, socket_error, gaierror) as e:
raise LoginInitError(e)
except KeyError:
# As above, the lack of a Content-Type header in a 404 response breaks Flask-OAuth. Catch it.
raise LoginInitError(
_("Twitter had an intermittent error. Please try again")
)
def unwrapped_callback(self, resp):
if resp is None:
raise LoginCallbackError(_("You denied the request to login"))
# Try to read more from the user's Twitter profile
auth = TwitterOAuthHandler(self.consumer_key, self.consumer_secret)
auth.set_access_token(resp['oauth_token'], resp['oauth_token_secret'])
api = TwitterAPI(auth)
try:
twinfo = api.verify_credentials(
include_entities='false', skip_status='true', include_email='true'
)
fullname = twinfo.name
avatar_url = twinfo.profile_image_url_https.replace('_normal.', '_bigger.')
email = getattr(twinfo, 'email', None)
except TweepError:
fullname = None
avatar_url = None
email = None
return {
'email': email,
'userid': resp['user_id'],
'username': resp['screen_name'],
'fullname': fullname,
'avatar_url': avatar_url,
'oauth_token': resp['oauth_token'],
'oauth_token_secret': resp['oauth_token_secret'],
'oauth_token_type': None, # Twitter doesn't have token types
}
| hasgeek/lastuser | lastuser_oauth/providers/twitter.py | Python | bsd-2-clause | 4,255 |
from yozuch.generators.post_item_collection import PostItemCollectionGenerator
class TagGenerator(PostItemCollectionGenerator):
def __init__(self, url_template, name, template=None):
super().__init__('tags', 'tag', url_template, name, template)
| akrylysov/yozuch | yozuch/generators/tag.py | Python | bsd-3-clause | 260 |
#!/usr/bin/env python
from tools.multiclass_shared import prepare_data
[traindat, label_traindat, testdat, label_testdat] = prepare_data(False)
parameter_list = [[traindat,testdat,label_traindat,label_testdat,2.1,1,1e-5],[traindat,testdat,label_traindat,label_testdat,2.2,1,1e-5]]
def classifier_multilabeloutputliblinear_modular (fm_train_real=traindat,fm_test_real=testdat,label_train_multiclass=label_traindat,label_test_multiclass=label_testdat,width=2.1,C=1,epsilon=1e-5):
from modshogun import RealFeatures, MulticlassLabels, MultilabelLabels
from modshogun import MulticlassLibLinear
feats_train=RealFeatures(fm_train_real)
feats_test=RealFeatures(fm_test_real)
labels=MulticlassLabels(label_train_multiclass)
classifier = MulticlassLibLinear(C,feats_train,labels)
classifier.train()
label_pred = classifier.apply_multilabel_output(feats_test,2)
out = label_pred.get_labels()
#print out
return out
if __name__=='__main__':
print('MultilabelOutputLibLinear')
classifier_multilabeloutputliblinear_modular(*parameter_list[0])
| abhiatgithub/shogun-toolbox | examples/undocumented/python_modular/classifier_multilabeloutputliblinear_modular.py | Python | gpl-3.0 | 1,051 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-24 22:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('events', '0013_auto_20160424_2247'),
]
operations = [
migrations.AlterModelOptions(
name='city',
options={'verbose_name_plural': 'cities'},
),
migrations.AlterField(
model_name='likedcity',
name='city',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='events.City'),
preserve_default=False,
),
]
| EricZaporzan/evention | evention/events/migrations/0014_auto_20160424_2258.py | Python | mit | 709 |
# (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import inspect
import os
import re
import shlex
import traceback
from ansible.compat.six.moves.urllib.parse import urlunsplit
from ansible.errors import AnsibleError
try:
from winrm import Response
from winrm.exceptions import WinRMTransportError
from winrm.protocol import Protocol
except ImportError:
raise AnsibleError("winrm is not installed")
HAVE_KERBEROS = False
try:
import kerberos
HAVE_KERBEROS = True
except ImportError:
pass
from ansible.errors import AnsibleFileNotFound
from ansible.plugins.connection import ConnectionBase
from ansible.utils.path import makedirs_safe
from ansible.utils.unicode import to_bytes, to_unicode, to_str
from ansible.utils.vars import combine_vars
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
module_implementation_preferences = ('.ps1', '')
def __init__(self, *args, **kwargs):
self.has_pipelining = False
self.protocol = None
self.shell_id = None
self.delegate = None
self._shell_type = 'powershell'
# TODO: Add runas support
self.become_methods_supported=[]
super(Connection, self).__init__(*args, **kwargs)
@property
def transport(self):
''' used to identify this connection object from other classes '''
return 'winrm'
def set_host_overrides(self, host):
'''
Override WinRM-specific options from host variables.
'''
host_vars = combine_vars(host.get_group_vars(), host.get_vars())
self._winrm_host = self._play_context.remote_addr
self._winrm_port = int(self._play_context.port or 5986)
self._winrm_scheme = host_vars.get('ansible_winrm_scheme', 'http' if self._winrm_port == 5985 else 'https')
self._winrm_path = host_vars.get('ansible_winrm_path', '/wsman')
self._winrm_user = self._play_context.remote_user
self._winrm_pass = self._play_context.password
if '@' in self._winrm_user:
self._winrm_realm = self._winrm_user.split('@', 1)[1].strip() or None
else:
self._winrm_realm = None
self._winrm_realm = host_vars.get('ansible_winrm_realm', self._winrm_realm) or None
transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext'
if HAVE_KERBEROS and ('@' in self._winrm_user or self._winrm_realm):
self._winrm_transport = 'kerberos,%s' % transport_selector
else:
self._winrm_transport = transport_selector
self._winrm_transport = host_vars.get('ansible_winrm_transport', self._winrm_transport)
if isinstance(self._winrm_transport, basestring):
self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()]
self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass, realm=self._winrm_realm)
argspec = inspect.getargspec(Protocol.__init__)
for arg in argspec.args:
if arg in ('self', 'endpoint', 'transport', 'username', 'password', 'realm'):
continue
if 'ansible_winrm_%s' % arg in host_vars:
self._winrm_kwargs[arg] = host_vars['ansible_winrm_%s' % arg]
def _winrm_connect(self):
'''
Establish a WinRM connection over HTTP/HTTPS.
'''
display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
(self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
netloc = '%s:%d' % (self._winrm_host, self._winrm_port)
endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
errors = []
for transport in self._winrm_transport:
if transport == 'kerberos' and not HAVE_KERBEROS:
errors.append('kerberos: the python kerberos library is not installed')
continue
display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
try:
protocol = Protocol(endpoint, transport=transport, **self._winrm_kwargs)
protocol.send_message('')
return protocol
except Exception as e:
err_msg = (str(e) or repr(e)).strip()
if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I):
raise AnsibleError('the connection attempt timed out')
m = re.search(r'Code\s+?(\d{3})', err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
err_msg = 'the username/password specified for this server was incorrect'
elif code == 411:
return protocol
errors.append('%s: %s' % (transport, err_msg))
display.vvvvv('WINRM CONNECTION ERROR: %s\n%s' % (err_msg, traceback.format_exc()), host=self._winrm_host)
if errors:
raise AnsibleError(', '.join(errors))
else:
raise AnsibleError('No transport found for WinRM connection')
def _winrm_exec(self, command, args=(), from_exec=False):
if from_exec:
display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
else:
display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
if not self.protocol:
self.protocol = self._winrm_connect()
if not self.shell_id:
self.shell_id = self.protocol.open_shell(codepage=65001) # UTF-8
command_id = None
try:
command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args))
response = Response(self.protocol.get_command_output(self.shell_id, command_id))
if from_exec:
display.vvvvv('WINRM RESULT %r' % to_unicode(response), host=self._winrm_host)
else:
display.vvvvvv('WINRM RESULT %r' % to_unicode(response), host=self._winrm_host)
display.vvvvvv('WINRM STDOUT %s' % to_unicode(response.std_out), host=self._winrm_host)
display.vvvvvv('WINRM STDERR %s' % to_unicode(response.std_err), host=self._winrm_host)
return response
finally:
if command_id:
self.protocol.cleanup_command(self.shell_id, command_id)
def _connect(self):
if not self.protocol:
self.protocol = self._winrm_connect()
return self
def exec_command(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = shlex.split(to_bytes(cmd), posix=False)
cmd_parts = map(to_unicode, cmd_parts)
script = None
cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or ''
# Support running .ps1 files (via script/raw).
if cmd_ext == '.ps1':
script = '& %s' % cmd
# Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8.
elif cmd_ext in ('.bat', '.cmd'):
script = '[System.Console]::OutputEncoding = [System.Text.Encoding]::Default; & %s' % cmd
# Encode the command if not already encoded; supports running simple PowerShell commands via raw.
elif '-EncodedCommand' not in cmd_parts:
script = cmd
if script:
cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False)
if '-EncodedCommand' in cmd_parts:
encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
decoded_cmd = to_unicode(base64.b64decode(encoded_cmd).decode('utf-16-le'))
display.vvv("EXEC %s" % decoded_cmd, host=self._winrm_host)
else:
display.vvv("EXEC %s" % cmd, host=self._winrm_host)
try:
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
except Exception:
traceback.print_exc()
raise AnsibleError("failed to exec cmd %s" % cmd)
result.std_out = to_bytes(result.std_out)
result.std_err = to_bytes(result.std_err)
return (result.status_code, result.std_out, result.std_err)
def put_file(self, in_path, out_path):
super(Connection, self).put_file(in_path, out_path)
out_path = self._shell._unquote(out_path)
display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
if not os.path.exists(in_path):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path)
with open(in_path) as in_file:
in_size = os.path.getsize(in_path)
script_template = '''
$s = [System.IO.File]::OpenWrite("%s");
[void]$s.Seek(%d, [System.IO.SeekOrigin]::Begin);
$b = [System.Convert]::FromBase64String("%s");
[void]$s.Write($b, 0, $b.length);
[void]$s.SetLength(%d);
[void]$s.Close();
'''
# Determine max size of data we can pass per command.
script = script_template % (self._shell._escape(out_path), in_size, '', in_size)
cmd = self._shell._encode_script(script)
# Encode script with no data, subtract its length from 8190 (max
# windows command length), divide by 2.67 (UTF16LE base64 command
# encoding), then by 1.35 again (data base64 encoding).
buffer_size = int(((8190 - len(cmd)) / 2.67) / 1.35)
for offset in xrange(0, in_size or 1, buffer_size):
try:
out_data = in_file.read(buffer_size)
if offset == 0:
if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'):
out_path = out_path + '.ps1'
b64_data = base64.b64encode(out_data)
script = script_template % (self._shell._escape(out_path), offset, b64_data, in_size)
display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(to_str(result.std_err))
except Exception:
traceback.print_exc()
raise AnsibleError('failed to transfer file to "%s"' % out_path)
def fetch_file(self, in_path, out_path):
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._shell._unquote(in_path)
out_path = out_path.replace('\\', '/')
display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
buffer_size = 2**19 # 0.5MB chunks
makedirs_safe(os.path.dirname(out_path))
out_file = None
try:
offset = 0
while True:
try:
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
$stream = [System.IO.File]::OpenRead("%(path)s");
$stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
$buffer = New-Object Byte[] %(buffer_size)d;
$bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
$bytes = $buffer[0..($bytesRead-1)];
[System.Convert]::ToBase64String($bytes);
$stream.Close() | Out-Null;
}
ElseIf (Test-Path -PathType Container "%(path)s")
{
Write-Host "[DIR]";
}
Else
{
Write-Error "%(path)s does not exist";
Exit 1;
}
''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(to_str(result.std_err))
if result.std_out.strip() == '[DIR]':
data = None
else:
data = base64.b64decode(result.std_out.strip())
if data is None:
makedirs_safe(out_path)
break
else:
if not out_file:
# If out_path is a directory and we're expecting a file, bail out now.
if os.path.isdir(out_path):
break
out_file = open(out_path, 'wb')
out_file.write(data)
if len(data) < buffer_size:
break
offset += len(data)
except Exception:
traceback.print_exc()
raise AnsibleError('failed to transfer file to "%s"' % out_path)
finally:
if out_file:
out_file.close()
def close(self):
if self.protocol and self.shell_id:
self.protocol.close_shell(self.shell_id)
self.shell_id = None
| kisoku/ansible | lib/ansible/plugins/connection/winrm.py | Python | gpl-3.0 | 14,871 |
r"""
Run Link Grammar Python scripts using the build target locations.
This program sets PYTHONPATH and PATH.
This program is designed to reside in this directory.
It reads Local.props in order to find Python's EXE location.
It also generates relative PYTHONPATH and PATH.
If you move it to another directory, update the related variables below.
The default script directory is bindings\python-examples and the default script
to run is tests.py. In order to run the file example.py there, the following
can be used:
console-prompt>make-check.py x64\Debug\Python3 example.py
The following starts an interactive python program:
console-prompt>make-check.py x64\Debug\Python3 ""
"""
import os
import sys
import re
local_prop_file = 'Local.props' # In this directory
scriptdir = r'..\bindings\python-examples'
pyscript = 'tests.py'
#os.environ["LINK_GRAMMAR_DATA"] = r'../data'
def error(msg):
if msg:
print(msg)
prog = os.path.basename(sys.argv[0])
print("Usage: ", prog, '[python_flag] PYTHON_OUTDIR [script.py] [script_args]')
    print(r'       PYTHON_OUTDIR is in the format of "x64\Debug\Python3"')
sys.exit(1)
local_prop = {}
def read_props(vsfile):
""" Read all the User Macros from the local properties file. """
vs_f = open(vsfile, 'r')
macdef_re = re.compile(r'<(\w+)>([^<]*)<')
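    # e.g. a matching line (hypothetical macro name):
    #   <PY3_EXE>C:\Python36\python.exe</PY3_EXE>  ->  local_prop['PY3_EXE'] = r'C:\Python36\python.exe'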
for line in vs_f:
read_m = re.search(macdef_re, line)
if read_m is None:
continue
if len(read_m.groups()) != 2:
error('Bad line in "{}": {}'.format(vsfile, line))
local_prop[read_m.group(1)] = read_m.group(2)
if not local_prop:
error('No properties found in {}.'.format(vsfile))
NODEFAULT = object()
prop_re = re.compile(r'\$\((\w+)')
def get_prop(prop, default=NODEFAULT):
"""
Resolve a macro definition.
"""
prop_val = local_prop.get(prop, None)
if prop_val is None:
if default is NODEFAULT:
error('Property "{}" not found in {}' .format(prop, local_prop_file))
return default
while True:
prop_m = re.search(prop_re, prop_val)
if prop_m is None:
break
prop_rep = prop_m.group(1)
prop_repval = local_prop.get(prop_rep, None)
if prop_repval is None:
prop_repval = os.getenv(prop_rep)
if prop_repval is None:
error('Property "{}" not found in "{}" and also not in the environment'.
format(prop_rep, local_prop_file))
prop_val = str.replace(prop_val, '$('+prop_rep+')', prop_repval)
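        # e.g. with local_prop['OUTDIR'] == r'x64\Debug' (hypothetical),
        # r'$(OUTDIR)\bin' resolves to r'x64\Debug\bin'.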
return prop_val
#---
#print('Running by:', sys.executable)
rundir = os.path.dirname(sys.argv[0]) or '.'
local_prop_file = rundir + '\\' + local_prop_file
read_props(local_prop_file)
if len(sys.argv) < 2:
error('Missing argument')
pyargs = ''
if sys.argv[1] and sys.argv[1][0] == '-':
pyargs = sys.argv.pop(1)
if len(sys.argv) < 2:
error('Missing argument')
outdir = rundir + '\\' + sys.argv.pop(1)
if not os.path.isdir(outdir):
error('Directory "{}" doesn\'t exist'.format(outdir))
m = re.search(r'(.*)\\(.*)$', outdir)
if not m or len(m.groups()) != 2:
error('Invalid output directory "{}"'.format(outdir))
config = m.group(1)
pydir = m.group(2).upper()
pyexe = get_prop(pydir+'_EXE')
if len(sys.argv) == 2:
if sys.argv[1] == '' or sys.argv[1][0] != '-':
pyscript = sys.argv.pop(1)
if pyscript != '':
if '\\' not in pyscript:
pyscript = rundir + '\\' + scriptdir + '\\' + pyscript
args = ''
if len(sys.argv) >= 2:
args = ' '.join(sys.argv[2:])
path = os.environ["PATH"]
dllpath = get_prop('LG_DLLPATH')
# For DLLs - linkgrammar-*.dll and regex.dll
os.environ["PATH"] = ('{};{};{}').format(config, dllpath, path)
#print("PATH=" + os.environ["PATH"])
# For linkgrammar.py, clinkgrammar.py and _clinkgrammar.pyd
os.environ["PYTHONPATH"] = \
rundir + '\\' + r'..\bindings\python;{}'.format(outdir)
print("PYTHONPATH=" + os.environ["PYTHONPATH"])
#print("Searching modules in:\n" + '\n'.join(sys.path))
cmd = ' '.join((pyexe, pyargs, pyscript, args))
print('Issuing command:', cmd)
os.system(cmd)
| linas/link-grammar | msvc/make-check.py | Python | lgpl-2.1 | 4,317 |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# ------------------------------------------------------------
# Libraries
# ------------------------------------------------------------
import os
import sys
# ------------------------------------------------------------
# Variables and Constants
# ------------------------------------------------------------
# Constants
youtube = "http://www.youtube.com/watch?v="
messageIfDownloadFinished = 'Done. Saved'
messageIfAlreadyDownloaded = 'has been fully downloaded'
# Configuration
mainIndexFilename = 'index' # File with the courses to be downloaded
downloadOutputFilename = 'temp'
ffmpeg_threads = '-threads 4 ' # threads for ffmpeg
retryIfDownloadFail = True
convertVideos = False
debug = False
# Command line
wget = 'wget -c --output-document'
getflash = 'get_flash_videos --quality low --filename'
ffmpeg_p1 = 'ffmpeg -y -benchmark ' + ffmpeg_threads + '-strict experimental -i'
ffmpeg_p2 = '-acodec aac -ab 128k -vcodec mpeg4 -b 1200k -mbd 2 -flags +mv4+aic -trellis 1 -cmp 2 -subcmp 2 -s 320x240 -metadata title='
ffmpeg_p3 = '-metadata album='
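# The conversion command is assembled by concatenating the pieces above; roughly (illustrative names):
#   ffmpeg -y -benchmark -threads 4 -strict experimental -i "000_Intro.flv" \
#       <codec/scale options> -metadata title="000_Intro" -metadata album=Course_Name "000_Intro.mp4"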
# ------------------------------------------------------------
# Functions
# ------------------------------------------------------------
def returnFile(fileName):
fileHandle = open(fileName, "r")
lines = (fileHandle.read()).splitlines()
fileHandle.close()
return lines
def message(msg):
print( '\n\n' )
print( '#' + 80*'-' )
print( '# ' + msg )
print( '#' + 80*'-' )
def removeSpecialChars(string):
    # Hedged implementation of the original empty stub: keep only characters
    # that are safe in filenames (alphanumerics, dash, underscore, dot).
    return ''.join(c for c in string if c.isalnum() or c in '-_.')
def download():
# Get file with all classes from a course
fileindex = returnFile(mainIndexFilename) # Load file with desired courses
for line in fileindex:
addressList = []
courseIndexFile = line.split('<>')[0]
courseDirName = line.split('<>')[1].replace(' ','_')
courseFileName = '.' + courseDirName
message( 'Getting ' + courseFileName )
courseFileName = courseFileName.lower()
tmp = wget + ' ' + courseFileName + ' ' + courseIndexFile
message(tmp)
if not debug: os.system(tmp)
# Create a list with all links from a course
for i in returnFile( courseFileName ):
if '<option ' in i:
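            # e.g. an index line like '<option value="VIDEO_ID">Lesson title</option>' (illustrative)
            # parses to result == ['VIDEO_ID', 'Lesson title'].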
result = [ i.split('<')[1].split('>')[0].split('\"')[1], i.split('<')[1].split('>')[1] ]
addressList.append( str( str(result[0]) ) + '<>' + str(result[1]) )
# Clean
tmp = 'rm -f ' + courseFileName + ' ' + downloadOutputFilename
message( tmp )
if not debug: os.system(tmp)
# Download class by class
classroomList = []
counter = 0
for i in addressList:
address = i.split('<>')[0]
classroom = ' "' + str(counter).zfill(3) + '_' + i.split('<>')[1].replace(' ','_') + '.flv" '
classroomList.append( classroom )
tmp = getflash + classroom + youtube + address + ' 2>&1 | tee ' + downloadOutputFilename
message( tmp )
if not debug: os.system(tmp)
            if retryIfDownloadFail:
                flagRetry = True  # assume a retry is needed unless the log shows success or skip
                for j in returnFile( downloadOutputFilename ):
                    if messageIfAlreadyDownloaded in j:
                        flagRetry = False
                        break
                    if messageIfDownloadFinished in j:
                        flagRetry = False
                        break
                if flagRetry:
                    if not debug: os.system(tmp)
counter = counter + 1
# Convert the files to the desired format
if convertVideos:
counter = 0
for classroom in classroomList:
finalFile = classroom.replace('.flv','.mp4')[1:].replace(' ','')
finalFileTest = classroom.replace('.flv','.mp4')[1:].replace(' ','').replace('"', '')
if not os.path.exists( finalFileTest ):
tmp = ffmpeg_p1 + classroom + ffmpeg_p2 + classroom.replace('.flv','')[1:] + ffmpeg_p3 + courseDirName + ' ' + finalFile
message( tmp )
if not debug: os.system(tmp)
counter = counter + 1
else:
message("File already converted.")
# Create the directory
tmp = 'mkdir -p ' + courseDirName
message(tmp)
if not debug: os.system(tmp)
# Move files
counter = 0
for classroom in classroomList:
if convertVideos: classroom = classroom + ' ' + classroom.replace('.flv','.mp4')
tmp = 'mv -f' + classroom + courseDirName
message( tmp )
if not debug: os.system(tmp)
counter = counter + 1
# ------------------------------------------------------------
# Main
# ------------------------------------------------------------
if __name__ == "__main__":
download()
| brunocalado/downloadkhanacademy | downloadkhanacademy.py | Python | gpl-3.0 | 5,050 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__all__ = ["Discontinuity"]
import numpy as np
from ..pipeline import Pipeline
from .prepare import LightCurve
class Discontinuity(Pipeline):
query_parameters = dict(
discont_window=(51, False),
discont_duration=(0.4, False),
discont_min_sig=(75., False),
discont_min_fact=(0.5, False),
discont_min_dt=(1.0, False),
discont_min_size=(20, False),
)
def get_result(self, query, parent_response):
lcs = parent_response.light_curves
# Parameters.
N = query["discont_window"]
duration = query["discont_duration"]
min_dis_sig = query["discont_min_sig"]
min_dis_fact = query["discont_min_fact"]
min_dis_dt = query["discont_min_dt"]
min_dis_size = query["discont_min_size"]
        # Pre-allocate work arrays.
t0 = N // 2
x = np.arange(N)
A = np.vander(x, 2)
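        # Columns of np.vander(x, 2) are [x, 1]: a straight-line (slope + intercept) local continuum model.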
lc_out = []
for k, lc in enumerate(lcs):
# Compute the typical time spacing in the LC.
dt = int(0.5 * duration / np.median(np.diff(lc.time)))
# The step function hypothesis.
model1 = np.ones(N)
model1[t0:] = -1.0
# The transit hypothesis.
model2 = np.zeros(N)
model2[t0-dt:t0+dt] = -1.0
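            # model2 is -1 over a window of width 2*dt centred on t0: a box-shaped transit dip.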
# Initialize the work arrays.
chi2 = np.empty((len(lc.time) - N, 3))
# Loop over each time and compare the hypotheses.
for i in range(len(lc.time) - N):
y = np.array(lc.flux[i:i+N])
ivar = 1. / np.array(lc.ferr[i:i+N]) ** 2
# Loop over the different models, do the fit, and compute the
# chi^2.
for j, model in enumerate((None, model1, model2)):
if model is not None:
A1 = np.hstack((A, np.atleast_2d(model).T))
else:
A1 = np.array(A)
ATA = np.dot(A1.T, A1 * ivar[:, None])
w = np.linalg.solve(ATA, np.dot(A1.T, y * ivar))
pred = np.dot(A1, w)
chi2[i, j] = np.sum((pred - y) ** 2 * ivar)
# Detect the peaks.
z = chi2[:, 2] - chi2[:, 1]
p1 = (z[1:-1] > z[:-2]) * (z[1:-1] > z[2:])
p0 = p1 * (z[1:-1] > min_dis_sig)
# Remove peaks with other nearby higher peaks.
m = z[p0][:, None] - z[p0][None, :] > min_dis_fact * z[p0][:, None]
# Remove the nearby peaks.
t = lc.time[t0:t0+len(z)]
m += np.abs(t[p0][:, None] - t[p0][None, :]) > min_dis_dt
m[np.diag_indices_from(m)] = True
m = np.all(m, axis=1)
peak_inds = np.arange(1, len(z)-1)[p0][m]
# Split on the peaks.
peak_inds = np.concatenate(([0], peak_inds + t0, [len(lc.time)]))
for i in range(len(peak_inds) - 1):
m = np.arange(peak_inds[i], peak_inds[i+1])
if len(m) < min_dis_size:
continue
lc_out.append(
LightCurve(lc.time[m], lc.flux[m], lc.ferr[m],
np.zeros_like(lc.time[m], dtype=int),
(p[m] for p in lc.predictors))
)
return dict(light_curves=lc_out)
| dfm/ketu | ketu/kepler/discontinuity.py | Python | mit | 3,464 |
#coding:utf-8
# Perceptron: y = f(Wn * x + b)
# This code implements a logical AND. The last element of every input is fixed at 1,
# so the bias b can be treated as just another weight whose feature value is always 1.
# That is, y = f(W_{n+1} * [x, 1]), where the extra weight W_{n+1} plays the role of b.
# https://www.zybuluo.com/hanbingtao/note/433855
from numpy import array, dot, random
from random import choice
def fun_1_or_0(x): return 0 if x < 0 else 1
training_data = [(array([0, 0, 1]), 0), (array([0, 1, 1]), 0),
(array([1, 0, 1]), 0), (array([1, 1, 1]), 1)]
weights = random.random(3)
print("before traning, weights:",weights)
learning_rate = 0.2
num_iterations = 100
for i in range(num_iterations):
input, truth = choice(training_data)
result = dot(weights, input)
error = truth - fun_1_or_0(result)
weights += learning_rate * error * input
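    # Perceptron learning rule: w <- w + eta * (target - prediction) * x.
    # Only misclassified samples (error != 0) move the weights.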
print("after traning, weights:",weights)
for x, _ in training_data:
result = dot(x, weights)
print("{}:{}->{}".format(x[:2], result, fun_1_or_0(result)))
| kkzzzzzz/Deep-Learing | Sensor/code.py | Python | mit | 991 |
from django.conf import settings
from django.template import Library
import os
from django.utils.safestring import mark_safe
register = Library()
base_directory = settings.STATICFILES_DIRS[0]
@register.simple_tag
def css_href(directory=""):
links_css = ""
completed_directory = base_directory +"/css/" + directory
for elem in os.listdir(completed_directory):
        links_css += "<link href='{0}' rel='stylesheet'>".format("/static/css/" + elem)
links_css += "\n"
return mark_safe(links_css)
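# Hedged template usage sketch (tag library loaded by its file name):
#   {% load get_links_assets %}
#   {% css_href %} or {% css_href "admin/" %}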
@register.simple_tag
def js_src(directory=""):
links_js = ""
completed_directory = base_directory +"/js/" + directory
for elem in os.listdir(completed_directory):
links_js +="<script type='text/javascript' src='{0}'></script>".format("/static/js/" + elem)
links_js += "\n"
return mark_safe(links_js)
| Joneyviana/todolist-django-angular | utils/templatetags/get_links_assets.py | Python | mit | 809 |
import numpy as np
from .status import StatusGrid
from .links import link_is_active, find_active_links, LinkGrid
from .links import _split_link_ends
from .cells import CellGrid
from .nodes import NodeGrid
from landlab.utils.decorators import deprecated
def _default_axis_names(n_dims):
"""Returns a tuple of the default axis names."""
_DEFAULT_NAMES = ('z', 'y', 'x')
return _DEFAULT_NAMES[- n_dims:]
def _default_axis_units(n_dims):
"""Returns a tuple of the default axis units."""
return ('-', ) * n_dims
class BaseGrid(object):
"""__init__([coord0, coord1, ...], axis_name=None, axis_units=None)
Parameters
----------
coord0, coord1, ... : sequence of array-like
Coordinates of grid nodes
axis_name : sequence of strings, optional
Names of coordinate axes
axis_units : sequence of strings, optional
Units of coordinate axes
Returns
-------
BaseGrid :
A newly-created BaseGrid
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]))
>>> ngrid.number_of_nodes
4
>>> ngrid.x_at_node
array([ 0., 1., 0., 1.])
>>> ngrid.x_at_node[2]
0.0
>>> ngrid.point_at_node[2]
array([ 1., 0.])
>>> ngrid.coord_at_node[:, [2, 3]]
array([[ 1., 1.],
[ 0., 1.]])
>>> cells = ([0, 1, 2, 1, 3, 2], [3, 3], [0, 1])
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]), cells=cells)
>>> ngrid.number_of_cells
2
>>> ngrid.node_at_cell
array([0, 1])
>>> links = [(0, 2), (1, 3), (0, 1), (1, 2), (0, 3)]
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]), links=zip(*links))
>>> ngrid.number_of_links
5
>>> ngrid.links_leaving_at_node(0)
array([0, 2, 4])
>>> len(ngrid.links_entering_at_node(0)) == 0
True
>>> tails, heads = zip(*links)
>>> grid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]),
... node_status=[0, 0, 0, 4], links=[tails, heads])
>>> grid.status_at_node
array([0, 0, 0, 4])
>>> len(grid.active_links_entering_at_node(0)) == 0
True
>>> grid.active_links_leaving_at_node(0)
array([0, 2])
"""
def __init__(self, nodes, axis_name=None, axis_units=None,
node_status=None, links=None, cells=None):
"""__init__([coord0, coord1, ...], axis_name=None, axis_units=None)
Parameters
----------
coord0, coord1, ... : sequence of array-like
Coordinates of grid nodes
axis_name : sequence of strings, optional
Names of coordinate axes
axis_units : sequence of strings, optional
Units of coordinate axes
Returns
-------
BaseGrid :
A newly-created BaseGrid
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]))
>>> ngrid.number_of_nodes
4
>>> ngrid.x_at_node
array([ 0., 1., 0., 1.])
>>> ngrid.x_at_node[2]
0.0
>>> ngrid.point_at_node[2]
array([ 1., 0.])
>>> ngrid.coord_at_node[:, [2, 3]]
array([[ 1., 1.],
[ 0., 1.]])
>>> cells = ([0, 1, 2, 1, 3, 2], [3, 3], [0, 1])
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]), cells=cells)
>>> ngrid.number_of_cells
2
>>> ngrid.node_at_cell
array([0, 1])
>>> links = [(0, 2), (1, 3), (0, 1), (1, 2), (0, 3)]
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]), links=zip(*links))
>>> ngrid.number_of_links
5
>>> ngrid.links_leaving_at_node(0)
array([0, 2, 4])
>>> len(ngrid.links_entering_at_node(0)) == 0
True
>>> tails, heads = zip(*links)
>>> grid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]),
... node_status=[0, 0, 0, 4], links=[tails, heads])
>>> grid.status_at_node
array([0, 0, 0, 4])
>>> len(grid.active_links_entering_at_node(0)) == 0
True
>>> grid.active_links_leaving_at_node(0)
array([0, 2])
"""
self._node_grid = NodeGrid(nodes)
self._axis_name = tuple(axis_name or _default_axis_names(self.ndim))
self._axis_units = tuple(axis_units or _default_axis_units(self.ndim))
if cells is not None:
try:
self._cell_grid = CellGrid(*cells)
except TypeError:
self._cell_grid = cells
if links is not None:
try:
self._link_grid = LinkGrid(links, self.number_of_nodes)
except TypeError:
self._link_grid = links
if node_status is not None:
self._status_grid = StatusGrid(node_status)
if links is not None and node_status is not None:
links = _split_link_ends(links)
self._active_link_grid = BaseGrid.create_active_link_grid(
self.status_at_node, links, self.number_of_nodes)
@staticmethod
def create_active_link_grid(node_status, links, number_of_nodes):
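        """Build a LinkGrid containing only the active links, given node status and link endpoints."""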
active_link_ids = find_active_links(node_status, links)
return LinkGrid((links[0][active_link_ids], links[1][active_link_ids]),
number_of_nodes, link_ids=active_link_ids)
@property
def ndim(self):
return self._node_grid.ndim
@property
def axis_units(self):
"""Coordinate units of each axis.
Returns
-------
tuple of strings :
Coordinate units of each axis.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> ngrid = BaseGrid(([0, 1, 0], [1, 1, 0]))
>>> ngrid.axis_units
('-', '-')
>>> ngrid = BaseGrid(([0, 1, 0], [1, 1, 0]),
... axis_units=['degrees_north', 'degrees_east'])
>>> ngrid.axis_units
('degrees_north', 'degrees_east')
"""
return self._axis_units
@property
def axis_name(self):
"""Name of each axis.
Returns
-------
tuple of strings :
Names of each axis.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> ngrid = BaseGrid(([0, 1, 0], [1, 1, 0]))
>>> ngrid.axis_name
('y', 'x')
>>> ngrid = BaseGrid(([0, 1, 0], [1, 1, 0]), axis_name=['lat', 'lon'])
>>> ngrid.axis_name
('lat', 'lon')
"""
return self._axis_name
@property
def number_of_links(self):
"""Number of links.
"""
return self._link_grid.number_of_links
@property
def number_of_cells(self):
"""Number of cells.
"""
return self._cell_grid.number_of_cells
@property
def number_of_nodes(self):
"""Number of nodes.
"""
return self._node_grid.number_of_nodes
@property
def coord_at_node(self):
return self._node_grid.coord
@property
def x_at_node(self):
return self._node_grid.x
@property
def y_at_node(self):
return self._node_grid.y
@property
def point_at_node(self):
return self._node_grid.point
def links_leaving_at_node(self, node):
return self._link_grid.out_link_at_node(node)
def links_entering_at_node(self, node):
return self._link_grid.in_link_at_node(node)
def active_links_leaving_at_node(self, node):
return self._active_link_grid.out_link_at_node(node)
def active_links_entering_at_node(self, node):
return self._active_link_grid.in_link_at_node(node)
@property
def node_at_link_start(self):
return self._link_grid.node_at_link_start
@property
def node_at_link_end(self):
return self._link_grid.node_at_link_end
@property
def node_at_cell(self):
return self._cell_grid.node_at_cell
@property
def cell_at_node(self):
return self._cell_grid.cell_at_node
def core_cells(self):
        return self.cell_at_node[self.core_nodes()]
@property
def status_at_node(self):
return self._status_grid.node_status
@status_at_node.setter
def status_at_node(self, status):
self._status_grid.node_status = status
self._active_link_grid = BaseGrid.create_active_link_grid(
self.status_at_node, (self.node_at_link_start,
self.node_at_link_end), self.number_of_nodes)
def active_nodes(self):
return self._status_grid.active_nodes()
def core_nodes(self):
return self._status_grid.core_nodes()
def boundary_nodes(self):
return self._status_grid.boundary_nodes()
def closed_boundary_nodes(self):
return self._status_grid.closed_boundary_nodes()
def fixed_gradient_boundary_nodes(self):
return self._status_grid.fixed_gradient_boundary_nodes()
def fixed_value_boundary_nodes(self):
return self._status_grid.fixed_value_boundary_nodes()
def active_links(self):
return self._active_link_grid.link_id
@deprecated(use='length_of_link', version=1.0)
def link_length(self, link=None):
return self.length_of_link(link=link)
def length_of_link(self, link=None):
"""Length of grid links.
Parameters
----------
link : array-like, optional
Link IDs
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> links = [(0, 2), (1, 3), (0, 1), (2, 3), (0, 3)]
>>> grid = BaseGrid(([0, 0, 4, 4], [0, 3, 0, 3]), links=links)
>>> grid.length_of_link()
array([ 4., 4., 3., 3., 5.])
>>> grid.length_of_link(0)
array([ 4.])
>>> grid.length_of_link().min()
3.0
>>> grid.length_of_link().max()
5.0
"""
if link is None:
node0, node1 = (self.node_at_link_start, self.node_at_link_end)
else:
node0, node1 = (self.node_at_link_start[link],
self.node_at_link_end[link])
return self.node_to_node_distance(node0, node1)
def node_to_node_distance(self, node0, node1, out=None):
"""Distance between nodes.
Parameters
----------
node0 : array-like
Node ID of start
node1 : array-like
Node ID of end
Returns
-------
array :
Distances between nodes.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> grid = BaseGrid(([0, 0, 4, 4], [0, 3, 0, 3]))
>>> grid.node_to_node_distance(0, 3)
array([ 5.])
>>> grid.node_to_node_distance(0, [0, 1, 2, 3])
array([ 0., 3., 4., 5.])
"""
return point_to_point_distance(
self._get_coord_at_node(node0), self._get_coord_at_node(node1),
out=out)
def point_to_node_distance(self, point, node=None, out=None):
"""Distance from a point to a node.
Parameters
----------
point : tuple
Coordinates of point
node : array-like
Node IDs
Returns
-------
array :
Distances from point to node.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> grid = BaseGrid(([0, 0, 4, 4], [0, 3, 0, 3]))
>>> grid.point_to_node_distance((0., 0.), [1, 2, 3])
array([ 3., 4., 5.])
>>> grid.point_to_node_distance((0., 0.))
array([ 0., 3., 4., 5.])
>>> out = np.empty(4)
>>> out is grid.point_to_node_distance((0., 0.), out=out)
True
>>> out
array([ 0., 3., 4., 5.])
"""
return point_to_point_distance(point, self._get_coord_at_node(node),
out=out)
def point_to_node_angle(self, point, node=None, out=None):
"""Angle from a point to a node.
Parameters
----------
point : tuple
Coordinates of point
node : array-like
Node IDs
Returns
-------
array :
Angles from point to node as radians.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> grid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]))
>>> grid.point_to_node_angle((0., 0.), [1, 2, 3]) / np.pi
array([ 0. , 0.5 , 0.25])
>>> grid.point_to_node_angle((0., 0.)) / np.pi
array([ 0. , 0. , 0.5 , 0.25])
>>> out = np.empty(4)
>>> out is grid.point_to_node_angle((0., 0.), out=out)
True
>>> out / np.pi
array([ 0. , 0. , 0.5 , 0.25])
"""
return point_to_point_angle(point, self._get_coord_at_node(node),
out=out)
def point_to_node_azimuth(self, point, node=None, out=None):
"""Azimuth from a point to a node.
Parameters
----------
point : tuple
Coordinates of point
node : array-like
Node IDs
Returns
-------
array :
Azimuths from point to node.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> grid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]))
>>> grid.point_to_node_azimuth((0., 0.), [1, 2, 3])
array([ 90., 0., 45.])
>>> grid.point_to_node_azimuth((0., 0.))
array([ 90., 90., 0., 45.])
>>> grid.point_to_node_azimuth((0., 0.), 1)
array([ 90.])
>>> out = np.empty(4)
>>> out is grid.point_to_node_azimuth((0., 0.), out=out)
True
>>> out
array([ 90., 90., 0., 45.])
"""
return point_to_point_azimuth(point, self._get_coord_at_node(node),
out=out)
def point_to_node_vector(self, point, node=None, out=None):
"""Azimuth from a point to a node.
Parameters
----------
point : tuple
Coordinates of point
node : array-like
Node IDs
Returns
-------
array :
Vector from point to node.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> grid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]))
>>> grid.point_to_node_vector((0., 0.), [1, 2, 3])
array([[ 0., 1., 1.],
[ 1., 0., 1.]])
>>> grid.point_to_node_vector((0., 0.))
array([[ 0., 0., 1., 1.],
[ 0., 1., 0., 1.]])
>>> grid.point_to_node_vector((0., 0.), 1)
array([[ 0.],
[ 1.]])
>>> out = np.empty((2, 1))
>>> out is grid.point_to_node_vector((0., 0.), 1, out=out)
True
>>> out
array([[ 0.],
[ 1.]])
"""
return point_to_point_vector(point, self._get_coord_at_node(node),
out=out)
def _get_coord_at_node(self, node=None):
if node is None:
return self.coord_at_node
else:
return self.coord_at_node[:, node].reshape((2, -1))
def point_to_point_distance(point0, point1, out=None):
"""Length of vector that joins two points.
Parameters
----------
(y0, x0) : tuple of array_like
(y1, x1) : tuple of array_like
out : array_like, optional
An array to store the output. Must be the same shape as the output
would have.
Returns
-------
l : array_like
Length of vector joining points; if *out* is provided, *v* will be
equal to *out*.
Examples
--------
>>> from landlab.grid.unstructured.base import point_to_point_distance
>>> point_to_point_distance((0, 0), (3, 4))
array([ 5.])
>>> point_to_point_distance((0, 0), ([3, 6], [4, 8]))
array([ 5., 10.])
"""
point0 = np.reshape(point0, (2, -1))
point1 = np.reshape(point1, (2, -1))
if out is None:
sum_of_squares = np.sum((point1 - point0) ** 2., axis=0)
return np.sqrt(sum_of_squares)
else:
sum_of_squares = np.sum((point1 - point0) ** 2., axis=0, out=out)
return np.sqrt(sum_of_squares, out=out)
def point_to_point_angle(point0, point1, out=None):
"""Angle of vector that joins two points.
Parameters
----------
(y0, x0) : tuple of array_like
(y1, x1) : tuple of array_like
out : array_like, optional
An array to store the output. Must be the same shape as the output
would have.
Returns
-------
a : array_like
Angle of vector joining points; if *out* is provided, *v* will be equal
to *out*.
"""
point0 = np.reshape(point0, (-1, 1))
diff = point_to_point_vector(point0, point1)
if out is None:
return np.arctan2(diff[0], diff[1])
else:
return np.arctan2(diff[0], diff[1], out=out)
def point_to_point_azimuth(point0, point1, out=None):
"""Azimuth of vector that joins two points.
Parameters
----------
(y0, x0) : tuple of array_like
(y1, x1) : tuple of array_like
out : array_like, optional
An array to store the output. Must be the same shape as the output
would have.
Returns
-------
azimuth : array_like
Azimuth of vector joining points; if *out* is provided, *v* will be
equal to *out*.
Examples
--------
>>> from landlab.grid.unstructured.base import point_to_point_azimuth
>>> point_to_point_azimuth((0, 0), (1, 0))
array([ 0.])
>>> point_to_point_azimuth([(0, 1), (0, 1)], (1, 0))
array([ 0., -90.])
>>> point_to_point_azimuth([(0, 1, 0), (0, 1, 2)], [(1, 1, 2), (0, 0, 4)])
array([ 0., -90., 45.])
"""
azimuth_in_rads = point_to_point_angle(point0, point1, out=out)
if out is None:
return (np.pi * .5 - azimuth_in_rads) * 180. / np.pi
else:
np.subtract(np.pi * .5, azimuth_in_rads, out=out)
return np.multiply(out, 180. / np.pi, out=out)
def point_to_point_vector(point0, point1, out=None):
"""Vector that joins two points.
Parameters
----------
(y0, x0) : tuple of array_like
(y1, x1) : tuple of array_like
out : array_like, optional
An array to store the output. Must be the same shape as the output
would have.
Returns
-------
(dy, dx) : tuple of array_like
Vectors between points; if *out* is provided, *v* will be equal to
*out*.
Examples
--------
>>> from landlab.grid.unstructured.base import point_to_point_vector
>>> point_to_point_vector((0, 0), (1, 2))
array([[1],
[2]])
>>> point_to_point_vector([(0, 1), (0, 1)], (1, 2))
array([[1, 0],
[2, 1]])
>>> point_to_point_vector([(0, 0, 0), (0, 1, 2)], [(1, 2, 2), (2, 4, 4)])
array([[1, 2, 2],
[2, 3, 2]])
"""
point0 = np.reshape(point0, (2, -1))
point1 = np.reshape(point1, (2, -1))
if out is None:
return np.subtract(point1, point0)
else:
return np.subtract(point1, point0, out=out)
| csherwood-usgs/landlab | landlab/grid/unstructured/base.py | Python | mit | 19,608 |
"""
DFO-GN
====================
A derivative-free solver for least squares minimisation with bound constraints
Call structure is:
    soln = solve(objfun, x0, lower=None, upper=None,
                 maxfun=1000, rhobeg=None, rhoend=1e-8)
Required inputs:
    objfun       Objective function, callable as: residual_vector = objfun(x)
    x0           Initial starting point, NumPy ndarray
Optional inputs:
    lower, upper Lower and upper bound constraints (lower <= x <= upper),
                 must be NumPy ndarrays of same size as x0 (default +/-1e20)
    maxfun       Maximum number of allowable function evaluations (default 1000)
    rhobeg       Initial trust region radius (default 0.1*max(1, ||x0||_infty))
    rhoend       Termination condition on trust region radius (default 1e-8)
Output is an OptimResults object with fields:
    x            Estimate of minimiser
    resid        Residual vector objfun(x) at the solution
    f            Value of least squares objective at x (f = ||objfun(x)||^2)
    jacobian     Approximate Jacobian of the residuals at x
    nf           Number of objective evaluations used to find x
    flag         Integer flag indicating termination criterion (see list below imports)
    msg          String with more detailed termination message
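Example (an illustrative sketch; assumes the package is importable as dfogn):
    import numpy as np
    import dfogn
    def rosenbrock_resid(x):
        return np.array([10.0 * (x[1] - x[0] ** 2), 1.0 - x[0]])
    soln = dfogn.solve(rosenbrock_resid, np.array([-1.2, 1.0]))
    print(soln.x, soln.f, soln.nf)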
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The development of this software was sponsored by NAG Ltd. (http://www.nag.co.uk)
and the EPSRC Centre For Doctoral Training in Industrially Focused Mathematical
Modelling (EP/L015803/1) at the University of Oxford. Please contact NAG for
alternative licensing.
Copyright 2017, Lindon Roberts
"""
# Ensure compatibility with Python 2
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from math import sqrt
import numpy as np
import scipy.linalg as sp_linalg
import warnings
from .util import *
from .trust_region import *
from .alternative_move import *
from .linear_altmov import *
__all__ = ['solve', 'EXIT_SUCCESS', 'EXIT_INPUT_ERROR', 'EXIT_MAXFUN_WARNING', 'EXIT_TR_INCREASE_ERROR',
'EXIT_LINALG_ERROR', 'EXIT_ALTMOV_MEMORY_ERROR']
#######################
# Exit codes
EXIT_SUCCESS = 0 # successful finish (rho=rhoend or sufficient objective reduction)
EXIT_INPUT_ERROR = 1 # error, bad inputs
EXIT_MAXFUN_WARNING = 2 # warning, reached max function evals
EXIT_TR_INCREASE_ERROR = 3 # error, trust region step increased model value
EXIT_LINALG_ERROR = 4 # error, linalg error (singular matrix encountered)
EXIT_ALTMOV_MEMORY_ERROR = 5 # error, stpsav issue in ALTMOV
#######################
class OptimResults:
def __init__(self, xmin, rmin, fmin, jacmin, nf, exit_flag, exit_msg):
self.x = xmin
self.resid = rmin
self.f = fmin
self.jacobian = jacmin
self.nf = nf
self.flag = exit_flag
self.msg = exit_msg
# Set standard names for exit flags
self.EXIT_MAXFUN_WARNING = EXIT_MAXFUN_WARNING
self.EXIT_SUCCESS = EXIT_SUCCESS
self.EXIT_INPUT_ERROR = EXIT_INPUT_ERROR
self.EXIT_TR_INCREASE_ERROR = EXIT_TR_INCREASE_ERROR
self.EXIT_LINALG_ERROR = EXIT_LINALG_ERROR
self.EXIT_ALTMOV_MEMORY_ERROR = EXIT_ALTMOV_MEMORY_ERROR
class Model:
def __init__(self, n, m, npt, x0, xl, xu):
assert npt==n+1, "Require strictly linear model"
# Problem sizes
self.n = n
self.m = m
self.npt = npt
# Actual model info
# Here, the model for each residual is centred around xbase
# m(x) = model_const_term + gqv*(x-xbase)
self.kbase = 0 # index of base point
self.xbase = x0 # base point
self.xl = xl # lower bounds (absolute terms)
self.xu = xu # upper bounds (absolute terms)
self.sl = xl - x0 # lower bounds (adjusted for xbase), should be -ve (actually < -rhobeg)
self.su = xu - x0 # upper bounds (adjusted for xbase), should be +ve (actually > rhobeg)
self.xpt = np.zeros((npt, n)) # interpolation points
self.fval_v = np.zeros((npt, m)) # residual vectors at each xpt(+xbase)
self.fval = np.zeros((npt, )) # total sum of squares at each xpt(+xbase)
self.model_const_term_v = np.zeros((m,)) # constant term of each mini-model
self.gqv = np.zeros((n, m)) # interpolated gradients for each mini-model
self.kopt = None # index of current best x
self.fbeg = None # initial sum of squares at x0
self.xsave = None # possible final return value (abs coords)
self.rsave = None # residuals for possible final return value
self.fsave = None # sum of squares for final return value
self.jacsave = None # approximate Jacobian at possible final return value
self.lu = None # LU decomp of interp matrix
self.piv = None # pivots for LU decomposition of interp matrix
self.lu_current = False # whether current LU factorisation of interp matrix is up-to-date or not
self.EXACT_CONST_TERM = True # use exact c=r(xopt) for interpolation (improve conditioning)
# Affects mini-model interpolation / interpolation matrix, but also geometry updating
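        # With EXACT_CONST_TERM the i-th mini-model is m_i(x) = r_i(xopt) + gqv[:, i] . (x - xopt),
        # so only the n gradient entries are interpolated (an n x n system over the non-kopt points);
        # otherwise the constant term is interpolated as well, giving an (n+1) x (n+1) system.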
def x_within_bounds(self, k=None, x=None):
# Get x value for k-th point or x vector (in absolute terms, force within bounds)
if k is not None:
return np.minimum(np.maximum(self.xl, self.xbase + self.xpt[k, :]), self.xu)
elif x is not None:
return np.minimum(np.maximum(self.xl, self.xbase + x), self.xu)
else:
return None
def xopt(self):
# Current best x (relative to xbase)
return self.xpt[self.kopt, :].copy()
def fval_v_opt(self):
return self.fval_v[self.kopt,:]
def fval_opt(self):
return self.fval[self.kopt]
def update_point(self, knew, xnew, v_err, f):
# Add point xnew with objective vector v_err (full objective f) at the knew-th index
self.xpt[knew,:] = xnew
self.fval_v[knew, :] = v_err
self.fval[knew] = f
# Update XOPT, GOPT and KOPT if the new calculated F is less than FOPT.
if f < self.fval_opt():
self.kopt = knew
self.lu_current = False
return
def gqv_at_xopt(self):
return self.gqv
def shift_base(self, xbase_shift):
for m1 in range(self.m):
self.model_const_term_v[m1] += np.dot(self.gqv[:, m1], xbase_shift)
# The main updates
for k in range(self.npt):
self.xpt[k, :] = self.xpt[k, :] - xbase_shift
self.xbase += xbase_shift
self.sl = self.sl - xbase_shift
self.su = self.su - xbase_shift
self.lu_current = False
self.factorise_LU()
return
def interpolate_mini_models(self):
# Build interpolation matrix and factorise (in self.lu, self.piv)
try:
self.factorise_LU()
if self.EXACT_CONST_TERM:
idx_to_use = [k for k in range(self.npt) if k != self.kopt]
for m1 in range(self.m):
rhs = np.zeros((self.n,))
for i in range(self.n):
k = idx_to_use[i]
rhs[i] = self.fval_v[k, m1] - self.fval_v[self.kopt, m1] - \
np.dot(self.gqv[:, m1], self.xpt[k, :] - self.xopt())
soln = sp_linalg.lu_solve((self.lu, self.piv), rhs)
self.gqv[:, m1] += soln # whole solution is gradient
# shift constant term back
self.model_const_term_v = self.fval_v[self.kopt, :] - np.dot(self.gqv.T, self.xopt())
return True # flag ok
else:
model_values_v = np.zeros((self.npt, self.m))
for k in range(self.npt):
model_values_v[k, :] = self.predicted_values(self.xpt[k, :], d_based_at_xopt=False,
with_const_term=True)
# Sometimes when things get too close to a solution, we can get NaNs in model_values - flag error & quit
if np.any(np.isnan(model_values_v)):
self.gqv = None
return False # flag error
for m1 in range(self.m):
rhs = self.fval_v[:, m1] - model_values_v[:, m1]
soln = sp_linalg.lu_solve((self.lu, self.piv), rhs)
self.model_const_term_v[m1] += soln[0]
self.gqv[:, m1] += soln[1:] # first term is constant, rest is gradient term
return True # flag ok
except np.linalg.LinAlgError:
self.gqv = None
return False # flag error
except ValueError: # happens when LU decomposition has Inf or NaN
self.gqv = None
return False # flag error
def factorise_LU(self):
if not self.lu_current:
Wmat = self.build_interp_matrix()
self.lu, self.piv = sp_linalg.lu_factor(Wmat) # LU has L and U parts, piv indicates row swaps for pivoting
self.lu_current = True
return
def solve_LU(self, rhs):
# If lu_current, use that, otherwise revert to generic solver
if self.lu_current:
if self.EXACT_CONST_TERM:
return sp_linalg.lu_solve((self.lu, self.piv), rhs) # only get gradient (no const term)
else:
return sp_linalg.lu_solve((self.lu, self.piv), rhs)[1:] # only return gradient (1st term is constant)
else:
logging.warning("model.solve_LU not using factorisation")
Wmat = self.build_interp_matrix()
if self.EXACT_CONST_TERM:
return np.linalg.solve(Wmat, rhs) # only get gradient (no const term)
else:
return np.linalg.solve(Wmat, rhs)[1:] # only return gradient (1st term is constant)
def get_final_results(self):
# Called when about to exit BOBYQB
# Return x and fval for optimal point (either from xsave+fsave or kopt)
if self.fval_opt() <= self.fsave: # optimal has changed since xsave+fsave were last set
x = self.x_within_bounds(k=self.kopt)
rvec = self.fval_v_opt()
f = self.fval_opt()
jacmin = self.gqv_at_xopt().T
else:
x = self.xsave
rvec = self.rsave
f = self.fsave
jacmin = self.jacsave
return x, rvec, f, jacmin
def build_full_model(self):
# Build full least squares objective model from mini-models
# Centred around xopt = xpt[kopt, :]
v_temp = self.fval_v_opt() # m-vector
gqv_xopt = self.gqv_at_xopt() # J^T (transpose of Jacobian) at xopt, rather than xbase
# Use the gradient at xopt to formulate \sum_i (2*f_i \nabla f_i) = 2 J^t m(x_opt)
gopt = np.dot(gqv_xopt, v_temp) # n-vector (gqv = J^T)
# Gauss-Newton part of Hessian
hq = to_upper_triangular_vector(np.dot(gqv_xopt, gqv_xopt.T))
# Apply scaling based on convention for objective - this code uses sumsq(r_i) not 0.5*sumsq(r_i)
gopt = 2.0 * gopt
hq = 2.0 * hq
return gopt, hq
def build_interp_matrix(self):
if self.EXACT_CONST_TERM:
Wmat = np.zeros((self.n, self.n))
idx_to_use = [k for k in range(self.npt) if k != self.kopt]
for i in range(self.n):
Wmat[i,:] = self.xpt[idx_to_use[i], :] - self.xopt()
else:
Wmat = np.zeros((self.n + 1, self.n + 1))
Wmat[:, 0] = 1.0
Wmat[:, 1:] = self.xpt # size npt * n
return Wmat
def predicted_values(self, d, d_based_at_xopt=True, with_const_term=False):
if d_based_at_xopt:
Jd = np.dot(self.gqv.T, d + self.xopt()) # J^T * d (where Jacobian J = self.gqv^T)
else: # d based at xbase
Jd = np.dot(self.gqv.T, d) # J^T * d (where Jacobian J = self.gqv^T)
return Jd + (self.model_const_term_v if with_const_term else 0.0)
def square_distances_to_xopt(self):
sq_distances = np.zeros((self.npt,))
for k in range(self.npt):
sq_distances[k] = sumsq(self.xpt[k, :] - self.xopt())
return sq_distances
def min_objective_value(self, abs_tol=1.0e-12, rel_tol=1.0e-20):
# Set a minimum value so that if the full objective falls below it, we immediately finish
return max(abs_tol, rel_tol*self.fbeg)
def build_initial_set(objfun, x0, xl, xu, rhobeg, maxfun):
# Evaluate at initial point (also gets us m)
v_err0, f0 = eval_least_squares_objective(objfun, x0, eval_num=1)
# Get dimension of problem and number of sample points from x0 and v_err0 information
n = np.size(x0)
npt = n + 1
m = np.size(v_err0)
# Initialise model (sets x0 as base point and xpt = zeros, so xpt[0,:] = x0)
model = Model(n, m, npt, x0, xl, xu)
# Build initial sample set
at_upper_boundary = (model.su < 0.01 * rhobeg) # su = xu - x0, should be +ve, actually > rhobeg
for k in range(n):
step_size = (rhobeg if not at_upper_boundary[k] else -rhobeg)
model.xpt[k+1, k] = step_size
# Add results of objective evaluation at x0
model.fval_v[0, :] = v_err0
model.fval[0] = f0
model.kopt = 0
model.fbeg = f0
model.xsave = x0.copy()
model.rsave = v_err0.copy()
model.fsave = f0
    model.jacsave = np.zeros((m, n))
# Evaluate objective at each point in the initial sample set
for nf in range(1, min(npt, maxfun)):
x = model.x_within_bounds(k=nf)
v_err, f = eval_least_squares_objective(objfun, x, eval_num=nf+1) # nf is one behind because of f(x0)
model.fval[nf] = f
model.fval_v[nf, :] = v_err
if f < model.fval_opt(): # update optimal point
model.kopt = nf
return model
def altmov_wrapper(model, knew, adelt):
model.factorise_LU()
# First need to get knew-th column of H matrix
if model.EXACT_CONST_TERM:
if knew == model.kopt:
ek = -np.ones((model.n, )) # matrix based on (y-xk), so different geom structure for kopt
else:
ek = np.zeros((model.n, ))
if knew < model.kopt:
ek[knew] = 1.0
else:
ek[knew - 1] = 1.0
H_knew = model.solve_LU(ek)
else:
ek = np.zeros((model.n + 1,))
ek[knew] = 1.0
H_knew = model.solve_LU(ek)
xnew, xalt, cauchy, abs_denom = altmov(model.xpt, model.sl, model.su, model.kopt,
model.xopt(), knew, adelt, H_knew)
# abs_denom is Lagrange_knew evaluated at xnew
return xnew, xalt, cauchy, abs_denom
def altmov_wrapper_v2(model, knew, adelt):
model.factorise_LU()
# First need to get knew-th column of H matrix
if model.EXACT_CONST_TERM:
if knew == model.kopt:
ek = -np.ones((model.n, )) # matrix based on (y-xk), so different geom structure for kopt
else:
ek = np.zeros((model.n, ))
if knew < model.kopt:
ek[knew] = 1.0
else:
ek[knew - 1] = 1.0
g = model.solve_LU(ek) # H_knew
else:
ek = np.zeros((model.n + 1,))
ek[knew] = 1.0
g = model.solve_LU(ek) # H_knew
c = 1 if knew == model.kopt else 0 # c, g are for knew-th Lagrange polynomial, based at xopt (c + g*(x-xopt))
xnew = max_step_in_box_and_ball(model.xopt(), c, g, model.sl, model.su, adelt)
return xnew
def choose_knew(model, delta, xnew, skip_kopt=True):
# in model, uses: n, npt, xpt, kopt/xopt, build_interp_matrix()
# model unchanged by this method
# Criteria is to maximise: max(1, ||yt-xk||^4/Delta^4) * abs(Lagrange_t(xnew))
# skip_kopt determines whether to check t=kopt as a possible candidate or not
model.factorise_LU() # Prep for linear solves
delsq = delta ** 2
scaden = -1.0
    knew = None  # set in the loop below; stays None only if every candidate k is skipped
try:
for k in range(model.npt):
if skip_kopt and k == model.kopt:
continue # next k in this inner loop
if model.EXACT_CONST_TERM:
if k == model.kopt:
ek = -np.ones((model.n,)) # matrix based on (y-xk), so different geom structure for kopt
else:
ek = np.zeros((model.n, ))
if k < model.kopt:
ek[k] = 1.0
else:
ek[k-1] = 1.0
Hk = model.solve_LU(ek)
else:
ek = np.zeros((model.n + 1,))
ek[k] = 1.0
Hk = model.solve_LU(ek) # k-th column of H, except 1st entry (i.e. Lagrange polynomial gradient)
lagrange_k_at_d = 1.0 + np.dot(xnew-model.xpt[k, :], Hk)
distsq = sumsq(model.xpt[k, :] - model.xopt())
temp = max(1.0, (distsq / delsq) ** 2)
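            # temp = max(1, (||y_k - xopt||^2 / delta^2)^2), i.e. the ||y_k - xopt||^4 / Delta^4
            # distance weighting from the criterion described above.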
if temp * abs(lagrange_k_at_d) > scaden:
scaden = temp * abs(lagrange_k_at_d)
knew = k
linalg_error = False
except np.linalg.LinAlgError:
linalg_error = True
return knew, linalg_error
def trust_region_subproblem_least_squares(model, delta):
# in model, uses: n, npt, xpt, kopt/xopt, sl, su, build_full_model()
# model unchanged by this method
# Build model for full least squares objectives
gopt, hq = model.build_full_model()
# Call original BOBYQA trsbox function
d, gnew, crvmin = trsbox(model.xopt(), gopt, hq, model.sl, model.su, delta)
return d, gopt, hq, gnew, crvmin
def done_with_current_rho(model, nf, nfsav, rho, diffs, xnew, gnew, hq, crvmin):
# in model, uses: n, sl, su
# model unchanged by this method
if nf <= nfsav + 2:
return False
errbig = max(diffs)
frhosq = 0.125 * rho ** 2
if crvmin > 0.0 and errbig > frhosq * crvmin:
return False
bdtol = errbig / rho
for j in range(model.n):
bdtest = bdtol
if xnew[j] == model.sl[j]:
bdtest = gnew[j]
if xnew[j] == model.su[j]:
bdtest = -gnew[j]
if bdtest < bdtol:
curv = get_hessian_element(model.n, hq, j, j) # curv = Hessian(j, j)
bdtest += 0.5 * curv * rho
if bdtest < bdtol:
return False
return True
def reduce_rho(old_rho, rhoend):
ratio = old_rho/rhoend
if ratio <= 16.0:
new_rho = rhoend
elif ratio <= 250.0:
new_rho = sqrt(ratio)*rhoend
else:
new_rho = 0.1*old_rho
delta = max(0.5*old_rho, new_rho)
return delta, new_rho
def check_and_fix_geometry(model, objfun, distsq, delta, rho, dnorm, diffs, nf, nfsav, maxfun, rounding_error_const,
update_delta=True):
# [Fortran label 650]
# If any xpt more than distsq away from xopt, fix geometry
knew_tmp, distsq_tmp = get_vector_max(all_square_distances(model.xpt, model.xopt()))
if distsq_tmp > distsq: # fix geometry and quit
knew = knew_tmp
distsq = distsq_tmp
dist = sqrt(distsq)
if update_delta: # optional
delta = max(min(0.1 * delta, 0.5 * dist), 1.5 * rho) # use 0.5*dist, within range [0.1*delta, 1.5*rho]
adelt = max(min(0.1 * dist, delta), rho)
if adelt ** 2 <= rounding_error_const * sumsq(model.xopt()):
model.shift_base(model.xopt())
model, nf, nfsav, diffs, return_to_new_tr_iteration, exit_flag, exit_str \
= fix_geometry(model, objfun, knew, adelt, rho, dnorm, diffs, nf, nfsav, maxfun)
return model, delta, nf, nfsav, diffs, return_to_new_tr_iteration, exit_flag, exit_str
else:
# Do nothing, just quit
# return_to_new_tr_iteration = None when didn't fix geometry
return model, delta, nf, nfsav, diffs, None, None, None
def fix_geometry(model, objfun, knew, adelt, rho, dnorm, diffs, nf, nfsav, maxfun):
# in model, uses: n, npt, xpt, sl, su, kopt/xopt, build_interp_metrix, and others
# model is changed by this function: gqv from interp_mini_models, and others
USE_OLD_ALTMOV = False
try:
if USE_OLD_ALTMOV:
xnew, xalt, cauchy, denom = altmov_wrapper(model, knew, adelt)
else:
xnew = altmov_wrapper_v2(model, knew, adelt)
xalt = None
cauchy = None
denom = None
except np.linalg.LinAlgError:
exit_flag = EXIT_LINALG_ERROR
exit_str = "Singular matrix encountered in ALTMOV"
return_to_new_tr_iteration = False # return and quit
return model, nf, nfsav, diffs, return_to_new_tr_iteration, exit_flag, exit_str
if xnew is None: # issue with stpsav occurred, quit DFOGN
exit_flag = EXIT_ALTMOV_MEMORY_ERROR
exit_str = "Error in ALTMOV - stpsav undefined"
return_to_new_tr_iteration = False # return and quit
return model, nf, nfsav, diffs, return_to_new_tr_iteration, exit_flag, exit_str
if USE_OLD_ALTMOV and denom < cauchy and cauchy > 0.0:
xnew = xalt.copy()
d = xnew - model.xopt()
# [Fortran label 360]
x = model.x_within_bounds(x=xnew)
if nf >= maxfun:
exit_flag = EXIT_MAXFUN_WARNING
exit_str = "Objective has been called MAXFUN times"
return_to_new_tr_iteration = False # return and quit
return model, nf, nfsav, diffs, return_to_new_tr_iteration, exit_flag, exit_str
nf += 1
v_err, f = eval_least_squares_objective(objfun, x, eval_num=nf)
if f <= model.min_objective_value():
# Force model.get_final_results() to return this new point if it's better than xopt, then quit
model.xsave = x
model.rsave = v_err.copy()
model.fsave = f
model.jacsave = model.gqv_at_xopt().T
exit_flag = EXIT_SUCCESS
exit_str = "Sufficient reduction in objective value"
return_to_new_tr_iteration = False # return and quit
return model, nf, nfsav, diffs, return_to_new_tr_iteration, exit_flag, exit_str
# Use the quadratic model to predict the change in F due to the step D,
# and set DIFF to the error of this prediction.
gopt, hq = model.build_full_model()
if gopt is None: # Use this to indicate linalg error
if f < model.fval_opt():
# Force model.get_final_results() to return this new point if it's better than xopt, then quit
model.xsave = x
model.rsave = v_err.copy()
model.fsave = f
model.jacsave = model.gqv_at_xopt().T
exit_flag = EXIT_LINALG_ERROR
exit_str = "Singular matrix encountered in FIX_GEOMETRY (full model interpolation step)"
return_to_new_tr_iteration = False # return and quit
return model, nf, nfsav, diffs, return_to_new_tr_iteration, exit_flag, exit_str
pred_reduction = - calculate_model_value(gopt, hq, d)
actual_reduction = model.fval_opt() - f
diffs = [abs(pred_reduction - actual_reduction), diffs[0], diffs[1]]
if dnorm > rho:
nfsav = nf
# Update bmat, zmat, gopt, etc. (up to label ~560)
model.update_point(knew, xnew, v_err, f)
exit_flag = None
exit_str = None
return_to_new_tr_iteration = True # return and start new trust region iteration (label 60)
return model, nf, nfsav, diffs, return_to_new_tr_iteration, exit_flag, exit_str
def dfogn_main(objfun, x0, xl, xu, rhobeg, rhoend, maxfun):
exit_flag = None
exit_str = None
# One variable in BOBYQB depends on which code form we are using
if zhang_code_structure:
rounding_error_const = 0.1 # Zhang code
else:
rounding_error_const = 1.0e-3 # BOBYQA
###########################################################
# Set up initial interpolation set
###########################################################
model = build_initial_set(objfun, x0, xl, xu, rhobeg, maxfun)
if maxfun < model.npt:
exit_flag = EXIT_MAXFUN_WARNING
exit_str = "Objective has been called MAXFUN times"
x, rvec, f, jacmin = model.get_final_results()
return x, rvec, f, jacmin, maxfun, exit_flag, exit_str
# return x, f, maxfun, exit_flag, exit_str
###########################################################
# Set other variables before begin iterations
###########################################################
finished_main_loop = False
(rho, delta) = (rhobeg, rhobeg)
nf = min(maxfun, model.npt) # number of function evaluations so far
nfsav = nf
diffs = [0.0, 0.0, 0.0] # (diffa, diffb, diffc) in Fortran code, used in done_with_current_rho()
###########################################################
# Start of main loop [Fortran label 60]
###########################################################
while not finished_main_loop:
# Interpolate each mini-model
interp_ok = model.interpolate_mini_models()
if not interp_ok:
exit_flag = EXIT_LINALG_ERROR
exit_str = "Singular matrix in mini-model interpolation (main loop)"
finished_main_loop = True
break # quit
# Solve trust region subproblem to get tentative step d
# Model for full least squares objective is given by (gopt, hq)
d, gopt, hq, gnew, crvmin = trust_region_subproblem_least_squares(model, delta)
logging.debug("Trust region step is d = " + str(d))
xnew = model.xopt() + d
dsq = sumsq(d)
dnorm = min(delta, sqrt(dsq))
if dnorm < 0.5 * rho:
###################
# Start failed TR step
###################
logging.debug("Failed trust region step")
if not done_with_current_rho(model, nf, nfsav, rho, diffs, xnew, gnew, hq, crvmin):
# [Fortran label 650]
distsq = (10.0 * rho) ** 2
model, delta, nf, nfsav, diffs, return_to_new_tr_iteration, geom_exit_flag, geom_exit_str = \
check_and_fix_geometry(model, objfun, distsq, delta, rho, dnorm, diffs, nf, nfsav, maxfun,
rounding_error_const, update_delta=True)
if return_to_new_tr_iteration is not None: # i.e. if we did actually fix geometry
if return_to_new_tr_iteration:
finished_main_loop = False
continue # next trust region step
else: # quit
exit_flag = geom_exit_flag
exit_str = geom_exit_str
finished_main_loop = True
break # quit
# If we didn't fix geometry, reduce rho as below
# otherwise, if we are done with current rho, reduce rho as below
# Reduce rho and continue [Fortran label 680]
if rho > rhoend:
delta, rho = reduce_rho(rho, rhoend)
logging.info("New rho = %g after %i function evaluations" % (rho, nf))
logging.debug("Best so far: f = %.15g at x = " % (model.fval_opt()) + str(model.xbase + model.xopt()))
nfsav = nf
finished_main_loop = False
continue # next trust region step
else:
# Cannot reduce rho, so check xnew and quit
x = model.x_within_bounds(x=xnew)
if nf >= maxfun: # quit
exit_flag = EXIT_MAXFUN_WARNING
exit_str = "Objective has been called MAXFUN times"
finished_main_loop = True
break # quit
nf += 1
v_err, f = eval_least_squares_objective(objfun, x, eval_num=nf) # v_err not used here
# Force model.get_final_results() to return this new point if it's better than xopt, then quit
model.xsave = x
model.rsave = v_err.copy()
model.fsave = f
model.jacsave = model.gqv_at_xopt().T
exit_flag = EXIT_SUCCESS
exit_str = "rho has reached rhoend"
finished_main_loop = True
break # quit
###################
# End failed TR step
###################
else:
###################
# Start successful TR step
###################
logging.debug("Successful trust region step")
# Severe cancellation is likely to occur if XOPT is too far from XBASE. [Fortran label 90]
if dsq <= rounding_error_const * sumsq(model.xopt()):
base_shift = model.xopt()
xnew = xnew - base_shift # before xopt is updated
model.shift_base(base_shift) # includes a re-factorisation of the interpolation matrix
# Set KNEW to the index of the next interpolation point to be deleted to make room for a trust
# region step. Again RESCUE may be called if rounding errors have damaged
# the chosen denominator, which is the reason for attempting to select
# KNEW before calculating the next value of the objective function.
knew, linalg_error = choose_knew(model, delta, xnew, skip_kopt=True)
if linalg_error:
exit_flag = EXIT_LINALG_ERROR
exit_str = "Singular matrix when finding knew (in main loop)"
finished_main_loop = True
break # quit
# Calculate the value of the objective function at XBASE+XNEW, unless
# the limit on the number of calculations of F has been reached.
# [Fortran label 360, with ntrits > 0]
x = model.x_within_bounds(x=xnew)
if nf >= maxfun:
exit_flag = EXIT_MAXFUN_WARNING
exit_str = "Objective has been called MAXFUN times"
finished_main_loop = True
break # quit
nf += 1
v_err, f = eval_least_squares_objective(objfun, x, eval_num=nf)
if f <= model.min_objective_value():
# Force model.get_final_results() to return this new point if it's better than xopt, then quit
model.xsave = x
model.rsave = v_err.copy()
model.fsave = f
model.jacsave = model.gqv_at_xopt().T
exit_flag = EXIT_SUCCESS
exit_str = "Objective is sufficiently small"
finished_main_loop = True
break # quit
# Use the quadratic model to predict the change in F due to the step D,
# and set DIFF to the error of this prediction.
pred_reduction = - calculate_model_value(gopt, hq, d)
actual_reduction = model.fval_opt() - f
diffs = [abs(pred_reduction - actual_reduction), diffs[0], diffs[1]]
if dnorm > rho:
nfsav = nf
if pred_reduction < 0.0:
exit_flag = EXIT_TR_INCREASE_ERROR
exit_str = "Trust region step gave model increase"
finished_main_loop = True
break # quit
# Pick the next value of DELTA after a trust region step.
# Update trust region radius
ratio = actual_reduction / pred_reduction
if ratio <= 0.1:
delta = min(0.5 * delta, dnorm)
elif ratio <= 0.7:
delta = max(0.5 * delta, dnorm)
else: # (ratio > 0.7) Different updates depending on which code version we're using
if zhang_code_structure:
delta = min(max(2.0 * delta, 4.0 * dnorm), 1.0e10) # DFBOLS code version
elif bbqtr:
delta = max(0.5 * delta, 2.0 * dnorm) # BOBYQA version
else:
delta = max(delta, 2.0 * dnorm) # Zhang paper version
if delta <= 1.5 * rho: # cap trust region radius at rho
delta = rho
logging.debug("New delta = %g (rho = %g) from ratio %g" % (delta, rho, ratio))
# Recalculate KNEW and DENOM if the new F is less than FOPT.
if actual_reduction > 0.0: # f < model.fval_opt()
knew, linalg_error = choose_knew(model, delta, xnew, skip_kopt=False)
if linalg_error:
exit_flag = EXIT_LINALG_ERROR
exit_str = "Singular matrix when finding knew (in main loop, second time)"
finished_main_loop = True
break # quit
# Updating...
logging.debug("Updating with knew = %i" % knew)
model.update_point(knew, xnew, v_err, f)
# If a trust region step has provided a sufficient decrease in F, then
# branch for another trust region calculation.
if ratio >= 0.1:
finished_main_loop = False
continue # next trust region step
# Alternatively, find out if the interpolation points are close enough
# to the best point so far.
# [Fortran label 650]
distsq = max((2.0 * delta) ** 2, (10.0 * rho) ** 2)
model, delta, nf, nfsav, diffs, return_to_new_tr_iteration, geom_exit_flag, geom_exit_str = \
check_and_fix_geometry(model, objfun, distsq, delta, rho, dnorm, diffs, nf, nfsav, maxfun,
rounding_error_const, update_delta=False) # don't update delta when ntrits > 0
if return_to_new_tr_iteration is not None: # i.e. if we did actually fix geometry
if return_to_new_tr_iteration:
finished_main_loop = False
continue # next trust region step
else: # quit
exit_flag = geom_exit_flag
exit_str = geom_exit_str
finished_main_loop = True
break # quit
# If we didn't fix geometry, reduce rho [Fortran label 680]
if ratio > 0.0:
finished_main_loop = False
continue # next trust region step
if max(delta, dnorm) > rho:
finished_main_loop = False
continue # next trust region step
# Reduce rho and continue [Fortran label 680]
if rho > rhoend:
delta, rho = reduce_rho(rho, rhoend)
logging.info("New rho = %g after %i function evaluations" % (rho, nf))
logging.debug("Best so far: f = %.15g at x = " % (model.fval_opt()) + str(model.xbase + model.xopt()))
nfsav = nf
finished_main_loop = False
continue # next trust region step
else:
# Cannot reduce rho further
exit_flag = EXIT_SUCCESS
exit_str = "rho has reached rhoend"
finished_main_loop = True
break # quit
###################
# End successful TR step
###################
#############################
# End this iteration of main loop - take next TR step
#############################
###########################################################
# End of main loop [Fortran label 720]
###########################################################
x, rvec, f, jacmin = model.get_final_results()
logging.debug("At return from DFOGN, number of function evals = %i" % nf)
logging.debug("Smallest objective value = %.15g at x = " % f + str(x))
return x, rvec, f, jacmin, nf, exit_flag, exit_str
# return x, f, nf, exit_flag, exit_str
def solve(objfun, x0, lower=None, upper=None, maxfun=1000, rhobeg=None, rhoend=1e-8):
# If bounds not provided, set to something large
xl = (lower if lower is not None else -1.0e20 * np.ones(x0.shape))
xu = (upper if upper is not None else 1.0e20 * np.ones(x0.shape))
# Set default value of rhobeg to something sensible
rhobeg = (rhobeg if rhobeg is not None else 0.1 * max(np.max(np.abs(x0)), 1.0))
n = np.size(x0)
# Input & parameter checks
input_error_msg = None
    if rhobeg <= 0.0:
        input_error_msg = "Input error: rhobeg must be strictly positive"
    if rhoend <= 0.0:
        input_error_msg = "Input error: rhoend must be strictly positive"
if rhobeg <= rhoend:
input_error_msg = "Input error: rhobeg must be > rhoend"
if maxfun <= 0:
input_error_msg = "Input error: maxfun must be strictly positive"
if np.shape(x0) != (n,):
input_error_msg = "Input error: x0 must be a vector"
if np.shape(x0) != np.shape(xl):
input_error_msg = "Input error: lower bounds must have same shape as x0"
if np.shape(x0) != np.shape(xu):
input_error_msg = "Input error: upper bounds must have same shape as x0"
if np.min(xu - xl) < 2.0 * rhobeg:
input_error_msg = "Input error: gap between lower and upper must be at least 2*rhobeg"
# Process input errors
if input_error_msg is not None:
        results = OptimResults(x0, None, None, None, 0, EXIT_INPUT_ERROR, input_error_msg)
return results
if maxfun <= n + 1:
warnings.warn("maxfun <= npt: Are you sure your budget is large enough?", RuntimeWarning)
# Enforce lower bounds on x0 (ideally with gap of at least rhobeg)
idx = (xl < x0) & (x0 <= xl+rhobeg)
if np.any(idx):
warnings.warn("Some entries of x0 too close to lower bound, adjusting", RuntimeWarning)
x0[idx] = xl[idx] + rhobeg
idx = (x0 <= xl)
if np.any(idx):
warnings.warn("Some entries of x0 below lower bound, adjusting", RuntimeWarning)
x0[idx] = xl[idx]
# Enforce upper bounds on x0 (ideally with gap of at least rhobeg)
idx = (xu-rhobeg <= x0) & (x0 < xu)
if np.any(idx):
warnings.warn("Some entries of x0 too close to upper bound, adjusting", RuntimeWarning)
x0[idx] = xu[idx] - rhobeg
idx = (x0 >= xu)
if np.any(idx):
warnings.warn("Some entries of x0 above upper bound, adjusting", RuntimeWarning)
x0[idx] = xu[idx]
x, rvec, f, jacmin, nf, exit_flag, exit_str = dfogn_main(objfun, x0.copy(), xl, xu, rhobeg, rhoend, maxfun)
# Clean up exit_str to have better information:
if exit_flag == EXIT_SUCCESS:
exit_str = "Success: " + exit_str
elif exit_flag == EXIT_MAXFUN_WARNING:
exit_str = "Warning: " + exit_str
elif exit_flag == EXIT_INPUT_ERROR:
exit_str = "Input error: " + exit_str
elif exit_flag == EXIT_TR_INCREASE_ERROR:
exit_str = "Trust region subproblem error: " + exit_str
elif exit_flag == EXIT_LINALG_ERROR:
exit_str = "Linear algebra error: " + exit_str
elif exit_flag == EXIT_ALTMOV_MEMORY_ERROR:
exit_str = "ALTMOV memory error: " + exit_str
else:
exit_str = "Unknown exit flag " + str(exit_flag) + " with message " + exit_str
# Build solution object
results = OptimResults(x, rvec, f, jacmin, nf, exit_flag, exit_str)
# return x, f, nf, exit_flag, exit_str
return results
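# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). DFO-GN
# minimises ||r(x)||^2 for a callable objfun returning the residual
# vector r(x); the Rosenbrock residuals and the printing below are demo
# assumptions, not part of the solver's documented interface.
if __name__ == "__main__":
    def _rosenbrock_residuals(x):
        # f(x) = ||r(x)||^2 = 100*(x[1] - x[0]**2)**2 + (1 - x[0])**2
        return np.array([10.0 * (x[1] - x[0] ** 2), 1.0 - x[0]])
    demo_results = solve(_rosenbrock_residuals, np.array([-1.2, 1.0]))
    print(demo_results)  # OptimResults; field order matches the constructor above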
| numericalalgorithmsgroup/dfogn | dfogn/dfogn.py | Python | gpl-3.0 | 39,987 |
import sys
import wqio
if "--strict" in sys.argv:
sys.argv.remove("--strict")
tester = wqio.teststrict
else:
tester = wqio.test
sys.exit(tester(*sys.argv[1:]))
| phobson/wqio | check_wqio.py | Python | bsd-3-clause | 175 |
import wx
from cuttlebug import util
import os
class OptionsEvent(wx.PyEvent):
def __init__(self, type, object=None):
super(OptionsEvent, self).__init__()
self.SetEventType(type.typeId)
self.SetEventObject(object)
EVT_OPTION_CHANGED = wx.PyEventBinder(wx.NewEventType())
class Tree(wx.TreeCtrl):
'''
Just a wx.TreeCtrl but with better handling of icons and functions for easily fetching tree items and their data members
'''
def __init__(self, parent, id=-1, style=0):
super(Tree, self).__init__(parent, id, style=style | wx.TR_HIDE_ROOT)
#self.SetQuickBestSize(False)
self.clear()
self.clear_art()
def clear_art(self):
self.art = {}
self.image_list = wx.ImageList(16,16)
self.SetImageList(self.image_list)
def get_art(self, name):
il = self.GetImageList()
return il.GetBitmap(self.art[name])
def add_art(self, *arts):
for art in arts:
if art not in self.art:
self.art[art] = self.image_list.Add(util.get_icon(art))
self.SetImageList(self.image_list)
def clear(self):
self.best_size = None
self.DeleteAllItems()
self.root = self.AddRoot("root")
def depth(self):
return max(zip(*self.__walk_items())[1])
def __iter__(self):
return iter(self.get_items())
def __walk_items(self, item=None, depth=0):
'''
Return all the items in this tree, preorder traversal
'''
items = []
item = item or self.root
# Preorder traversal, add this item, then worry about children
items.append((item, depth))
# Base case, no more children
        child, cookie = self.GetFirstChild(item)
while child.IsOk():
items.extend(self.__walk_items(child, depth+1))
child, cookie = self.GetNextChild(item, cookie)
return items
def get_items(self):
return zip(*self.__walk_items())[0]
def find_item(self, data):
for item in self.get_items():
if self.GetItemPyData(item) == data:
return item
raise IndexError("Item not found in tree.")
def add_item(self, item, name=None, parent=None, icon=None):
name = name or str(item)
if parent is not None:
try:
parent = self.find_item(parent)
except IndexError:
raise ValueError("Parent item not found in tree.")
else:
parent = self.root
# Actually install the item
node = self.AppendItem(parent, name)
self.SetItemPyData(node, item)
if icon is not None:
self.set_item_art(item, icon)
self.compute_best_size()
return node
def compute_best_size(self):
if os.name == 'nt':
best_size = (self.__max_width_win32(), -1)
else:
best_size = (self.__max_width(), -1)
self.SetMinSize(best_size)
def __max_width_win32(self):
dc = wx.ScreenDC()
dc.SetFont(self.GetFont())
widths = []
for item, depth in self.__walk_items():
if item != self.root:
width = dc.GetTextExtent(self.GetItemText(item))[0] + self.GetIndent()*depth
widths.append(width)
return max(widths) + self.GetIndent()
def __max_width(self):
self.Freeze()
expanded = {}
for item in self.get_items():
if item is not self.root:
expanded[item] = self.IsExpanded(item)
self.ExpandAll()
best_size = self.GetBestSize()
for item in expanded:
if not expanded[item]: self.Collapse(item)
self.Thaw()
return best_size[0]
def set_item_art(self, item, art, type = wx.TreeItemIcon_Normal):
item = self.find_item(item)
self.add_art(art)
self.SetItemImage(item, self.art[art], type)
class TreeBook(wx.Panel):
def __init__(self, *args, **kwargs):
super(TreeBook, self).__init__(*args, **kwargs)
self.tree = Tree(self, style=wx.TR_SINGLE | wx.TR_HAS_BUTTONS)
self.empty_panel = wx.Panel(self)
self.current_panel = self.empty_panel
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(self.tree, 0, wx.EXPAND)
self.sizer.Add(self.empty_panel, 1, wx.EXPAND)
self.SetSizer(self.sizer)
self.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.on_sel_changed)
    def add_panel(self, panel, name, parent=None, icon=None):
node = self.tree.add_item(panel, name=name, parent=parent, icon=icon)
self.Freeze()
self.sizer.Add(panel, 1, wx.EXPAND)
panel.Hide()
self.Layout()
if self.current_panel == self.empty_panel:
self.tree.SelectItem(node, True)
self.Thaw()
def __show_panel(self, panel):
self.Freeze()
self.current_panel.Hide()
panel.Show()
self.Layout()
self.Thaw()
self.current_panel = panel
def on_sel_changed(self, evt):
panel = self.tree.GetItemPyData(evt.GetItem())
self.__show_panel(panel)
class OptionsTreeBook(TreeBook):
def __init__(self, parent, *args, **kwargs):
super(OptionsTreeBook, self).__init__(parent, *args, **kwargs)
self.parent = parent
def bind(self, widget, key):
if self.parent:
self.parent.bind(widget, key)
class OptionsPanel(wx.Panel):
def __init__(self, parent, name="Unnamed"):
wx.Panel.__init__(self, parent.book, -1)
self.name = name
self.groups = {}
self.parent = parent
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(util.padded(self.sizer,8))
def add(self, group, label, widget, key=None, label_on_right=False):
import widgets
if group not in self.groups:
box = wx.StaticBox(self, -1, group)
group_sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
grid = wx.FlexGridSizer(1,2,8,8)
grid.AddGrowableCol(1,1)
self.sizer.Add(group_sizer, 0, wx.EXPAND)
group_sizer.Add(grid, 0, wx.EXPAND | wx.ALL, 8)
self.groups[group] = grid
if isinstance(widget, widgets.OptionsWidget):
w = widget
else:
w = widget(self, -1)
if key:
self.bind(w, key)
if label_on_right:
self.groups[group].Add(w, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND)
self.groups[group].Add(wx.StaticText(self, -1, str(label)), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT)
else:
self.groups[group].Add(wx.StaticText(self, -1, str(label)), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
self.groups[group].Add(w, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND)
w.Bind(EVT_OPTION_CHANGED, self.on_change)
def on_change(self, evt):
self.parent.change()
def bind(self, widget, key):
if self.parent:
self.parent.bind(widget, key)
class OptionsDialog(wx.Dialog):
def __init__(self, parent, title="Options", size=(600,400), icons=[], data=None, on_apply=None):
super(OptionsDialog, self).__init__(parent, -1, title=title, size=size)
self.bindings = {}
self.data = data
self.changed = False
dlg_sizer = wx.BoxSizer(wx.VERTICAL)
self.book = OptionsTreeBook(self, -1)
dlg_sizer.Add(self.book, 1, wx.EXPAND)
# The OK/Cancel/Apply buttons at the bottom
panel = wx.Panel(self, -1)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.AddStretchSpacer(1)
self.btn_ok = util.button(panel, id=wx.ID_OK, func=self.on_ok)
sizer.Add(util.padded(self.btn_ok, 8), 0, wx.ALIGN_RIGHT)
self.btn_cancel = util.button(panel, id=wx.ID_CANCEL, func=self.on_cancel)
sizer.Add(util.padded(self.btn_cancel, 8), 0, wx.ALIGN_RIGHT)
sizer.AddSpacer(16)
self.btn_apply = util.button(panel, id=wx.ID_APPLY, func=self.on_apply)
sizer.Add(util.padded(self.btn_apply, 8), 0, wx.ALIGN_RIGHT)
self.btn_apply.Disable()
panel.SetSizer(sizer)
dlg_sizer.Add(panel, 0, wx.EXPAND)
self.SetSizer(dlg_sizer)
self.__apply_func = on_apply
def change(self):
self.changed = True
self.btn_apply.Enable()
def add_panel(self, page, parent=None, icon=None):
self.book.add_panel(page, page.name, parent=parent, icon=icon)
def on_apply(self, evt):
self.apply_changes()
def on_ok(self, evt):
self.apply_changes()
self.EndModal(self.changed)
def on_cancel(self, evt):
self.EndModal(0)
def apply_changes(self):
if self.data:
for key in self.bindings:
widget = self.bindings[key]
self.data[key] = widget.get_value()
if callable(self.__apply_func):
self.__apply_func()
self.btn_apply.Disable()
def bind(self, widget, key):
if self.data:
self.bindings[key] = widget
widget.set_value(self.data[key])
@classmethod
def show(cls, parent, data=None, on_apply=None):
dialog = cls(parent, data=data, on_apply=on_apply)
dialog.Centre()
dialog.ShowModal()
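# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the panel
# name and option key below are hypothetical demo values.
if __name__ == '__main__':
    app = wx.App(False)
    demo_data = {'demo.option': True}
    dialog = OptionsDialog(None, title="Demo Options", data=demo_data)
    dialog.add_panel(OptionsPanel(dialog, name="General"))
    dialog.Centre()
    dialog.ShowModal()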
| ryansturmer/cuttlebug | cuttlebug/ui/options/options.py | Python | mit | 9,515 |
"""
ElasticSearch wrapper
"""
__RCSID__ = "$Id$"
import logging
from cmreslogging.handlers import CMRESHandler
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
class ElasticSearchBackend(AbstractBackend):
"""
  ElasticSearchBackend is used to create an abstraction of the handler and the formatter concepts from logging.
Here, we have a CMRESHandler which is part of an external library named 'cmreslogging' based on 'logging'.
CMRESHandler is a specific handler created to send log records to an ElasticSearch DB. It does not need a Formatter
object.
"""
def __init__(self):
"""
CMRESHandler needs, at least, a hostname, a username, a password, a port and a specific index
from the ElasticSearch DB to send log records.
"""
super(ElasticSearchBackend, self).__init__(None, None)
self.__host = ''
self.__user = None
self.__passwd = None
self.__port = 9203
self.__index = ''
self.__bufferSize = 1000
self.__flushTime = 1
def createHandler(self, parameters=None):
"""
Each backend can initialize its attributes and create its handler with them.
:params parameters: dictionary of parameters. ex: {'FileName': file.log}
"""
if parameters is not None:
self.__host = parameters.get('Host', self.__host)
self.__user = parameters.get('User', self.__user)
self.__passwd = parameters.get('Password', self.__passwd)
self.__port = int(parameters.get('Port', self.__port))
self.__index = parameters.get('Index', self.__index)
self.__bufferSize = int(parameters.get('BufferSize', self.__bufferSize))
self.__flushTime = int(parameters.get('FlushTime', self.__flushTime))
if self.__user is not None and self.__passwd is not None:
self._handler = CMRESHandler(hosts=[{'host': self.__host, 'port': self.__port}],
auth_type=CMRESHandler.AuthType.BASIC_AUTH,
auth_details=(self.__user, self.__passwd),
es_index_name=self.__index,
use_ssl=True,
verify_ssl=True,
buffer_size=self.__bufferSize,
flush_frequency_in_sec=self.__flushTime)
else:
self._handler = CMRESHandler(hosts=[{'host': self.__host, 'port': self.__port}],
auth_type=CMRESHandler.AuthType.NO_AUTH,
es_index_name=self.__index,
use_ssl=True,
verify_ssl=True,
buffer_size=self.__bufferSize,
flush_frequency_in_sec=self.__flushTime)
# We give a format containing only asctime to add the field in elasticsearch
# asctime is not created at the initialization of the LogRecords but built in the format process
self._handler.setFormatter(logging.Formatter('%(asctime)s'))
def setLevel(self, level):
"""
No possibility to set the level of the ElasticSearch handler.
It is not set by default so it can send all Log Records of all levels to ElasticSearch.
"""
pass
def setFormat(self, fmt, datefmt, options):
"""
    Each backend gives a format to its formatter and attaches it to its handler.
:params fmt: string representing the log format
:params datefmt: string representing the date format
:params options: dictionary of logging options. ex: {'Color': True}
"""
pass
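# ----------------------------------------------------------------------
# Illustrative configuration sketch (not part of DIRAC); the host, port,
# index name and credentials below are placeholders.
if __name__ == '__main__':
  demoBackend = ElasticSearchBackend()
  demoBackend.createHandler({'Host': 'localhost', 'Port': 9200, 'User': 'dirac',
                             'Password': 'secret', 'Index': 'dirac-logs',
                             'BufferSize': 500, 'FlushTime': 5})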
| andresailer/DIRAC | Resources/LogBackends/ElasticSearchBackend.py | Python | gpl-3.0 | 3,666 |
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
from __future__ import absolute_import, division, print_function, with_statement
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import tornado.escape
import tornado.web
import zlib
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str, to_unicode
from tornado import httpclient, httputil
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask, PY3
if PY3:
    from urllib.parse import urlparse  # py3
    xrange = range
else:
    from urlparse import urlparse  # py2
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
    Here is an example WebSocket handler that echoes all received messages
    back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
.. testoutput::
:hide:
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
`close()`. Likewise, your request handler class should implement
    the `open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
"""
def __init__(self, application, request, **kwargs):
super(WebSocketHandler, self).__init__(application, request, **kwargs)
self.ws_connection = None
self.close_code = None
self.close_reason = None
self.stream = None
self._on_close_called = False
@tornado.web.asynchronous
def get(self, *args, **kwargs):
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != 'websocket':
self.clear()
self.set_status(400)
log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(lambda s: s.strip().lower(),
headers.get("Connection", "").split(","))
if 'upgrade' not in connection:
self.clear()
self.set_status(400)
log_msg = "\"Connection\" must be \"Upgrade\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.clear()
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.stream = self.request.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
self.clear_header('Content-Type')
self.ws_connection.accept_connection()
else:
if not self.stream.closed():
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 426 Upgrade Required\r\n"
"Sec-WebSocket-Version: 7, 8, 13\r\n\r\n"))
self.stream.close()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
"""
if self.ws_connection is None:
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols):
"""Invoked when a new WebSocket requests specific subprotocols.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol. Failure to select a
subprotocol does not automatically abort the connection,
although clients may close the connection if none of their
proposed subprotocols was selected.
"""
return None
def get_compression_options(self):
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the memory and CPU usage of the compression,
but no such options are currently implemented.
.. versionadded:: 4.1
"""
return None
def open(self, *args, **kwargs):
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
"""
pass
def on_message(self, message):
"""Handle incoming messages on the WebSocket
This method must be overridden.
"""
raise NotImplementedError
def ping(self, data):
"""Send ping frame to the remote end."""
if self.ws_connection is None:
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data):
"""Invoked when the response to a ping frame is received."""
pass
def on_close(self):
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code=None, reason=None):
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin):
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return True to accept the request or False to reject it.
By default, rejects all requests with an origin on a host other
than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return true::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value):
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
self.stream.set_nodelay(value)
def on_connection_close(self):
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
if not self._on_close_called:
self._on_close_called = True
self.on_close()
def send_error(self, *args, **kwargs):
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def get_websocket_protocol(self):
websocket_version = self.request.headers.get("Sec-WebSocket-Version")
if websocket_version in ("7", "8", "13"):
return WebSocketProtocol13(
self, compression_options=self.get_compression_options(),
response_headers=self._headers)
def _wrap_method(method):
def _disallow_for_websocket(self, *args, **kwargs):
if self.stream is None:
method(self, *args, **kwargs)
else:
raise RuntimeError("Method not supported for Web Sockets")
return _disallow_for_websocket
for method in ["write", "redirect", "set_header", "set_cookie",
"set_status", "flush", "finish"]:
setattr(WebSocketHandler, method,
_wrap_method(getattr(WebSocketHandler, method)))
class WebSocketProtocol(object):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.stream = handler.stream
self.client_terminated = False
self.server_terminated = False
def _run_callback(self, callback, *args, **kwargs):
"""Runs the given callback with exception handling.
On error, aborts the websocket connection and returns False.
"""
try:
callback(*args, **kwargs)
except Exception:
app_log.error("Uncaught exception in %s",
self.request.path, exc_info=True)
self._abort()
def on_connection_close(self):
self._abort()
def _abort(self):
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
class _PerMessageDeflateCompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._compressor = self._create_compressor()
else:
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(tornado.web.GZipContentEncoding.GZIP_LEVEL,
zlib.DEFLATED, -self._max_wbits)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
data = (compressor.compress(data) +
compressor.flush(zlib.Z_SYNC_FLUSH))
assert data.endswith(b'\x00\x00\xff\xff')
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._decompressor = self._create_decompressor()
else:
self._decompressor = None
def _create_decompressor(self):
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data):
decompressor = self._decompressor or self._create_decompressor()
return decompressor.decompress(data + b'\x00\x00\xff\xff')
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0f
def __init__(self, handler, mask_outgoing=False,
compression_options=None, response_headers=None):
WebSocketProtocol.__init__(self, handler)
self._response_headers = response_headers
self.mask_outgoing = mask_outgoing
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None
self._frame_length = None
self._fragmented_message_buffer = None
self._fragmented_message_opcode = None
self._waiting = None
self._compression_options = compression_options
self._decompressor = None
self._compressor = None
self._frame_compressed = None
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
def accept_connection(self):
try:
self._handle_websocket_headers()
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received",
exc_info=True)
self._abort()
return
def _handle_websocket_headers(self):
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: self.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key):
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
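    # Known test vector from RFC 6455 section 1.3: a Sec-WebSocket-Key of
    # "dGhlIHNhbXBsZSBub25jZQ==" must produce the accept value
    # "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".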
def _challenge_response(self):
return WebSocketProtocol13.compute_accept_value(
self.request.headers.get("Sec-Websocket-Key"))
def _accept_connection(self):
subprotocol_header = ''
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
subprotocols = [s.strip() for s in subprotocols.split(',')]
if subprotocols:
selected = self.handler.select_subprotocol(subprotocols)
if selected:
assert selected in subprotocols
subprotocol_header = ("Sec-WebSocket-Protocol: %s\r\n"
% selected)
extension_header = ''
extensions = self._parse_extensions_header(self.request.headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors('server', ext[1])
if ('client_max_window_bits' in ext[1] and
ext[1]['client_max_window_bits'] is None):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]['client_max_window_bits']
extension_header = ('Sec-WebSocket-Extensions: %s\r\n' %
httputil._encode_header(
'permessage-deflate', ext[1]))
break
response_headers = ''
if self._response_headers is not None:
for header_name, header_value in self._response_headers.get_all():
response_headers += '%s: %s\r\n' % (header_name, header_value)
if self.stream.closed():
self._abort()
return
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %s\r\n"
"%s%s%s"
"\r\n" % (self._challenge_response(), subprotocol_header,
extension_header, response_headers)))
self._run_callback(self.handler.open, *self.handler.open_args,
**self.handler.open_kwargs)
self._receive_frame()
def _parse_extensions_header(self, headers):
extensions = headers.get("Sec-WebSocket-Extensions", '')
if extensions:
return [httputil._parse_header(e.strip())
for e in extensions.split(',')]
return []
def _process_server_headers(self, key, headers):
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers['Upgrade'].lower() == 'websocket'
assert headers['Connection'].lower() == 'upgrade'
accept = self.compute_accept_value(key)
assert headers['Sec-Websocket-Accept'] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
self._create_compressors('client', ext[1])
else:
raise ValueError("unsupported extension %r", ext)
def _get_compressor_options(self, side, agreed_parameters):
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + '_no_context_takeover') not in agreed_parameters)
wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
if wbits_header is None:
options['max_wbits'] = zlib.MAX_WBITS
else:
options['max_wbits'] = int(wbits_header)
return options
def _create_compressors(self, side, agreed_parameters):
# TODO: handle invalid parameters gracefully
allowed_keys = set(['server_no_context_takeover',
'client_no_context_takeover',
'server_max_window_bits',
'client_max_window_bits'])
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = 'client' if (side == 'server') else 'server'
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters))
self._decompressor = _PerMessageDeflateDecompressor(
**self._get_compressor_options(other_side, agreed_parameters))
def _write_frame(self, fin, opcode, data, flags=0):
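        # RFC 6455 base framing: byte 0 carries the FIN/RSV bits and the
        # 4-bit opcode; byte 1 carries the mask bit and a 7-bit length
        # (126 means a 16-bit extended length follows, 127 a 64-bit one);
        # then the optional 4-byte mask, then the payload.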
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
        length = len(data)
        if self.mask_outgoing:
            mask_bit = 0x80
        else:
            mask_bit = 0
        if length < 126:
            frame += struct.pack("B", length | mask_bit)
        elif length <= 0xFFFF:
            frame += struct.pack("!BH", 126 | mask_bit, length)
        else:
            frame += struct.pack("!BQ", 127 | mask_bit, length)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
try:
return self.stream.write(frame)
except StreamClosedError:
self._abort()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
return self._write_frame(True, opcode, message, flags=flags)
def write_ping(self, data):
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
def _receive_frame(self):
try:
self.stream.read_bytes(2, self._on_frame_start)
except StreamClosedError:
self._abort()
def _on_frame_start(self, data):
self._wire_bytes_in += len(data)
header, payloadlen = struct.unpack("BB", data)
self._final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
self._frame_opcode = header & self.OPCODE_MASK
self._frame_opcode_is_control = self._frame_opcode & 0x8
if self._decompressor is not None and self._frame_opcode != 0:
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
self._masked_frame = bool(payloadlen & 0x80)
payloadlen = payloadlen & 0x7f
if self._frame_opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
try:
if payloadlen < 126:
self._frame_length = payloadlen
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length,
self._on_frame_data)
elif payloadlen == 126:
self.stream.read_bytes(2, self._on_frame_length_16)
elif payloadlen == 127:
self.stream.read_bytes(8, self._on_frame_length_64)
except StreamClosedError:
self._abort()
def _on_frame_length_16(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!H", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_frame_length_64(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!Q", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_masking_key(self, data):
self._wire_bytes_in += len(data)
self._frame_mask = data
try:
self.stream.read_bytes(self._frame_length,
self._on_masked_frame_data)
except StreamClosedError:
self._abort()
def _on_masked_frame_data(self, data):
# Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
self._on_frame_data(_websocket_mask(self._frame_mask, data))
def _on_frame_data(self, data):
self._wire_bytes_in += len(data)
if self._frame_opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not self._final_frame:
# control frames must not be fragmented
self._abort()
return
opcode = self._frame_opcode
elif self._frame_opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if self._final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if self._final_frame:
opcode = self._frame_opcode
else:
self._fragmented_message_opcode = self._frame_opcode
self._fragmented_message_buffer = data
if self._final_frame:
self._handle_message(opcode, data)
if not self.client_terminated:
self._receive_frame()
def _handle_message(self, opcode, data):
if self.client_terminated:
return
if self._frame_compressed:
data = self._decompressor.decompress(data)
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return
self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.handler.close_code = struct.unpack('>H', data[:2])[0]
if len(data) > 2:
self.handler.close_reason = to_unicode(data[2:])
# Echo the received close code, if any (RFC 6455 section 5.5.1).
self.close(self.handler.close_code)
elif opcode == 0x9:
# Ping
self._write_frame(True, 0xA, data)
elif opcode == 0xA:
# Pong
self._run_callback(self.handler.on_pong, data)
else:
self._abort()
def close(self, code=None, reason=None):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b''
else:
close_data = struct.pack('>H', code)
if reason is not None:
close_data += utf8(reason)
self._write_frame(True, 0x8, close_data)
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort)
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
def __init__(self, io_loop, request, on_message_callback=None,
compression_options=None):
self.compression_options = compression_options
self.connect_future = TracebackFuture()
self.protocol = None
self.read_future = None
self.read_queue = collections.deque()
self.key = base64.b64encode(os.urandom(16))
self._on_message_callback = on_message_callback
self.close_code = self.close_reason = None
scheme, sep, rest = request.url.partition(':')
scheme = {'ws': 'http', 'wss': 'https'}[scheme]
request.url = scheme + sep + rest
request.headers.update({
'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Key': self.key,
'Sec-WebSocket-Version': '13',
})
if self.compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers['Sec-WebSocket-Extensions'] = (
'permessage-deflate; client_max_window_bits')
self.tcp_client = TCPClient(io_loop=io_loop)
super(WebSocketClientConnection, self).__init__(
io_loop, None, request, lambda: None, self._on_http_response,
104857600, self.tcp_client, 65536, 104857600)
def close(self, code=None, reason=None):
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None
def on_connection_close(self):
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self.on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def _on_http_response(self, response):
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(WebSocketError(
"Non-websocket response"))
def headers_received(self, start_line, headers):
if start_line.code != 101:
return super(WebSocketClientConnection, self).headers_received(
start_line, headers)
self.headers = headers
self.protocol = self.get_websocket_protocol()
self.protocol._process_server_headers(self.key, self.headers)
self.protocol._receive_frame()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.stream = self.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None
self.connect_future.set_result(self)
def write_message(self, message, binary=False):
"""Sends a message to the WebSocket server."""
return self.protocol.write_message(message, binary)
def read_message(self, callback=None):
"""Reads a message from the WebSocket server.
If on_message_callback was specified at WebSocket
        initialization, this function will never return messages.
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given it will be called with the future when it is
ready.
"""
assert self.read_future is None
future = TracebackFuture()
if self.read_queue:
future.set_result(self.read_queue.popleft())
else:
self.read_future = future
if callback is not None:
self.io_loop.add_future(future, callback)
return future
def on_message(self, message):
if self._on_message_callback:
self._on_message_callback(message)
elif self.read_future is not None:
self.read_future.set_result(message)
self.read_future = None
else:
self.read_queue.append(message)
def on_pong(self, data):
pass
def get_websocket_protocol(self):
return WebSocketProtocol13(self, mask_outgoing=True,
compression_options=self.compression_options)
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
on_message_callback=None, compression_options=None):
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
The connection supports two styles of operation. In the coroutine
style, the application typically calls
`~.WebSocketClientConnection.read_message` in a loop::
conn = yield websocket_connect(url)
while True:
msg = yield conn.read_message()
if msg is None: break
# Do something with msg
In the callback style, pass an ``on_message_callback`` to
``websocket_connect``. In both styles, a message of ``None``
indicates that the connection has been closed.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
The ``io_loop`` argument is deprecated.
"""
if io_loop is None:
io_loop = IOLoop.current()
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = httpclient._RequestProxy(
request, httpclient.HTTPRequest._DEFAULTS)
conn = WebSocketClientConnection(io_loop, request,
on_message_callback=on_message_callback,
compression_options=compression_options)
if callback is not None:
io_loop.add_future(conn.connect_future, callback)
return conn.connect_future
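# ----------------------------------------------------------------------
# Illustrative client sketch (not part of this module): round-trips one
# message through an echo server such as the EchoWebSocket handler shown
# in the WebSocketHandler docstring; the URL below is a placeholder.
if __name__ == "__main__":
    from tornado import gen
    @gen.coroutine
    def _demo():
        conn = yield websocket_connect("ws://localhost:8888/websocket")
        conn.write_message("Hello, world")
        msg = yield conn.read_message()
        print(msg)
    IOLoop.current().run_sync(_demo)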
| obsh/tornado | tornado/websocket.py | Python | apache-2.0 | 41,216 |
import numpy as np
from . import util
protocols = dict([(lambda a: (int(a[1]), a[0]))(l.split('\t')[:2])
                  for l in open('/etc/protocols', 'r').readlines()
                  if not l.startswith('#')])
def process(msdu):
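    # LLC/SNAP header layout (RFC 1042): byte 0 = DSAP, byte 1 = SSAP,
    # byte 2 = control, bytes 3-5 = SNAP OUI, bytes 6-7 = EtherType.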
dsap, ssap, control = msdu[:3]
protocol_id = util.shiftin(util.shiftout(msdu[3:6][::-1], 8), 24)[0]
    if not (dsap == 0xaa and ssap == 0xaa and control == 3 and protocol_id == 0):
return {'error': 'Not using IPv4 over SNAP in LLC (RFC 1042)'}
ethertype = util.shiftin(util.shiftout(msdu[6:8][::-1], 8), 16)[0]
result = dict(ethertype='%04x' % ethertype)
if ethertype == 0x0800:
llc_payload = msdu[8:]
version, ihl = util.shiftin(util.shiftout(llc_payload[0], 8), 4)[::-1]
if not version == 4:
return dict(result, error='Not using IPv4')
ip_header = llc_payload[:ihl*4]
src_ip, dst_ip = ['.'.join(map(str, ip_header[offset:offset+4])) for offset in range(12, 20, 4)]
result.update(ethertype='ipv4', src_ip=src_ip, dst_ip=dst_ip)
ip_payload = llc_payload[ihl*4:]
proto = protocols.get(ip_header[9], 'unknown')
result.update(proto=proto)
if proto in ('tcp', 'udp'):
result.update(
src_port=util.shiftin(util.shiftout(ip_payload[0:2][::-1], 8), 16)[0],
dst_port=util.shiftin(util.shiftout(ip_payload[2:4][::-1], 8), 16)[0],
)
if proto == 'tcp':
data_offset = util.shiftin(util.shiftout(ip_payload[12], 8)[4:8], 4)[0]
result.update(payload=bytes(ip_payload[data_offset*4:].astype(np.uint8)))
elif proto == 'udp':
result.update(payload=bytes(ip_payload[8:].astype(np.uint8)))
return result
elif ethertype == 0x0806:
return dict(result, ethertype='arp')
elif ethertype == 0x8035:
return dict(result, ethertype='rarp')
elif ethertype == 0x86DD:
return dict(result, ethertype='ipv6')
else:
return dict(result, error='Unknown ethertype')
| piannucci/blurt | blurt_py_80211/streaming/blurt/phy/msdu.py | Python | mit | 2,013 |
import json
import os
import sys
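# Expected JSON layout, inferred from the parsing loop below:
# {"classes": {"<class name>": {"methods": [{"return": ..., "name": ...,
#     "args": ..., "calls": [{"to_class": ..., "return": ...,
#     "to_method": ..., "local_args": ...}]}]}}}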
def parsejson(directory):
base=os.path.basename(directory)
in_path= os.getcwd()+'/Data/Apk_data/'+base+'/'+base+"method.json" # input file path.
out_path = os.getcwd()+'/Data/Apk_data/'+base+'/'+base+'method.csv'
# out_path= os.path.join(directoryoutfile) # output file path.
print "Parsing json file.."
orig_stdout = sys.stdout # To write output to txt file using stdout.
f = file(out_path, 'w') # creating file object ....
sys.stdout = f
with open(in_path) as jsonData:
data = json.load(jsonData)
classes = data['classes']
for element in classes:
methods = classes[element]['methods']
for name in methods:
#print '"',element,'"| ',name['return'],'"| "',name['name'],'"| "',name['args'],'"'
#print name['name']
print '"',element,'"| "', name['return'],'"| "', name['name'],'"| "', name['args'],'"'
calls = name['calls']
for call in calls:
print '"',call['to_class'],'"| "', call['return'],'"| "', call['to_method'],'"| "', call['local_args'],'"'
sys.stdout = orig_stdout # To write output to txt file using stdout.
f.close() # close file.
print out_path," created"
| OmkarMozar/CUPAP | code/own/jsonparser.py | Python | apache-2.0 | 1,239 |
#---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
#pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .deployment_avail_set import DeploymentAvailSet
from .template_link import TemplateLink
from .parameters_link import ParametersLink
from .provider_resource_type import ProviderResourceType
from .provider import Provider
from .basic_dependency import BasicDependency
from .dependency import Dependency
from .deployment_properties_extended import DeploymentPropertiesExtended
from .deployment_extended import DeploymentExtended
from .avail_set_creation_client_enums import (
DeploymentMode,
)
__all__ = [
'DeploymentAvailSet',
'TemplateLink',
'ParametersLink',
'ProviderResourceType',
'Provider',
'BasicDependency',
'Dependency',
'DeploymentPropertiesExtended',
'DeploymentExtended',
'DeploymentMode',
]
| BurtBiel/azure-cli | src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/mgmt_avail_set/lib/models/__init__.py | Python | mit | 1,439 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os.path
import string
import webbrowser
import shutil
import json
import base64
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import QFile
from PyQt5.QtCore import QUrl
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWidgets import QInputDialog, QFileDialog
from PyQt5.QtWebKitWidgets import QWebView, QWebPage
# from bs4 import BeautifulSoup
from libs.python.pyquery import PyQuery as pq
from lxml import etree
import urllib
import time
from PyQt5.QtWidgets import QProgressDialog
from PyQt5.QtWidgets import QApplication
#from PIL import Image
import requests
from io import BytesIO
class html_editor(QWebView):
def __init__(self, parent=None, html=None, css_file=None):
#def __init__(self, html=None, style_filename=None):
super(html_editor, self).__init__(parent)
# http://stackoverflow.com/questions/21357157/is-there-any-solution-for-the-qtwebkit-memory-leak
# https://github.com/lycying/seeking
#self.page().setContentEditable(True)
#self.execute_js('document.designMode = "on"')
self.file_dialog_dir = '.'
# TO CHECK
# http://nullege.com/codes/show/src%40c%40a%40calibre-HEAD%40src%40calibre%40gui2%40viewer%40documentview.py/89/PyQt4.QtWebKit.QWebPage.setLinkDelegationPolicy/python
settings = self.settings()
# settings.setMaximumPagesInCache(0)
# settings.setObjectCacheCapacities(0, 0, 0)
# settings.setOfflineStorageDefaultQuota(0)
# settings.setOfflineWebApplicationCacheQuota(0)
# Security
settings.setAttribute(QWebSettings.JavaEnabled, False)
#settings.setAttribute(QWebSettings.PluginsEnabled, False)
#settings.setAttribute(QWebSettings.JavascriptCanOpenWindows, False)
#settings.setAttribute(QWebSettings.JavascriptCanAccessClipboard, False)
# Miscellaneous
settings.setAttribute(QWebSettings.LinksIncludedInFocusChain, True)
settings.setAttribute(QWebSettings.DeveloperExtrasEnabled, True)
# settings.setAttribute(QWebSettings.AutoLoadImages, False)
# Disable Hyperlinks following, open url on system browser
self.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks)
self.page().linkClicked.connect(lambda url: webbrowser.open(str(url.toString())))
if html:
self.setHtml(html)
else:
self.set_readonly(True)
# config
config_file_path = os.path.join(os.path.dirname(__file__), 'config.json')
self.config = None
if os.path.isfile(config_file_path):
with open(config_file_path) as outfile:
self.config = json.load(outfile)
outfile.close()
self.context_menu_actions = []
# TO CHECK
# https://github.com/gen2brain/pyhtmleditor/blob/master/src/pyhtmleditor/htmleditor.py
# https://github.com/kovidgoyal/calibre/blob/master/src/calibre/gui2/comments_editor.py
#if css_file:
# self.apply_stylefile(css_file)
############# TO IMPLEMENT ##########
#self.note_editor.execute_js(self.functions.get_javascript_plugins())
#self.load_functions = []
#self.settings().setAttribute(QWebSettings.AutoLoadImages, False)
#QWebSettings.globalSettings()->setAttribute(QWebSettings::DeveloperExtrasEnabled, true);
#QWebSettings.globalSettings().setAttribute(QWebSettings.OfflineWebApplicationCacheEnabled, True)
def get_config(self):
return self.config
def set_context_menu_append_actions(self, context_menu_actions):
self.context_menu_actions = context_menu_actions
def contextMenuEvent(self, event):
menu = self.page().createStandardContextMenu()
        # self.config may be None when no config.json was found next to the
        # module; guard both lookups so 'in' is not applied to None.
        if self.config and 'default_context_menu_replace' in self.config:
            if self.config['default_context_menu_replace'] == 'True':
                menu = QtWidgets.QMenu(self)
        if self.config and 'context_menu_actions' in self.config:
            for action in self.context_menu_actions:
                menu.addAction(action)
menu.exec_(QtGui.QCursor.pos())
def set_readonly(self, param=True):
if param == True:
self.execute_js('document.body.contentEditable = "false"')
elif param == False:
self.execute_js('document.body.contentEditable = "true"')
def set_writeable(self):
self.set_readonly(False)
def set_html(self, html=None):
if html:
self.setHtml(html)
def get_html(self,relative_path=None):
html = self.page().mainFrame().toHtml()
pd_content = pq(html)
if pd_content('img').length > 0:
num_img = 0
max_num_img = 0
            # Determines the number of images to download and process
for img in pd_content('img'):
if "base64" not in img.attrib['src']:
max_num_img += 1
            # There are images to download and process
if max_num_img > 0:
progress_dialog = QProgressDialog(self)
progress_dialog.setWindowTitle('Please Wait')
progress_dialog.setLabelText('Downloading and processing images. Please wait.')
progress_dialog.setRange(num_img, max_num_img)
progress_dialog.setValue(num_img)
progress_dialog.setCancelButton(None)
progress_dialog.show()
QApplication.processEvents()
for img in pd_content('img'):
if "base64" not in img.attrib['src']:
if 'http' in img.attrib['src'].lower() or 'ftp' in img.attrib['src'].lower():
# Downloads images
response = requests.get(img.attrib['src'])
# Generates base64 of the image
base64_img = base64.b64encode(response.content).decode('ascii')
# Build uri
uri = "data:" + response.headers['Content-Type'] + ";" + "base64," + base64_img
                        # Reassigns the src attribute with the URI data
img.attrib['src'] = uri
# Updates progress bar
num_img = num_img + 1
progress_dialog.setValue(num_img)
QApplication.processEvents()
html = pd_content.html()
return html
def get_content(self):
return self.get_html()
def set_content(self, content):
if content:
self.set_html(content)
def open_file(self, file_path):
with open(file_path, encoding='UTF-8', errors="ignore") as fd:
base_url = QUrl.fromLocalFile(os.path.join(os.path.dirname(file_path), ''))
self.setHtml(fd.read(), base_url)
        # The QFile-based variant below generated UTF-8 bugs
# fd = QFile(file_path)
# if fd.open(QFile.ReadOnly):
# # Required for webkit to access local images
# base_url = QUrl.fromLocalFile(os.path.join(os.path.dirname(file_path),''))
# self.setContent(fd.readAll(), "text/html", base_url)
# fd.close()
def toggle_bold(self, parm=None):
self.page().triggerAction(QWebPage.ToggleBold)
def toggle_italic(self, parm=None):
self.page().triggerAction(QWebPage.ToggleItalic)
def heading(self, param=None):
if param and param in ['heading_1', 'heading_2', 'heading_3', 'heading_4', 'heading_5', 'heading_6']:
cmd_str = str("document.execCommand('formatblock', false, '%s');" % str('h'+param[8]))
self.execute_js(cmd_str)
def orderedlist(self, param=None):
self.page().triggerAction(QWebPage.InsertOrderedList)
def unorderedlist(self, param=None):
self.page().triggerAction(QWebPage.InsertUnorderedList)
def insert_horizontal_rule(self, param=None):
self.execute_js("document.execCommand('inserthorizontalrule', false, false);")
def block_quote(self, param=None):
self.execute_js("document.execCommand('formatblock', false, 'blockquote');")
def insert_html(self, param=None):
if param:
cmd_str = 'var range = document.getSelection().getRangeAt(0); \
document.execCommand("inserthtml",false,"' + param + '");'
self.execute_js(cmd_str)
def preformated_text(self, param=None):
self.execute_js("document.execCommand('formatblock', false, 'pre');")
# if self.page().hasSelection():
# #pass
#
# cmd_str = 'var range = document.getSelection().getRangeAt(0); \
# document.execCommand("inserthtml",false,"<pre><code>" + range + "</code></pre>");'
# self.execute_js(cmd_str)
def block_code(self, param=None):
if self.page().hasSelection():
cmd_str = 'var range = document.getSelection().getRangeAt(0); \
document.execCommand("inserthtml",false,"<code>" + range + "</code>");'
self.execute_js(cmd_str)
def insert_checkbox(self, param=None):
if self.page().hasSelection():
            # The original snippet referenced 'selObj' without defining it,
            # which made the JavaScript throw; define it from the selection.
            cmd_str = 'var selObj = document.getSelection(); var range = selObj.getRangeAt(0); \
                       document.execCommand("inserthtml",false,"<input type=\'checkbox\' name=\'test\' checked>" + range);'
self.execute_js(cmd_str)
def indent(self, param=None):
self.execute_js("document.execCommand('indent', false, true);")
def outdent(self, param=None):
self.execute_js("document.execCommand('outdent', false, true);")
def undo(self, param=None):
self.page().triggerAction(QWebPage.Undo)
def redo(self, param=None):
self.page().triggerAction(QWebPage.Redo)
def cut(self, param=None):
self.page().triggerAction(QWebPage.Cut)
def copy(self, param=None):
self.page().triggerAction(QWebPage.Copy)
def paste(self, param=None):
self.page().triggerAction(QWebPage.Paste)
def remove_format(self, param=None):
self.page().triggerAction(QWebPage.RemoveFormat)
self.execute_js("document.execCommand('formatBlock', false, 'p');")
def insert_link(self, param=None):
        link, ok = QInputDialog.getText(None, 'Insert Link', 'Enter a URL for the link (ex: http://www.google.com).')  # QLineEdit.Normal
if ok:
self.execute_js("document.execCommand('createLink', false, '%s');" % link)
def insert_embedded_image(self, param=None):
if param:
filename, fileextension = os.path.splitext(param)
fileextension = fileextension[1:]
image_encoded_data = base64.b64encode(open(param, "rb").read())
self.insert_html("<img src='data:image/" + fileextension + ";base64," + image_encoded_data.decode('ascii') + "' />")
def insert_image(self, image_path=None, new_image_path=None):
#image_path, extra = QFileDialog.getOpenFileName(None, 'Select Image', self.file_dialog_dir, "All files (*.*);;JPEG (*.jpg *.jpeg);;TIFF (*.tif)")
image_path_base, file_extension = os.path.splitext(image_path)
file_name = os.path.basename(image_path)
copied = False
if image_path and new_image_path:
if not os.path.isfile(os.path.join(new_image_path, file_name)):
try:
shutil.copy2(image_path, new_image_path)
copied = True
except (OSError, IOError):
                    print("Unable to copy the file to: " + str(new_image_path))
else:
try:
new_location = image_path_base + '_' + time.strftime("%Y%m%d") + "_" + time.strftime("%H%M%S") + file_extension
shutil.copy2(image_path, new_location)
copied = True
except (OSError, IOError):
                    print("Unable to copy the file to: " + str(new_location))
if copied:
# file_path = QUrl.fromLocalFile(new_location).toString()
# self.execute_js("document.execCommand('insertImage', false, '%s');" % file_path)
            self.insert_html("<img src='" + file_name + "' />")
def execute_js(self, param=None):
if param:
#print ("**************************************************")
#print (param)
self.page().mainFrame().evaluateJavaScript(param)
def execute_jsfile(self, param=None):
if param:
js_content = None
file_path = os.path.join(os.path.dirname(__file__), param)
if os.path.isfile(file_path):
with open(file_path, encoding='UTF-8') as fd:
js_content = fd.read()
if js_content:
self.execute_js(js_content)
def apply_style(self, style=None, class_name=None):
if style:
style = style.replace("\"", "'").replace("\n", " ")
js_code = ""
if class_name:
js_code += "var elements = document.getElementsByClassName('" + class_name + "'); "
js_code += "while (elements.length > 0){ elements[0].parentNode.removeChild(elements[0]); } "
js_code += "var css = document.createElement('style'); "
js_code += "css.type = 'text/css'; "
if class_name:
js_code += "css.className = '" + class_name + "'; "
js_code += "var styles = '" + style + "'; "
js_code += "if(css.styleSheet){ css.styleSheet.cssText = styles; }else{ css.appendChild(document.createTextNode(styles)); } "
js_code += "document.getElementsByTagName('head')[0].appendChild(css); \n"
self.execute_js(js_code)
# ORIGINAL CODE
# original_html = self.page().mainFrame().toHtml()
#
# try:
# soup = BeautifulSoup(original_html, "lxml")# "html.parser")
# head = soup.head
#
# if class_name:
# note_styles = soup.find_all("style", {'class': class_name})
# if note_styles:
# for note_style in note_styles:
# note_style.decompose()
#
# if style:
# new_style = soup.new_tag('style')
# new_style['type'] = 'text/css'
#
# if class_name:
# new_style['class'] = class_name
#
# new_style.append(style)
# head.append(new_style)
#
# #new_html = soup.prettify()#()(formatter="minimal")
# new_html=str(soup)
# self.set_content(new_html)
# except Exception as e:
# self.set_content(original_html)
def apply_stylefile(self, file_path=None, class_name=None):
if file_path and os.path.isfile(file_path):
            css_file_content = ""
with open(file_path, encoding='UTF-8', errors="ignore") as fd:
file_content = fd.read() # self.convert_markup(fd.read(), file_name, 'import', 'open')
if file_content is not None:
                    css_file_content = file_content
            self.apply_style(css_file_content, class_name)
| marcoconstancio/yanta | plugins/viewers/html_editor/html_editor.py | Python | gpl-2.0 | 16,041 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
import pytest
import requests_mock
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection
from airflow.providers.openfaas.hooks.openfaas import OpenFaasHook
FUNCTION_NAME = "function_name"
class TestOpenFaasHook(unittest.TestCase):
GET_FUNCTION = "/system/function/"
INVOKE_ASYNC_FUNCTION = "/async-function/"
INVOKE_FUNCTION = "/function/"
DEPLOY_FUNCTION = "/system/functions"
UPDATE_FUNCTION = "/system/functions"
def setUp(self):
self.hook = OpenFaasHook(function_name=FUNCTION_NAME)
self.mock_response = {'ans': 'a'}
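    # All tests below share one pattern: BaseHook.get_connection is mocked so
    # the hook resolves its base URL to http://open-faas.io, while
    # requests_mock intercepts the HTTP call and returns the status code
    # under test.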
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_is_function_exist_false(self, mock_get_connection, m):
m.get(
"http://open-faas.io" + self.GET_FUNCTION + FUNCTION_NAME,
json=self.mock_response,
status_code=404,
)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
does_function_exist = self.hook.does_function_exist()
assert not does_function_exist
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_is_function_exist_true(self, mock_get_connection, m):
m.get(
"http://open-faas.io" + self.GET_FUNCTION + FUNCTION_NAME,
json=self.mock_response,
status_code=202,
)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
does_function_exist = self.hook.does_function_exist()
assert does_function_exist
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_update_function_true(self, mock_get_connection, m):
m.put("http://open-faas.io" + self.UPDATE_FUNCTION, json=self.mock_response, status_code=202)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
self.hook.update_function({}) # returns None
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_update_function_false(self, mock_get_connection, m):
m.put("http://open-faas.io" + self.UPDATE_FUNCTION, json=self.mock_response, status_code=400)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
with pytest.raises(AirflowException) as ctx:
self.hook.update_function({})
assert 'failed to update ' + FUNCTION_NAME in str(ctx.value)
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_invoke_function_false(self, mock_get_connection, m):
m.post(
"http://open-faas.io" + self.INVOKE_FUNCTION + FUNCTION_NAME,
json=self.mock_response,
status_code=400,
)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
with pytest.raises(AirflowException) as ctx:
self.hook.invoke_function({})
assert 'failed to invoke function' in str(ctx.value)
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_invoke_function_true(self, mock_get_connection, m):
m.post(
"http://open-faas.io" + self.INVOKE_FUNCTION + FUNCTION_NAME,
json=self.mock_response,
status_code=200,
)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
assert self.hook.invoke_function({}) is None
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_invoke_async_function_false(self, mock_get_connection, m):
m.post(
"http://open-faas.io" + self.INVOKE_ASYNC_FUNCTION + FUNCTION_NAME,
json=self.mock_response,
status_code=400,
)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
with pytest.raises(AirflowException) as ctx:
self.hook.invoke_async_function({})
assert 'failed to invoke function' in str(ctx.value)
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_invoke_async_function_true(self, mock_get_connection, m):
m.post(
"http://open-faas.io" + self.INVOKE_ASYNC_FUNCTION + FUNCTION_NAME,
json=self.mock_response,
status_code=202,
)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
assert self.hook.invoke_async_function({}) is None
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_deploy_function_function_already_exist(self, mock_get_connection, m):
m.put("http://open-faas.io/" + self.UPDATE_FUNCTION, json=self.mock_response, status_code=202)
mock_connection = Connection(host="http://open-faas.io/")
mock_get_connection.return_value = mock_connection
assert self.hook.deploy_function(True, {}) is None
@mock.patch.object(BaseHook, 'get_connection')
@requests_mock.mock()
def test_deploy_function_function_not_exist(self, mock_get_connection, m):
m.post("http://open-faas.io" + self.DEPLOY_FUNCTION, json={}, status_code=202)
mock_connection = Connection(host="http://open-faas.io")
mock_get_connection.return_value = mock_connection
assert self.hook.deploy_function(False, {}) is None
| apache/incubator-airflow | tests/providers/openfaas/hooks/test_openfaas.py | Python | apache-2.0 | 6,559 |
from estrategias.jogadores import Jogador
import random
class MeuJogador(Jogador):
def __init__(self):
Jogador.__init__(self)
self.total_jogador = 0
def escolha_de_cacada(self, rodada, comida_atual, reputacao_atual, m, reputacoes_dos_jogadores):
if rodada == 1:
self.total_jogador = len(reputacoes_dos_jogadores)
escolhas = []
n_jogadores = len(reputacoes_dos_jogadores)
media = sum(reputacoes_dos_jogadores) / len(reputacoes_dos_jogadores)
for rep in reputacoes_dos_jogadores:
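            # Cooperate ('c') only with sufficiently reputable players, with a
            # probability that shrinks as fewer players survive; otherwise
            # defect ('d').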
if (rep >= 0.45) and random.uniform(0, 1) > (1.1 - n_jogadores / self.total_jogador):
escolhas.append('c')
else:
escolhas.append('d')
return escolhas
| fccoelho/jogos_vorazes | estrategias/AraraClayton.py | Python | mit | 773 |
##########################################################################
#Copyright 2015 Rasmus Dall #
# #
#Licensed under the Apache License, Version 2.0 (the "License"); #
#you may not use this file except in compliance with the License. #
#You may obtain a copy of the License at #
# #
#http://www.apache.org/licenses/LICENSE-2.0 #
# #
#Unless required by applicable law or agreed to in writing, software #
#distributed under the License is distributed on an "AS IS" BASIS, #
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.#
#See the License for the specific language governing permissions and #
#limitations under the License. #
##########################################################################
#A few simple pre-written methods for visualising wavs.
#NOTE: Requires matplotlib which is not a standard python module.
#See here http://matplotlib.org/users/installing.html for how to get it.
#Load the SiReImports.pth file
import site
site.addsitedir("../")
import wave, argparse, math
from error_messages import SiReError
from numpy import fromstring
from matplotlib import pyplot
from matplotlib import axes
def get_wav(wav_path):
wav = wave.open(wav_path, "r")
if wav.getnchannels() == 2:
raise SiReError("Can only handle mono files. {0} had {1} channels.".format(wav_path, wav.getnchannels()))
return wav
def plot_wav(wav_path):
wav = get_wav(wav_path)
nf = wav.getnframes()
wav = fromstring(wav.readframes(-1), 'Int16')
pyplot.figure(1)
pyplot.title(wav_path)
pyplot.plot(wav)
pyplot.xlim(right=nf)
pyplot.xlabel("Frames")
pyplot.show()
def plot_spectogram(wav_path, f0=None):
  wav = get_wav(wav_path)
fs = wav.getframerate()
nf = wav.getnframes()
ns = nf/float(fs)
wav = fromstring(wav.readframes(-1), 'Int16')
pyplot.figure(1)
pyplot.title(wav_path)
pyplot.specgram(wav, Fs=fs)
pyplot.xlim(right=ns)
pyplot.ylim(top=8000)
pyplot.xlabel("Seconds")
if f0:
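    # Overlay the f0 track: frame indices are mapped onto seconds, assuming
    # the track spans the whole waveform (e.g. 5ms frames, as in the CLI
    # options below).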
x_points = [(ns/len(f0))*x for x in range(1, len(f0)+1)]
y_points = [x for x in f0]
pyplot.plot(x_points, y_points)
pyplot.show()
def plot_wav_and_spec(wav_path, f0):
wav = get_wav(wav_path)
fs = wav.getframerate()
nf = wav.getnframes()
ns = nf/float(fs)
wav = fromstring(wav.readframes(-1), 'Int16')
fig = pyplot.figure()
pyplot.title(wav_path)
w = pyplot.subplot(311)
w.set_xlim(right=nf)
w.plot(wav)
pyplot.xlabel("Frames")
s = pyplot.subplot(312)
pyplot.specgram(wav, Fs=fs)
s.set_xlim(right=ns)
s.set_ylim(top=8000)
if f0:
f = pyplot.subplot(313)
x_points = [(ns/len(f0))*x for x in range(1, len(f0)+1)]
y_points = [x for x in f0]
pyplot.plot(x_points, y_points)
f.set_xlim(right=ns)
pyplot.xlabel("Seconds")
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='A few simple methods for visualisation of wavs etc.')
parser.add_argument('inwav', type=str, help="The input wavfile.")
parser.add_argument('-plot_wav', action='store_true', help="Plot a waveform.")
  parser.add_argument('-plot_spec', action='store_true', help="Plot a spectrogram.")
  parser.add_argument('-plot_wav_and_spec', action='store_true', help="Plot both a waveform and a spectrogram.")
  parser.add_argument('-add_f0', type=str, help="If specified the f0 at PATH will be plotted on any spectrogram plotted. The f0 is assumed to be in 5ms frames.", metavar=("PATH"))
  parser.add_argument('-add_lf0', type=str, help="If specified the lf0 at PATH will be plotted on any spectrogram plotted. The lf0 will be converted to f0 before plotting and is assumed to be in 5ms frames and using the natural log. If both add_f0 and add_lf0 are specified the f0 file takes precedence.", metavar=("PATH"))
args = parser.parse_args()
if args.add_f0:
f0 = [float(x.strip()) for x in open(args.add_f0, "r").readlines()]
elif args.add_lf0:
    f0 = [math.exp(float(x.strip())) for x in open(args.add_lf0, "r").readlines()]
  else:
    # Without this branch f0 would be undefined below when neither -add_f0
    # nor -add_lf0 is given.
    f0 = None
if args.plot_wav:
plot_wav(args.inwav)
if args.plot_spec:
plot_spectogram(args.inwav, f0)
if args.plot_wav_and_spec:
plot_wav_and_spec(args.inwav, f0)
| RasmusD/SiRe | SiReUtils/visualisation_tools.py | Python | apache-2.0 | 4,519 |
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
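# Added illustrative sketch (not part of the upstream module): a minimal
# batch_end_callback that consumes the BatchEndParam defined above.
def _example_batch_end_callback(param):
    """Sketch only; never called by the library code."""
    # Assumes a single metric, so EvalMetric.get() returns (name, value).
    name, value = param.eval_metric.get()
    logging.info('Epoch[%d] Batch[%d] %s=%f',
                 param.epoch, param.nbatch, name, value)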
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
    This function selects and creates a proper kvstore given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = True
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
        if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
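# Added illustrative sketch (not part of the upstream module): demonstrates
# the single-device fallback documented above, where no kvstore is created
# and weight updates happen locally.
def _example_create_kvstore_fallback():
    """Sketch only; never called by the library code."""
    params = {'fc1_weight': nd.zeros((10, 10))}
    kv, update_on_kvstore = _create_kvstore('local', 1, params)
    assert kv is None and update_on_kvstore is False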
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names,
update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
            # pull back the summed gradients to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
            # use a better solution later
w, g = p
updater(index*num_device+k, g, w)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
    This function also works for a single device.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
        The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
        This can be used to measure speed, get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
    - This function updates the NDArrays in `arg_params` and `aux_states` in place.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(optimizer)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
return
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
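# Added illustrative sketch (not part of the upstream module): a checkpoint
# round trip through the two helpers above, using the documented
# prefix-symbol.json / prefix-0001.params file naming.
def _example_checkpoint_roundtrip():
    """Sketch only: save a trivial net and load it back."""
    data = sym.Variable('data')
    net = sym.FullyConnected(data=data, name='fc1', num_hidden=2)
    args = {'fc1_weight': nd.zeros((2, 4)), 'fc1_bias': nd.zeros((2,))}
    save_checkpoint('demo', 1, net, args, aux_params={})
    loaded_sym, loaded_args, loaded_aux = load_checkpoint('demo', 1)
    assert sorted(loaded_args.keys()) == ['fc1_bias', 'fc1_weight']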
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
        To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
        Training parameter, number of training epochs.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
Whether allow extra parameters that are not needed by symbol
to be passed by aux_params and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
        The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes))
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes))
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
                raise ValueError("The numbers of data points and labels are not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
            Defaults to 'local'; often there is no need to change it for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
        work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
        - 'local', multiple devices on a single machine, will automatically choose the best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
        # init optimizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
            if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
optimizer = self.optimizer
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
        You can also use `pickle` to do the job if you only work in Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
        One can also directly `load`/`save` from/to cloud storage (S3, HDFS).
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
        - ``prefix-symbol.json`` will be loaded for the symbol.
        - ``prefix-epoch.params`` will be loaded for parameters.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
            The number of training epochs.
epoch_size : int, optional
            Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
If `eval_set` is ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
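# Added illustrative sketch (not part of the upstream module): the
# functional-style workflow that FeedForward.create documents above, run on
# random numpy data. Shapes, sizes and hyper-parameters are arbitrary
# placeholder choices.
def _example_feedforward_create():
    """Sketch only; never called by the library code."""
    data = sym.Variable('data')
    net = sym.FullyConnected(data=data, name='fc1', num_hidden=10)
    net = sym.SoftmaxOutput(data=net, name='softmax')
    X = np.random.uniform(size=(20, 4))
    y = np.random.randint(0, 10, size=(20,))
    model = FeedForward.create(net, X=X, y=y, num_epoch=1,
                               numpy_batch_size=10, learning_rate=0.1)
    return model.predict(X)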
| hotpxl/mxnet | python/mxnet/model.py | Python | apache-2.0 | 39,119 |
#
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for OpenStack Cinder volume drivers."""
import mock
import ast
from oslo_config import cfg
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests import fake_hp_3par_client as hp3parclient
from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon
from cinder.volume.drivers.san.hp import hp_3par_fc as hpfcdriver
from cinder.volume.drivers.san.hp import hp_3par_iscsi as hpdriver
from cinder.volume import qos_specs
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
hpexceptions = hp3parclient.hpexceptions
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
HP3PAR_CPG = 'OpenStackCPG'
HP3PAR_CPG2 = 'fakepool'
HP3PAR_CPG_QOS = 'qospool'
HP3PAR_CPG_SNAP = 'OpenStackCPGSnap'
HP3PAR_USER_NAME = 'testUser'
HP3PAR_USER_PASS = 'testPassword'
HP3PAR_SAN_IP = '2.2.2.2'
HP3PAR_SAN_SSH_PORT = 999
HP3PAR_SAN_SSH_CON_TIMEOUT = 44
HP3PAR_SAN_SSH_PRIVATE = 'foobar'
CHAP_USER_KEY = "HPQ-cinder-CHAP-name"
CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret"
class HP3PARBaseDriver(object):
class CommentMatcher(object):
def __init__(self, f, expect):
self.assertEqual = f
self.expect = expect
def __eq__(self, actual):
actual_as_dict = dict(ast.literal_eval(actual))
self.assertEqual(self.expect, actual_as_dict)
return True
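    # The matcher above lets tests compare the 3PAR volume comment string as
    # a dict, so the key order produced by the driver does not matter.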
VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'
CLONE_ID = 'd03338a9-9115-48a3-8dfc-000000000000'
VOLUME_NAME = 'volume-' + VOLUME_ID
VOLUME_NAME_3PAR = 'osv-0DM4qZEVSKON-DXN-NwVpw'
SNAPSHOT_ID = '2f823bdc-e36e-4dc8-bd15-de1c7a28ff31'
SNAPSHOT_NAME = 'snapshot-2f823bdc-e36e-4dc8-bd15-de1c7a28ff31'
VOLUME_3PAR_NAME = 'osv-0DM4qZEVSKON-DXN-NwVpw'
SNAPSHOT_3PAR_NAME = 'oss-L4I73ONuTci9Fd4ceij-MQ'
FAKE_HOST = 'fakehost'
USER_ID = '2689d9a913974c008b1d859013f23607'
PROJECT_ID = 'fac88235b9d64685a3530f73e490348f'
VOLUME_ID_SNAP = '761fc5e5-5191-4ec7-aeba-33e36de44156'
FAKE_DESC = 'test description name'
FAKE_FC_PORTS = [{'portPos': {'node': 7, 'slot': 1, 'cardPort': 1},
'portWWN': '0987654321234',
'protocol': 1,
'mode': 2,
'linkState': 4},
{'portPos': {'node': 6, 'slot': 1, 'cardPort': 1},
'portWWN': '123456789000987',
'protocol': 1,
'mode': 2,
'linkState': 4}]
QOS = {'qos:maxIOPS': '1000', 'qos:maxBWS': '50',
'qos:minIOPS': '100', 'qos:minBWS': '25',
'qos:latency': '25', 'qos:priority': 'low'}
QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50',
'minIOPS': '100', 'minBWS': '25',
'latency': '25', 'priority': 'low'}
VVS_NAME = "myvvs"
FAKE_ISCSI_PORT = {'portPos': {'node': 8, 'slot': 1, 'cardPort': 1},
'protocol': 2,
'mode': 2,
'IPAddr': '1.1.1.2',
'iSCSIName': ('iqn.2000-05.com.3pardata:'
'21810002ac00383d'),
'linkState': 4}
volume = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_HOST,
'volume_type': None,
'volume_type_id': None}
volume_pool = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': volume_utils.append_host(FAKE_HOST, HP3PAR_CPG2),
'volume_type': None,
'volume_type_id': None}
volume_qos = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_HOST,
'volume_type': None,
'volume_type_id': 'gold'}
snapshot = {'name': SNAPSHOT_NAME,
'id': SNAPSHOT_ID,
'user_id': USER_ID,
'project_id': PROJECT_ID,
'volume_id': VOLUME_ID_SNAP,
'volume_name': VOLUME_NAME,
'status': 'creating',
'progress': '0%',
'volume_size': 2,
'display_name': 'fakesnap',
'display_description': FAKE_DESC}
wwn = ["123456789012345", "123456789054321"]
connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'wwpns': [wwn[0], wwn[1]],
'wwnns': ["223456789012345", "223456789054321"],
'host': FAKE_HOST}
volume_type = {'name': 'gold',
'deleted': False,
'updated_at': None,
'extra_specs': {'cpg': HP3PAR_CPG2,
'qos:maxIOPS': '1000',
'qos:maxBWS': '50',
'qos:minIOPS': '100',
'qos:minBWS': '25',
'qos:latency': '25',
'qos:priority': 'low'},
'deleted_at': None,
'id': 'gold'}
cpgs = [
{'SAGrowth': {'LDLayout': {'diskPatterns': [{'diskType': 2}]},
'incrementMiB': 8192},
'SAUsage': {'rawTotalMiB': 24576,
'rawUsedMiB': 768,
'totalMiB': 8192,
'usedMiB': 256},
'SDGrowth': {'LDLayout': {'RAIDType': 4,
'diskPatterns': [{'diskType': 2}]},
'incrementMiB': 32768},
'SDUsage': {'rawTotalMiB': 49152,
'rawUsedMiB': 1023,
'totalMiB': 36864,
'usedMiB': 1024 * 1},
'UsrUsage': {'rawTotalMiB': 57344,
'rawUsedMiB': 43349,
'totalMiB': 43008,
'usedMiB': 1024 * 20},
'additionalStates': [],
'degradedStates': [],
'failedStates': [],
'id': 5,
'name': HP3PAR_CPG,
'numFPVVs': 2,
'numTPVVs': 0,
'state': 1,
'uuid': '29c214aa-62b9-41c8-b198-543f6cf24edf'}]
TASK_DONE = 1
TASK_ACTIVE = 2
STATUS_DONE = {'status': 1}
STATUS_ACTIVE = {'status': 2}
mock_client_conf = {
'PORT_MODE_TARGET': 2,
'PORT_STATE_READY': 4,
'PORT_PROTO_ISCSI': 2,
'PORT_PROTO_FC': 1,
'TASK_DONE': TASK_DONE,
'TASK_ACTIVE': TASK_ACTIVE,
'HOST_EDIT_ADD': 1,
'CHAP_INITIATOR': 1,
'CHAP_TARGET': 2,
'getPorts.return_value': {
'members': FAKE_FC_PORTS + [FAKE_ISCSI_PORT]
}
}
RETYPE_VVS_NAME = "yourvvs"
RETYPE_HOST = {
u'host': u'mark-stack1@3parfc',
u'capabilities': {
'QoS_support': True,
u'location_info': u'HP3PARDriver:1234567:MARK_TEST_CPG',
u'timestamp': u'2014-06-04T19:03:32.485540',
u'allocated_capacity_gb': 0,
u'volume_backend_name': u'3parfc',
u'free_capacity_gb': u'infinite',
u'driver_version': u'2.0.3',
u'total_capacity_gb': u'infinite',
u'reserved_percentage': 0,
u'vendor_name': u'Hewlett-Packard',
u'storage_protocol': u'FC'
}
}
RETYPE_HOST_NOT3PAR = {
u'host': u'mark-stack1@3parfc',
u'capabilities': {
u'location_info': u'XXXDriverXXX:1610771:MARK_TEST_CPG',
}
}
RETYPE_QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50',
'minIOPS': '100', 'minBWS': '25',
'latency': '25', 'priority': 'high'}
RETYPE_VOLUME_TYPE_ID = "FakeVolId"
RETYPE_VOLUME_TYPE_0 = {
'name': 'red',
'id': RETYPE_VOLUME_TYPE_ID,
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs': RETYPE_VVS_NAME,
'qos': RETYPE_QOS_SPECS,
'tpvv': True,
'volume_type': volume_type
}
}
RETYPE_VOLUME_TYPE_1 = {
'name': 'white',
'id': RETYPE_VOLUME_TYPE_ID,
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs': VVS_NAME,
'qos': QOS,
'tpvv': True,
'volume_type': volume_type
}
}
RETYPE_VOLUME_TYPE_2 = {
'name': 'blue',
'id': RETYPE_VOLUME_TYPE_ID,
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs': RETYPE_VVS_NAME,
'qos': RETYPE_QOS_SPECS,
'tpvv': True,
'volume_type': volume_type
}
}
RETYPE_VOLUME_TYPE_BAD_PERSONA = {
'name': 'bad_persona',
'id': 'any_id',
'extra_specs': {
'hp3par:persona': '99 - invalid'
}
}
RETYPE_VOLUME_TYPE_BAD_CPG = {
'name': 'bad_cpg',
'id': 'any_id',
'extra_specs': {
'cpg': 'bogus',
'snap_cpg': 'bogus',
'hp3par:persona': '2 - Generic-ALUA'
}
}
MANAGE_VOLUME_INFO = {
'userCPG': 'testUserCpg0',
'snapCPG': 'testSnapCpg0',
'provisioningType': 1,
'comment': "{'display_name': 'Foo Volume'}"
}
RETYPE_TEST_COMMENT = "{'retype_test': 'test comment'}"
RETYPE_VOLUME_INFO_0 = {
'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Retype Vol0',
'size': 1,
'host': RETYPE_HOST,
'userCPG': 'testUserCpg0',
'snapCPG': 'testSnapCpg0',
'provisioningType': 1,
'comment': RETYPE_TEST_COMMENT
}
RETYPE_TEST_COMMENT_1 = "{'retype_test': 'test comment 1'}"
RETYPE_VOLUME_INFO_1 = {
'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Retype Vol1',
'size': 1,
'host': RETYPE_HOST,
'userCPG': HP3PAR_CPG,
'snapCPG': HP3PAR_CPG_SNAP,
'provisioningType': 1,
'comment': RETYPE_TEST_COMMENT
}
# Test for when we don't get a snapCPG.
RETYPE_VOLUME_INFO_NO_SNAP = {
'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Retype Vol2',
'size': 1,
'host': RETYPE_HOST,
'userCPG': 'testUserCpg2',
'provisioningType': 1,
'comment': '{}'
}
RETYPE_CONF = {
'TASK_ACTIVE': TASK_ACTIVE,
'TASK_DONE': TASK_DONE,
'getTask.return_value': STATUS_DONE,
'getStorageSystemInfo.return_value': {'serialNumber': '1234567'},
'getVolume.return_value': RETYPE_VOLUME_INFO_0,
'modifyVolume.return_value': ("anyResponse", {'taskid': 1})
}
# 3PAR retype currently doesn't use the diff. Existing code and fresh info
# from the array work better for the most part. Some use of the diff was
# intentionally removed to make _retype more usable for other use cases.
RETYPE_DIFF = None
standard_login = [
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.setSSHOptions(
HP3PAR_SAN_IP,
HP3PAR_USER_NAME,
HP3PAR_USER_PASS,
missing_key_policy='AutoAddPolicy',
privatekey=HP3PAR_SAN_SSH_PRIVATE,
known_hosts_file=mock.ANY,
port=HP3PAR_SAN_SSH_PORT,
conn_timeout=HP3PAR_SAN_SSH_CON_TIMEOUT)]
standard_logout = [
mock.call.logout()]
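    # Every driver operation is expected to log in to the array first and
    # log out when done, so most tests assert the call sequence as
    # standard_login + expected + standard_logout.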
def setup_configuration(self):
configuration = mock.Mock()
configuration.hp3par_debug = False
configuration.hp3par_username = HP3PAR_USER_NAME
configuration.hp3par_password = HP3PAR_USER_PASS
configuration.hp3par_api_url = 'https://1.1.1.1/api/v1'
configuration.hp3par_cpg = [HP3PAR_CPG, HP3PAR_CPG2]
configuration.hp3par_cpg_snap = HP3PAR_CPG_SNAP
configuration.iscsi_ip_address = '1.1.1.2'
configuration.iscsi_port = '1234'
configuration.san_ip = HP3PAR_SAN_IP
configuration.san_login = HP3PAR_USER_NAME
configuration.san_password = HP3PAR_USER_PASS
configuration.san_ssh_port = HP3PAR_SAN_SSH_PORT
configuration.ssh_conn_timeout = HP3PAR_SAN_SSH_CON_TIMEOUT
configuration.san_private_key = HP3PAR_SAN_SSH_PRIVATE
configuration.hp3par_snapshot_expiration = ""
configuration.hp3par_snapshot_retention = ""
configuration.hp3par_iscsi_ips = []
configuration.hp3par_iscsi_chap_enabled = False
return configuration
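    # Patch the hp3parclient with spec=True so attribute access on the
    # mock is checked against the real client API; per-test overrides are
    # layered on top of mock_client_conf via m_conf.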
@mock.patch(
'hp3parclient.client.HP3ParClient',
spec=True,
)
def setup_mock_client(self, _m_client, driver, conf=None, m_conf=None):
_m_client = _m_client.return_value
        # Configure the base constants, defaults, etc.
_m_client.configure_mock(**self.mock_client_conf)
        # If m_conf was provided, layer those values over the base config.
if m_conf is not None:
_m_client.configure_mock(**m_conf)
if conf is None:
conf = self.setup_configuration()
self.driver = driver(configuration=conf)
self.driver.do_setup(None)
return _m_client
@mock.patch('hp3parclient.version', "3.0.9")
def test_unsupported_client_version(self):
self.assertRaises(exception.InvalidInput,
self.setup_driver)
@mock.patch('hp3parclient.version', "3.1.2")
def test_ssh_options(self):
expected_hosts_key_file = "test_hosts_key_file"
orig_ssh_hosts_key_file = CONF.ssh_hosts_key_file
orig_strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy
CONF.ssh_hosts_key_file = expected_hosts_key_file
CONF.strict_ssh_host_key_policy = False
self.ctxt = context.get_admin_context()
mock_client = self.setup_mock_client(driver=hpfcdriver.HP3PARFCDriver)
CONF.ssh_hosts_key_file = orig_ssh_hosts_key_file
CONF.strict_ssh_host_key_policy = orig_strict_ssh_host_key_policy
expected = [
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.setSSHOptions(
HP3PAR_SAN_IP,
HP3PAR_USER_NAME,
HP3PAR_USER_PASS,
privatekey=HP3PAR_SAN_SSH_PRIVATE,
known_hosts_file=expected_hosts_key_file,
missing_key_policy="AutoAddPolicy",
port=HP3PAR_SAN_SSH_PORT,
conn_timeout=HP3PAR_SAN_SSH_CON_TIMEOUT),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getCPG(HP3PAR_CPG2)]
mock_client.assert_has_calls(
expected +
self.standard_logout)
@mock.patch('hp3parclient.version', "3.1.2")
def test_ssh_options_strict(self):
expected_hosts_key_file = "test_hosts_key_file"
orig_ssh_hosts_key_file = CONF.ssh_hosts_key_file
orig_strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy
CONF.ssh_hosts_key_file = expected_hosts_key_file
CONF.strict_ssh_host_key_policy = True
self.ctxt = context.get_admin_context()
mock_client = self.setup_mock_client(driver=hpfcdriver.HP3PARFCDriver)
CONF.ssh_hosts_key_file = orig_ssh_hosts_key_file
CONF.strict_ssh_host_key_policy = orig_strict_ssh_host_key_policy
expected = [
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.setSSHOptions(
HP3PAR_SAN_IP,
HP3PAR_USER_NAME,
HP3PAR_USER_PASS,
privatekey=HP3PAR_SAN_SSH_PRIVATE,
known_hosts_file=expected_hosts_key_file,
missing_key_policy="RejectPolicy",
port=HP3PAR_SAN_SSH_PORT,
conn_timeout=HP3PAR_SAN_SSH_CON_TIMEOUT),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getCPG(HP3PAR_CPG2)]
mock_client.assert_has_calls(expected + self.standard_logout)
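    # TaskWaiter polls getTask() until the task leaves the active state.
    # The side_effect below returns STATUS_ACTIVE twice and then
    # STATUS_DONE, so exactly three getTask() calls are expected.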
def test_task_waiter(self):
task_statuses = [self.STATUS_ACTIVE, self.STATUS_ACTIVE]
def side_effect(*args):
            return task_statuses.pop(0) if task_statuses else self.STATUS_DONE
conf = {'getTask.side_effect': side_effect}
mock_client = self.setup_driver(mock_conf=conf)
task_id = 1234
interval = .001
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
waiter = common.TaskWaiter(mock_client, task_id, interval)
status = waiter.wait_for_task()
expected = [
mock.call.getTask(task_id),
mock.call.getTask(task_id),
mock.call.getTask(task_id)
]
mock_client.assert_has_calls(expected)
self.assertEqual(status, self.STATUS_DONE)
def test_create_volume(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.create_volume(self.volume)
comment = (
'{"display_name": "Foo Volume", "type": "OpenStack",'
' "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",'
' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
expected = [
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG,
1907, {
'comment': comment,
'tpvv': True,
'snapCPG': HP3PAR_CPG_SNAP})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
def test_create_volume_in_pool(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
return_model = self.driver.create_volume(self.volume_pool)
comment = (
'{"display_name": "Foo Volume", "type": "OpenStack",'
' "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",'
' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
expected = [
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG2,
1907, {
'comment': comment,
'tpvv': True,
'snapCPG': HP3PAR_CPG_SNAP})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
            self.assertIsNone(return_model)
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type(self, _mock_volume_types):
mock_client = self.setup_driver()
expected_type_snap_cpg = "type_snap_cpg"
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': expected_type_snap_cpg,
'volume_type': self.volume_type}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(expected_type_snap_cpg, result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type_cpg(self, _mock_volume_types):
mock_client = self.setup_driver()
expected_cpg = 'use_extra_specs_cpg'
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': expected_cpg,
'volume_type': self.volume_type}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(expected_cpg, result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type_conf_snap_cpg(
self, _mock_volume_types):
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'volume_type': self.volume_type}}
conf = self.setup_configuration()
expected_snap_cpg = conf.hp3par_cpg_snap
mock_client = self.setup_driver(config=conf)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(expected_snap_cpg, result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type_conf_cpg(
self, _mock_volume_types):
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'volume_type': self.volume_type}}
conf = self.setup_configuration()
conf.hp3par_cpg_snap = None
expected_cpg = conf.hp3par_cpg
mock_client = self.setup_driver(config=conf)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(expected_cpg, result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_qos(self, _mock_volume_types):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'tpvv': True,
'volume_type': self.volume_type}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
return_model = self.driver.create_volume(self.volume_qos)
comment = (
'{"volume_type_name": "gold", "display_name": "Foo Volume"'
', "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7'
'", "volume_type_id": "gold", "volume_id": "d03338a9-91'
'15-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}')
expected = [
mock.call.getCPG(HP3PAR_CPG_QOS),
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG_QOS,
1907, {
'comment': comment,
'tpvv': True,
'snapCPG': HP3PAR_CPG_SNAP})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(return_model,
{'host': volume_utils.append_host(
self.FAKE_HOST,
HP3PAR_CPG_QOS)})
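    # The retype tests below rely on the scheduler host's location_info
    # string ('<Driver>:<serial>:<CPG>'); retype is rejected when the
    # driver name or the array serial number does not match this backend.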
@mock.patch.object(volume_types, 'get_volume_type')
def test_retype_not_3par(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.assertRaises(exception.InvalidHost,
self.driver.retype,
self.ctxt,
self.RETYPE_VOLUME_INFO_0,
self.RETYPE_VOLUME_TYPE_1,
self.RETYPE_DIFF,
self.RETYPE_HOST_NOT3PAR)
expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_retype_volume_not_found(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.assertRaises(hpexceptions.HTTPNotFound,
self.driver.retype,
self.ctxt,
self.RETYPE_VOLUME_INFO_0,
self.RETYPE_VOLUME_TYPE_1,
self.RETYPE_DIFF,
self.RETYPE_HOST)
expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_retype_specs_error_reverts_snap_cpg(self, _mock_volume_types):
_mock_volume_types.side_effect = [
self.RETYPE_VOLUME_TYPE_1, self.RETYPE_VOLUME_TYPE_0]
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_0
# Fail the QOS setting to test the revert of the snap CPG rename.
mock_client.addVolumeToVolumeSet.side_effect = \
hpexceptions.HTTPForbidden
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.assertRaises(hpexceptions.HTTPForbidden,
self.driver.retype,
self.ctxt,
{'id': self.VOLUME_ID},
self.RETYPE_VOLUME_TYPE_0,
self.RETYPE_DIFF,
self.RETYPE_HOST)
old_settings = {
'snapCPG': self.RETYPE_VOLUME_INFO_0['snapCPG'],
'comment': self.RETYPE_VOLUME_INFO_0['comment']}
new_settings = {
'snapCPG': (
self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg']),
'comment': mock.ANY}
expected = [
mock.call.modifyVolume(self.VOLUME_3PAR_NAME, new_settings)
]
mock_client.assert_has_calls(expected)
expected = [
mock.call.modifyVolume(self.VOLUME_3PAR_NAME, old_settings)
]
mock_client.assert_has_calls(expected + self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_retype_revert_comment(self, _mock_volume_types):
_mock_volume_types.side_effect = [
self.RETYPE_VOLUME_TYPE_2, self.RETYPE_VOLUME_TYPE_1]
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_1
        # Fail the volume set delete to test the revert of the comment.
mock_client.deleteVolumeSet.side_effect = hpexceptions.HTTPForbidden
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.assertRaises(hpexceptions.HTTPForbidden,
self.driver.retype,
self.ctxt,
{'id': self.VOLUME_ID},
self.RETYPE_VOLUME_TYPE_2,
self.RETYPE_DIFF,
self.RETYPE_HOST)
original = {
'snapCPG': self.RETYPE_VOLUME_INFO_1['snapCPG'],
'comment': self.RETYPE_VOLUME_INFO_1['comment']}
expected = [
mock.call.modifyVolume('osv-0DM4qZEVSKON-DXN-NwVpw', original)]
mock_client.assert_has_calls(expected + self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_retype_different_array(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
mock_client.getStorageSystemInfo.return_value = {
'serialNumber': 'XXXXXXX'}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.assertRaises(exception.InvalidHost,
self.driver.retype,
self.ctxt,
self.RETYPE_VOLUME_INFO_0,
self.RETYPE_VOLUME_TYPE_1,
self.RETYPE_DIFF,
self.RETYPE_HOST)
expected = [
mock.call.getVolume(self.VOLUME_3PAR_NAME),
mock.call.getStorageSystemInfo()]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_retype_across_cpg_domains(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
mock_client.getCPG.side_effect = [
{'domain': 'domain1'},
{'domain': 'domain2'},
]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.assertRaises(exception.Invalid3PARDomain,
self.driver.retype,
self.ctxt,
self.RETYPE_VOLUME_INFO_0,
self.RETYPE_VOLUME_TYPE_1,
self.RETYPE_DIFF,
self.RETYPE_HOST)
expected = [
mock.call.getVolume(self.VOLUME_3PAR_NAME),
mock.call.getStorageSystemInfo(),
mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']),
mock.call.getCPG(
self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg'])
]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_retype_across_snap_cpg_domains(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
mock_client.getCPG.side_effect = [
{'domain': 'cpg_domain'},
{'domain': 'cpg_domain'},
{'domain': 'snap_cpg_domain_1'},
]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.assertRaises(exception.Invalid3PARDomain,
self.driver.retype,
self.ctxt,
self.RETYPE_VOLUME_INFO_0,
self.RETYPE_VOLUME_TYPE_1,
self.RETYPE_DIFF,
self.RETYPE_HOST)
expected = [
mock.call.getVolume(self.VOLUME_3PAR_NAME),
mock.call.getStorageSystemInfo(),
mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']),
mock.call.getCPG(
self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg']),
mock.call.getCPG(
self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg'])
]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_retype_to_bad_persona(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_BAD_PERSONA
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.assertRaises(exception.InvalidInput,
self.driver.retype,
self.ctxt,
self.RETYPE_VOLUME_INFO_0,
self.RETYPE_VOLUME_TYPE_BAD_PERSONA,
self.RETYPE_DIFF,
self.RETYPE_HOST)
expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_retype_to_bad_cpg(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_BAD_CPG
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
mock_client.getCPG.side_effect = hpexceptions.HTTPNotFound
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.assertRaises(exception.InvalidInput,
self.driver.retype,
self.ctxt,
self.RETYPE_VOLUME_INFO_0,
self.RETYPE_VOLUME_TYPE_BAD_CPG,
self.RETYPE_DIFF,
self.RETYPE_HOST)
expected = [
mock.call.getCPG(
self.RETYPE_VOLUME_TYPE_BAD_CPG['extra_specs']['cpg'])
]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_retype_tune(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS)
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:maxIOPS": "100",
"qos:maxBWS": "50",
"qos:minIOPS": "10",
"qos:minBWS": "20",
"qos:latency": "5",
"qos:priority": "high"})
qos_specs.associate_qos_with_type(self.ctxt,
qos_ref['id'],
type_ref['id'])
type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])
volume = {'id': HP3PARBaseDriver.CLONE_ID}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
retyped = self.driver.retype(
self.ctxt, volume, type_ref, None, self.RETYPE_HOST)
self.assertTrue(retyped)
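            # The final modifyVolume call tunes the volume onto the new
            # user CPG; 'action': 6 is presumably the client's volume-tune
            # action constant, with conversionOperation/tuneOperation
            # selecting the conversion type.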
expected = [
mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA',
{'comment': mock.ANY,
'snapCPG': 'OpenStackCPGSnap'}),
mock.call.deleteVolumeSet('vvs-0DM4qZEVSKON-AAAAAAAAA'),
mock.call.addVolumeToVolumeSet('myvvs',
'osv-0DM4qZEVSKON-AAAAAAAAA'),
mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA',
{'action': 6,
'userCPG': 'OpenStackCPG',
'conversionOperation': 1,
'tuneOperation': 1}),
mock.call.getTask(1)
]
mock_client.assert_has_calls(expected + self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_retype_qos_spec(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
cpg = "any_cpg"
snap_cpg = "any_cpg"
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
common._retype(self.volume,
HP3PARBaseDriver.VOLUME_3PAR_NAME,
"old_type", "old_type_id",
HP3PARBaseDriver.RETYPE_HOST,
None, cpg, cpg, snap_cpg, snap_cpg,
True, True, None, None,
self.QOS_SPECS, self.RETYPE_QOS_SPECS,
"{}")
expected = [
mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None),
mock.call.createQoSRules(
'vvs-0DM4qZEVSKON-DXN-NwVpw',
{'ioMinGoal': 100, 'ioMaxLimit': 1000,
'bwMinGoalKB': 25600, 'bwMaxLimitKB': 51200,
'priority': 3,
'latencyGoal': 25}
),
mock.call.addVolumeToVolumeSet(
'vvs-0DM4qZEVSKON-DXN-NwVpw',
'osv-0DM4qZEVSKON-DXN-NwVpw')]
mock_client.assert_has_calls(expected)
def test_delete_volume(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.delete_volume(self.volume)
expected = [mock.call.deleteVolume(self.VOLUME_3PAR_NAME)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
def test_create_cloned_volume(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.copyVolume.return_value = {'taskid': 1}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
'id': HP3PARBaseDriver.CLONE_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': volume_utils.append_host(self.FAKE_HOST,
HP3PAR_CPG2),
'source_volid': HP3PARBaseDriver.VOLUME_ID}
src_vref = {}
model_update = self.driver.create_cloned_volume(volume, src_vref)
self.assertIsNone(model_update)
expected = [
mock.call.copyVolume(
self.VOLUME_3PAR_NAME,
'osv-0DM4qZEVSKON-AAAAAAAAA',
HP3PAR_CPG2,
{'snapCPG': 'OpenStackCPGSnap', 'tpvv': True,
'online': True})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_cloned_qos_volume(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2
mock_client = self.setup_driver()
mock_client.copyVolume.return_value = {'taskid': 1}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
src_vref = {}
volume = self.volume_qos.copy()
host = "TEST_HOST"
pool = "TEST_POOL"
volume_host = volume_utils.append_host(host, pool)
expected_cpg = self.RETYPE_VOLUME_TYPE_2['extra_specs']['cpg']
expected_volume_host = volume_utils.append_host(host, expected_cpg)
volume['id'] = HP3PARBaseDriver.CLONE_ID
volume['host'] = volume_host
volume['source_volid'] = HP3PARBaseDriver.VOLUME_ID
model_update = self.driver.create_cloned_volume(volume, src_vref)
self.assertEqual(model_update, {'host': expected_volume_host})
expected = [
mock.call.getCPG(expected_cpg),
mock.call.copyVolume(
self.VOLUME_3PAR_NAME,
'osv-0DM4qZEVSKON-AAAAAAAAA',
expected_cpg,
{'snapCPG': 'OpenStackCPGSnap', 'tpvv': True,
'online': True})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
def test_migrate_volume(self):
conf = {
'getStorageSystemInfo.return_value': {
'serialNumber': '1234'},
'getTask.return_value': {
'status': 1},
'getCPG.return_value': {},
'copyVolume.return_value': {'taskid': 1},
'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
}
mock_client = self.setup_driver(mock_conf=conf)
mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
mock_client.getTask.return_value = self.STATUS_DONE
volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
'id': HP3PARBaseDriver.CLONE_ID,
'display_name': 'Foo Volume',
'volume_type_id': None,
'size': 2,
'status': 'available',
'host': HP3PARBaseDriver.FAKE_HOST,
'source_volid': HP3PARBaseDriver.VOLUME_ID}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume_name_3par = common._encode_name(volume['id'])
loc_info = 'HP3PARDriver:1234:CPG-FC1'
host = {'host': 'stack@3parfc1#CPG-FC1',
'capabilities': {'location_info': loc_info}}
result = self.driver.migrate_volume(context.get_admin_context(),
volume, host)
self.assertIsNotNone(result)
self.assertEqual((True, None), result)
osv_matcher = 'osv-' + volume_name_3par
expected = [
mock.call.modifyVolume(
osv_matcher,
{'comment': '{"qos": {}, "display_name": "Foo Volume"}',
'snapCPG': HP3PAR_CPG_SNAP}),
mock.call.modifyVolume(osv_matcher,
{'action': 6,
'userCPG': 'CPG-FC1',
'conversionOperation': 1,
'tuneOperation': 1}),
mock.call.getTask(mock.ANY)
]
mock_client.assert_has_calls(expected + self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_migrate_volume_with_type(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2
conf = {
'getStorageSystemInfo.return_value': {
'serialNumber': '1234'},
'getTask.return_value': {
'status': 1},
'getCPG.return_value': {},
'copyVolume.return_value': {'taskid': 1},
'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
}
mock_client = self.setup_driver(mock_conf=conf)
mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
mock_client.getTask.return_value = self.STATUS_DONE
display_name = 'Foo Volume'
volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
'id': HP3PARBaseDriver.CLONE_ID,
'display_name': display_name,
"volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'],
'size': 2,
'status': 'available',
'host': HP3PARBaseDriver.FAKE_HOST,
'source_volid': HP3PARBaseDriver.VOLUME_ID}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume_name_3par = common._encode_name(volume['id'])
loc_info = 'HP3PARDriver:1234:CPG-FC1'
host = {'host': 'stack@3parfc1#CPG-FC1',
'capabilities': {'location_info': loc_info}}
result = self.driver.migrate_volume(context.get_admin_context(),
volume, host)
self.assertIsNotNone(result)
expected_host = volume_utils.append_host(
"stack@3parfc1",
self.RETYPE_VOLUME_TYPE_2['extra_specs']['cpg'])
self.assertEqual((True, {'host': expected_host}), result)
osv_matcher = 'osv-' + volume_name_3par
expected_comment = {
"display_name": display_name,
"volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'],
"volume_type_name": self.RETYPE_VOLUME_TYPE_2['name'],
"vvs": self.RETYPE_VOLUME_TYPE_2['extra_specs']['vvs']
}
expected = [
mock.call.modifyVolume(
osv_matcher,
{'comment': self.CommentMatcher(self.assertEqual,
expected_comment),
'snapCPG': self.RETYPE_VOLUME_TYPE_2
['extra_specs']['snap_cpg']}),
mock.call.modifyVolume(
osv_matcher,
{'action': 6,
'userCPG': self.RETYPE_VOLUME_TYPE_2
['extra_specs']['cpg'],
'conversionOperation': 1,
'tuneOperation': 1}),
mock.call.getTask(mock.ANY)
]
mock_client.assert_has_calls(
expected +
self.standard_logout)
def test_migrate_volume_diff_host(self):
conf = {
'getStorageSystemInfo.return_value': {
'serialNumber': 'different'},
}
mock_client = self.setup_driver(mock_conf=conf)
volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
'id': HP3PARBaseDriver.CLONE_ID,
'display_name': 'Foo Volume',
'volume_type_id': None,
'size': 2,
'status': 'available',
'host': HP3PARBaseDriver.FAKE_HOST,
'source_volid': HP3PARBaseDriver.VOLUME_ID}
loc_info = 'HP3PARDriver:1234:CPG-FC1'
host = {'host': 'stack@3parfc1',
'capabilities': {'location_info': loc_info}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
result = self.driver.migrate_volume(context.get_admin_context(),
volume, host)
self.assertIsNotNone(result)
self.assertEqual((False, None), result)
@mock.patch.object(volume_types, 'get_volume_type')
def test_migrate_volume_diff_domain(self, _mock_volume_types):
_mock_volume_types.return_value = self.volume_type
conf = {
'getStorageSystemInfo.return_value': {
'serialNumber': '1234'},
'getTask.return_value': {
'status': 1},
'getCPG.return_value': {},
'copyVolume.return_value': {'taskid': 1},
'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
}
mock_client = self.setup_driver(mock_conf=conf)
mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
mock_client.getTask.return_value = self.STATUS_DONE
volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
'id': HP3PARBaseDriver.CLONE_ID,
'display_name': 'Foo Volume',
'volume_type_id': None,
'size': 2,
'status': 'available',
'host': HP3PARBaseDriver.FAKE_HOST,
'source_volid': HP3PARBaseDriver.VOLUME_ID}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume_name_3par = common._encode_name(volume['id'])
loc_info = 'HP3PARDriver:1234:CPG-FC1'
host = {'host': 'stack@3parfc1#CPG-FC1',
'capabilities': {'location_info': loc_info}}
result = self.driver.migrate_volume(context.get_admin_context(),
volume, host)
self.assertIsNotNone(result)
self.assertEqual((True, None), result)
osv_matcher = 'osv-' + volume_name_3par
expected = [
mock.call.modifyVolume(
osv_matcher,
{'comment': '{"qos": {}, "display_name": "Foo Volume"}',
'snapCPG': HP3PAR_CPG_SNAP}),
mock.call.modifyVolume(osv_matcher,
{'action': 6,
'userCPG': 'CPG-FC1',
'conversionOperation': 1,
'tuneOperation': 1}),
mock.call.getTask(mock.ANY),
]
mock_client.assert_has_calls(expected + self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_migrate_volume_attached(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
'volume_type_id': None,
'id': HP3PARBaseDriver.CLONE_ID,
'display_name': 'Foo Volume',
'size': 2,
'status': 'in-use',
'host': HP3PARBaseDriver.FAKE_HOST,
'source_volid': HP3PARBaseDriver.VOLUME_ID}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume_name_3par = common._encode_name(volume['id'])
osv_matcher = 'osv-' + volume_name_3par
loc_info = 'HP3PARDriver:1234567:CPG-FC1'
protocol = "FC"
if self.properties['driver_volume_type'] == "iscsi":
protocol = "iSCSI"
host = {'host': 'stack@3parfc1',
'capabilities': {'location_info': loc_info,
'storage_protocol': protocol}}
result = self.driver.migrate_volume(context.get_admin_context(),
volume, host)
new_comment = {"qos": {},
"retype_test": "test comment"}
expected = [
mock.call.modifyVolume(osv_matcher,
{'comment': self.CommentMatcher(
self.assertEqual, new_comment),
'snapCPG': 'OpenStackCPGSnap'}),
mock.call.modifyVolume(osv_matcher,
{'action': 6,
'userCPG': 'OpenStackCPG',
'conversionOperation': 1,
'tuneOperation': 1}),
mock.call.getTask(1),
mock.call.logout()
]
mock_client.assert_has_calls(expected)
self.assertIsNotNone(result)
self.assertEqual((True, {'host': 'stack@3parfc1#OpenStackCPG'}),
result)
@mock.patch.object(volume_types, 'get_volume_type')
def test_migrate_volume_attached_diff_protocol(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
protocol = "OTHER"
volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
'volume_type_id': None,
'id': HP3PARBaseDriver.CLONE_ID,
'display_name': 'Foo Volume',
'size': 2,
'status': 'in-use',
'host': HP3PARBaseDriver.FAKE_HOST,
'source_volid': HP3PARBaseDriver.VOLUME_ID}
loc_info = 'HP3PARDriver:1234567:CPG-FC1'
host = {'host': 'stack@3parfc1',
'capabilities': {'location_info': loc_info,
'storage_protocol': protocol}}
result = self.driver.migrate_volume(context.get_admin_context(),
volume, host)
self.assertIsNotNone(result)
self.assertEqual((False, None), result)
expected = []
mock_client.assert_has_calls(expected)
def test_attach_volume(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.attach_volume(context.get_admin_context(),
self.volume,
'abcdef',
'newhost',
'/dev/vdb')
expected = [
mock.call.setVolumeMetaData(
self.VOLUME_3PAR_NAME,
'HPQ-CS-instance_uuid',
'abcdef')]
mock_client.assert_has_calls(expected)
# test the exception
mock_client.setVolumeMetaData.side_effect = Exception('Custom ex')
self.assertRaises(exception.CinderException,
self.driver.attach_volume,
context.get_admin_context(),
self.volume,
'abcdef',
'newhost',
'/dev/vdb')
def test_detach_volume(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.detach_volume(context.get_admin_context(), self.volume)
expected = [
mock.call.removeVolumeMetaData(
self.VOLUME_3PAR_NAME,
'HPQ-CS-instance_uuid')]
mock_client.assert_has_calls(expected)
# test the exception
mock_client.removeVolumeMetaData.side_effect = Exception(
'Custom ex')
self.assertRaises(exception.CinderException,
self.driver.detach_volume,
context.get_admin_context(),
self.volume)
def test_create_snapshot(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.create_snapshot(self.snapshot)
comment = (
'{"volume_id": "761fc5e5-5191-4ec7-aeba-33e36de44156",'
' "display_name": "fakesnap",'
' "description": "test description name",'
' "volume_name":'
' "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
expected = [
mock.call.createSnapshot(
'oss-L4I73ONuTci9Fd4ceij-MQ',
'osv-dh-F5VGRTseuujPjbeRBVg',
{
'comment': comment,
'readOnly': True})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
def test_delete_snapshot(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.delete_snapshot(self.snapshot)
expected = [
mock.call.deleteVolume('oss-L4I73ONuTci9Fd4ceij-MQ')]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
def test_delete_snapshot_in_use(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.create_snapshot(self.snapshot)
self.driver.create_volume_from_snapshot(self.volume, self.snapshot)
ex = hpexceptions.HTTPConflict("In use")
mock_client.deleteVolume = mock.Mock(side_effect=ex)
# Deleting the snapshot that a volume is dependent on should fail
self.assertRaises(exception.SnapshotIsBusy,
self.driver.delete_snapshot,
self.snapshot)
def test_delete_snapshot_not_found(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
self.driver.create_snapshot(self.snapshot)
try:
ex = hpexceptions.HTTPNotFound("not found")
mock_client.deleteVolume = mock.Mock(side_effect=ex)
self.driver.delete_snapshot(self.snapshot)
except Exception:
self.fail("Deleting a snapshot that is missing should act as if "
"it worked.")
def test_create_volume_from_snapshot(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
model_update = self.driver.create_volume_from_snapshot(
self.volume,
self.snapshot)
self.assertIsNone(model_update)
comment = (
'{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
' "display_name": "Foo Volume",'
' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
expected = [
mock.call.createSnapshot(
self.VOLUME_3PAR_NAME,
'oss-L4I73ONuTci9Fd4ceij-MQ',
{
'comment': comment,
'readOnly': False})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
volume = self.volume.copy()
volume['size'] = 1
self.assertRaises(exception.InvalidInput,
self.driver.create_volume_from_snapshot,
volume, self.snapshot)
def test_create_volume_from_snapshot_and_extend(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
conf = {
'getTask.return_value': {
'status': 1},
'copyVolume.return_value': {'taskid': 1},
'getVolume.return_value': {}
}
mock_client = self.setup_driver(mock_conf=conf)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume = self.volume.copy()
volume['size'] = self.volume['size'] + 10
model_update = self.driver.create_volume_from_snapshot(
volume,
self.snapshot)
self.assertEqual(model_update,
{'host': volume_utils.append_host(self.FAKE_HOST,
HP3PAR_CPG)})
comment = (
'{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
' "display_name": "Foo Volume",'
' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
volume_name_3par = common._encode_name(volume['id'])
osv_matcher = 'osv-' + volume_name_3par
omv_matcher = 'omv-' + volume_name_3par
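            # Naming convention used throughout these tests: 'osv-' is the
            # OpenStack volume, 'oss-' a snapshot, and 'omv-' a temporary
            # migration copy. Per the expected calls below, the volume is
            # first copied online to a new base volume ('omv-'), the copy
            # is renamed back to the 'osv-' name, and then grown by the
            # size delta expressed in MiB (10 * 1024 here).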
expected = [
mock.call.createSnapshot(
self.VOLUME_3PAR_NAME,
'oss-L4I73ONuTci9Fd4ceij-MQ',
{
'comment': comment,
'readOnly': False}),
mock.call.copyVolume(
osv_matcher, omv_matcher, HP3PAR_CPG, mock.ANY),
mock.call.getTask(mock.ANY),
mock.call.getVolume(osv_matcher),
mock.call.deleteVolume(osv_matcher),
mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}),
mock.call.growVolume(osv_matcher, 10 * 1024)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_from_snapshot_and_extend_with_qos(
self, _mock_volume_types):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
conf = {
'getTask.return_value': {
'status': 1},
'copyVolume.return_value': {'taskid': 1},
'getVolume.return_value': {}
}
mock_client = self.setup_driver(mock_conf=conf)
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'tpvv': True,
'volume_type': self.volume_type}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume = self.volume_qos.copy()
volume['size'] = self.volume['size'] + 10
model_update = self.driver.create_volume_from_snapshot(
volume,
self.snapshot)
self.assertEqual(model_update,
{'host': volume_utils.append_host(
self.FAKE_HOST,
HP3PAR_CPG_QOS)})
comment = (
'{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
' "display_name": "Foo Volume",'
' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
volume_name_3par = common._encode_name(volume['id'])
osv_matcher = 'osv-' + volume_name_3par
omv_matcher = 'omv-' + volume_name_3par
expected = [
mock.call.createSnapshot(
self.VOLUME_3PAR_NAME,
'oss-L4I73ONuTci9Fd4ceij-MQ',
{
'comment': comment,
'readOnly': False}),
mock.call.getCPG(HP3PAR_CPG_QOS),
mock.call.copyVolume(
osv_matcher, omv_matcher, HP3PAR_CPG_QOS, mock.ANY),
mock.call.getTask(mock.ANY),
mock.call.getVolume(osv_matcher),
mock.call.deleteVolume(osv_matcher),
mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}),
mock.call.growVolume(osv_matcher, 10 * 1024)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
def test_create_volume_from_snapshot_and_extend_copy_fail(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
conf = {
'getTask.return_value': {
'status': 4,
'failure message': 'out of disk space'},
'copyVolume.return_value': {'taskid': 1},
'getVolume.return_value': {}
}
self.setup_driver(mock_conf=conf)
volume = self.volume.copy()
volume['size'] = self.volume['size'] + 10
self.assertRaises(exception.CinderException,
self.driver.create_volume_from_snapshot,
volume, self.snapshot)
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_from_snapshot_qos(self, _mock_volume_types):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'tpvv': True,
'volume_type': self.volume_type}}
self.driver.create_volume_from_snapshot(
self.volume_qos,
self.snapshot)
comment = (
'{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
' "display_name": "Foo Volume",'
' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
expected = [
mock.call.createSnapshot(
self.VOLUME_3PAR_NAME,
'oss-L4I73ONuTci9Fd4ceij-MQ', {
'comment': comment,
'readOnly': False})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
volume = self.volume.copy()
volume['size'] = 1
self.assertRaises(exception.InvalidInput,
self.driver.create_volume_from_snapshot,
volume, self.snapshot)
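    # terminate_connection with force=True should tear everything down:
    # the VLUN, the now-empty host entry, and the CHAP credentials stored
    # as volume metadata.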
def test_terminate_connection(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getHostVLUNs.return_value = [
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': None, 'type': 0}]
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.terminate_connection(
self.volume,
self.connector,
force=True)
expected = [
mock.call.queryHost(iqns=[self.connector['initiator']]),
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.deleteVLUN(
self.VOLUME_3PAR_NAME,
None,
self.FAKE_HOST),
mock.call.deleteHost(self.FAKE_HOST),
mock.call.removeVolumeMetaData(
self.VOLUME_3PAR_NAME, CHAP_USER_KEY),
mock.call.removeVolumeMetaData(
self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
def test_update_volume_key_value_pair(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
key = 'a'
value = 'b'
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
common.update_volume_key_value_pair(
self.volume,
key,
value)
expected = [
mock.call.setVolumeMetaData(self.VOLUME_3PAR_NAME, key, value)]
mock_client.assert_has_calls(expected)
# check exception
mock_client.setVolumeMetaData.side_effect = Exception('fake')
self.assertRaises(exception.VolumeBackendAPIException,
common.update_volume_key_value_pair,
self.volume,
None,
'b')
def test_clear_volume_key_value_pair(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
key = 'a'
common = self.driver._login()
common.clear_volume_key_value_pair(self.volume, key)
expected = [
mock.call.removeVolumeMetaData(self.VOLUME_3PAR_NAME, key)]
mock_client.assert_has_calls(expected)
# check the exception
mock_client.removeVolumeMetaData.side_effect = Exception('fake')
self.assertRaises(exception.VolumeBackendAPIException,
common.clear_volume_key_value_pair,
self.volume,
None)
def test_extend_volume(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
grow_size = 3
old_size = self.volume['size']
new_size = old_size + grow_size
self.driver.extend_volume(self.volume, str(new_size))
growth_size_mib = grow_size * units.Ki
expected = [
mock.call.growVolume(self.VOLUME_3PAR_NAME, growth_size_mib)]
mock_client.assert_has_calls(expected)
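    # Growing a non-base volume fails on the array with HTTP 403 (error
    # code 150 here). The driver is expected to fall back to converting
    # the volume to a base volume (online copy) and retry, hence the
    # second growVolume call asserted below.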
def test_extend_volume_non_base(self):
extend_ex = hpexceptions.HTTPForbidden(error={'code': 150})
conf = {
'getTask.return_value': {
'status': 1},
'getCPG.return_value': {},
'copyVolume.return_value': {'taskid': 1},
'getVolume.return_value': {},
            # Raise the exception only on the first call
'growVolume.side_effect': [extend_ex,
None],
}
mock_client = self.setup_driver(mock_conf=conf)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
grow_size = 3
old_size = self.volume['size']
new_size = old_size + grow_size
self.driver.extend_volume(self.volume, str(new_size))
self.assertEqual(2, mock_client.growVolume.call_count)
def test_extend_volume_non_base_failure(self):
extend_ex = hpexceptions.HTTPForbidden(error={'code': 150})
conf = {
'getTask.return_value': {
'status': 1},
'getCPG.return_value': {},
'copyVolume.return_value': {'taskid': 1},
'getVolume.return_value': {},
# Always fail
'growVolume.side_effect': extend_ex
}
mock_client = self.setup_driver(mock_conf=conf)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
grow_size = 3
old_size = self.volume['size']
new_size = old_size + grow_size
self.assertRaises(hpexceptions.HTTPForbidden,
self.driver.extend_volume,
self.volume,
str(new_size))
def test_get_ports(self):
        # setup_mock_client drives with the default configuration
        # and returns the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = {
'members': [
{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
'protocol': 2,
'IPAddr': '10.10.120.252',
'linkState': 4,
'device': [],
'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
'mode': 2,
'HWAddr': '2C27D75375D2',
'type': 8},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
'protocol': 2,
'IPAddr': '10.10.220.253',
'linkState': 4,
'device': [],
'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
'mode': 2,
'HWAddr': '2C27D75375D6',
'type': 8},
{'portWWN': '20210002AC00383D',
'protocol': 1,
'linkState': 4,
'mode': 2,
'device': ['cage2'],
'nodeWWN': '20210002AC00383D',
'type': 2,
'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
ports = common.get_ports()['members']
self.assertEqual(len(ports), 3)
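    # When a qos_specs object is associated with a volume type, it takes
    # precedence over the scoped 'qos:*' extra specs on the type itself;
    # that is why the expected values below come from self.QOS rather than
    # the 'qos:' keys passed to volume_types.create().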
def test_get_by_qos_spec_with_scoping(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS)
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:maxIOPS": "100",
"qos:maxBWS": "50",
"qos:minIOPS": "10",
"qos:minBWS": "20",
"qos:latency": "5",
"qos:priority": "high"})
qos_specs.associate_qos_with_type(self.ctxt,
qos_ref['id'],
type_ref['id'])
type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])
qos = common._get_qos_by_volume_type(type_ref)
self.assertEqual(qos, {'maxIOPS': '1000', 'maxBWS': '50',
'minIOPS': '100', 'minBWS': '25',
'latency': '25', 'priority': 'low'})
def test_get_by_qos_spec(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
qos_ref = qos_specs.create(
self.ctxt,
'qos-specs-1',
self.QOS_SPECS)
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:maxIOPS": "100",
"qos:maxBWS": "50",
"qos:minIOPS": "10",
"qos:minBWS": "20",
"qos:latency": "5",
"qos:priority": "high"})
qos_specs.associate_qos_with_type(self.ctxt,
qos_ref['id'],
type_ref['id'])
type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])
qos = common._get_qos_by_volume_type(type_ref)
self.assertEqual(qos, {'maxIOPS': '1000', 'maxBWS': '50',
'minIOPS': '100', 'minBWS': '25',
'latency': '25', 'priority': 'low'})
def test_get_by_qos_by_type_only(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:maxIOPS": "100",
"qos:maxBWS": "50",
"qos:minIOPS": "10",
"qos:minBWS": "20",
"qos:latency": "5",
"qos:priority": "high"})
type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])
qos = common._get_qos_by_volume_type(type_ref)
self.assertEqual(qos, {'maxIOPS': '100', 'maxBWS': '50',
'minIOPS': '10', 'minBWS': '20',
'latency': '5', 'priority': 'high'})
def test_create_vlun(self):
host = 'fake-host'
lun_id = 11
nsp = '1:2:3'
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
location = ("%(name)s,%(lunid)s,%(host)s,%(nsp)s" %
{'name': self.VOLUME_NAME,
'lunid': lun_id,
'host': host,
'nsp': nsp})
mock_client.createVLUN.return_value = location
expected_info = {'volume_name': self.VOLUME_NAME,
'lun_id': lun_id,
'host_name': host,
'nsp': nsp}
common = self.driver._login()
vlun_info = common._create_3par_vlun(
self.VOLUME_NAME,
host,
nsp)
self.assertEqual(expected_info, vlun_info)
location = ("%(name)s,%(lunid)s,%(host)s" %
{'name': self.VOLUME_NAME,
'lunid': lun_id,
'host': host})
mock_client.createVLUN.return_value = location
expected_info = {'volume_name': self.VOLUME_NAME,
'lun_id': lun_id,
'host_name': host}
vlun_info = common._create_3par_vlun(
self.VOLUME_NAME,
host,
None)
self.assertEqual(expected_info, vlun_info)
def test__get_existing_volume_ref_name(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
existing_ref = {'source-name': unm_matcher}
result = common._get_existing_volume_ref_name(existing_ref)
self.assertEqual(unm_matcher, result)
existing_ref = {'source-id': self.volume['id']}
result = common._get_existing_volume_ref_name(existing_ref)
self.assertEqual(unm_matcher, result)
existing_ref = {'bad-key': 'foo'}
self.assertRaises(
exception.ManageExistingInvalidReference,
common._get_existing_volume_ref_name,
existing_ref)
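    # manage_existing adopts an unmanaged array volume: the 'unm-' volume
    # is renamed to the 'osv-' name derived from the Cinder id, its
    # comment is rewritten, and, when the volume has a type, the same
    # retype flow exercised above is applied.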
@mock.patch.object(volume_types, 'get_volume_type')
def test_manage_existing(self, _mock_volume_types):
_mock_volume_types.return_value = self.volume_type
mock_client = self.setup_driver()
new_comment = {"display_name": "Foo Volume",
"name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e",
"volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e",
"type": "OpenStack"}
volume = {'display_name': None,
'host': 'my-stack1@3parxxx#CPGNOTUSED',
'volume_type': 'gold',
'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
mock_client.getTask.return_value = self.STATUS_DONE
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
osv_matcher = common._get_3par_vol_name(volume['id'])
vvs_matcher = common._get_3par_vvs_name(volume['id'])
existing_ref = {'source-name': unm_matcher}
expected_obj = {'display_name': 'Foo Volume',
'host': 'my-stack1@3parxxx#fakepool'}
obj = self.driver.manage_existing(volume, existing_ref)
expected_manage = [
mock.call.getVolume(existing_ref['source-name']),
mock.call.modifyVolume(existing_ref['source-name'],
{'newName': osv_matcher,
'comment': self.CommentMatcher(
self.assertEqual, new_comment)}),
]
retype_comment_qos = {
"display_name": "Foo Volume",
"volume_type_name": self.volume_type['name'],
"volume_type_id": self.volume_type['id'],
"qos": {
'maxIOPS': '1000',
'maxBWS': '50',
'minIOPS': '100',
'minBWS': '25',
'latency': '25',
'priority': 'low'
}
}
expected_snap_cpg = self.volume_type['extra_specs']['cpg']
expected_retype_modify = [
mock.call.modifyVolume(osv_matcher,
{'comment': self.CommentMatcher(
self.assertEqual,
retype_comment_qos),
'snapCPG': expected_snap_cpg}),
mock.call.deleteVolumeSet(vvs_matcher),
]
expected_retype_specs = [
mock.call.createVolumeSet(vvs_matcher, None),
mock.call.createQoSRules(
vvs_matcher,
{'ioMinGoal': 100, 'ioMaxLimit': 1000,
'bwMinGoalKB': 25600, 'priority': 1, 'latencyGoal': 25,
'bwMaxLimitKB': 51200}),
mock.call.addVolumeToVolumeSet(vvs_matcher, osv_matcher),
mock.call.modifyVolume(
osv_matcher,
{'action': 6,
'userCPG': self.volume_type['extra_specs']['cpg'],
'conversionOperation': 1, 'tuneOperation': 1}),
mock.call.getTask(1)
]
mock_client.assert_has_calls(self.standard_login + expected_manage)
mock_client.assert_has_calls(expected_retype_modify)
mock_client.assert_has_calls(
expected_retype_specs +
self.standard_logout)
self.assertEqual(expected_obj, obj)
@mock.patch.object(volume_types, 'get_volume_type')
def test_manage_existing_vvs(self, _mock_volume_types):
test_volume_type = self.RETYPE_VOLUME_TYPE_2
vvs = test_volume_type['extra_specs']['vvs']
_mock_volume_types.return_value = test_volume_type
mock_client = self.setup_driver()
mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
mock_client.getTask.return_value = self.STATUS_DONE
        volume_id = '007abcde-7579-40bc-8f90-a20b3902283e'
        new_comment = {"display_name": "Test Volume",
                       "name": ("volume-%s" % volume_id),
                       "volume_id": volume_id,
                       "type": "OpenStack"}
        volume = {'display_name': 'Test Volume',
                  'host': 'my-stack1@3parxxx#CPGNOTUSED',
                  'volume_type': 'gold',
                  'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
                  'id': volume_id}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
osv_matcher = common._get_3par_vol_name(volume['id'])
vvs_matcher = common._get_3par_vvs_name(volume['id'])
existing_ref = {'source-name': unm_matcher}
obj = self.driver.manage_existing(volume, existing_ref)
expected_obj = {'display_name': 'Test Volume',
'host': 'my-stack1@3parxxx#qospool'}
expected_manage = [
mock.call.getVolume(existing_ref['source-name']),
mock.call.modifyVolume(existing_ref['source-name'],
{'newName': osv_matcher,
'comment': self.CommentMatcher(
self.assertEqual, new_comment)})
]
retype_comment_vvs = {
"display_name": "Foo Volume",
"volume_type_name": test_volume_type['name'],
"volume_type_id": test_volume_type['id'],
"vvs": vvs
}
expected_retype = [
mock.call.modifyVolume(osv_matcher,
{'comment': self.CommentMatcher(
self.assertEqual,
retype_comment_vvs),
'snapCPG': 'OpenStackCPGSnap'}),
mock.call.deleteVolumeSet(vvs_matcher),
mock.call.addVolumeToVolumeSet(vvs, osv_matcher),
mock.call.modifyVolume(osv_matcher,
{'action': 6, 'userCPG':
test_volume_type['extra_specs']['cpg'],
'conversionOperation': 1,
'tuneOperation': 1}),
mock.call.getTask(1)
]
mock_client.assert_has_calls(self.standard_login + expected_manage)
mock_client.assert_has_calls(
expected_retype +
self.standard_logout)
self.assertEqual(expected_obj, obj)
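
    # With no volume type, manage_existing only renames the volume and
    # rewrites its comment; no retype, QoS, or volume-set work happens.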
def test_manage_existing_no_volume_type(self):
mock_client = self.setup_driver()
comment = (
'{"display_name": "Foo Volume"}')
new_comment = (
'{"type": "OpenStack",'
' "display_name": "Foo Volume",'
' "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e",'
' "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e"}')
volume = {'display_name': None,
'volume_type': None,
'volume_type_id': None,
'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
mock_client.getVolume.return_value = {'comment': comment}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
osv_matcher = common._get_3par_vol_name(volume['id'])
existing_ref = {'source-name': unm_matcher}
obj = self.driver.manage_existing(volume, existing_ref)
expected_obj = {'display_name': 'Foo Volume'}
expected = [
mock.call.getVolume(existing_ref['source-name']),
mock.call.modifyVolume(existing_ref['source-name'],
{'newName': osv_matcher,
'comment': new_comment})
]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(expected_obj, obj)
volume['display_name'] = 'Test Volume'
obj = self.driver.manage_existing(volume, existing_ref)
expected_obj = {'display_name': 'Test Volume'}
expected = [
mock.call.getVolume(existing_ref['source-name']),
mock.call.modifyVolume(existing_ref['source-name'],
{'newName': osv_matcher,
'comment': new_comment})
]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(expected_obj, obj)
mock_client.getVolume.return_value = {}
volume['display_name'] = None
common = self.driver._login()
obj = self.driver.manage_existing(volume, existing_ref)
expected_obj = {'display_name': None}
expected = [
mock.call.getVolume(existing_ref['source-name']),
mock.call.modifyVolume(existing_ref['source-name'],
{'newName': osv_matcher,
'comment': new_comment})
]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(expected_obj, obj)
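
    # A source volume the backend cannot find must surface as
    # InvalidInput instead of being half-managed.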
def test_manage_existing_invalid_input(self):
mock_client = self.setup_driver()
volume = {'display_name': None,
'volume_type': None,
'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound('fake')
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
existing_ref = {'source-name': unm_matcher}
self.assertRaises(exception.InvalidInput,
self.driver.manage_existing,
volume=volume,
existing_ref=existing_ref)
expected = [mock.call.getVolume(existing_ref['source-name'])]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
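
    # If the volume type lookup fails, manage_existing raises
    # ManageExistingVolumeTypeMismatch before the volume is renamed.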
def test_manage_existing_volume_type_exception(self):
mock_client = self.setup_driver()
comment = (
'{"display_name": "Foo Volume"}')
volume = {'display_name': None,
'volume_type': 'gold',
'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
mock_client.getVolume.return_value = {'comment': comment}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
existing_ref = {'source-name': unm_matcher}
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.driver.manage_existing,
volume=volume,
existing_ref=existing_ref)
expected = [mock.call.getVolume(existing_ref['source-name'])]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
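
    # If retype fails after the rename (here a 3PAR domain mismatch),
    # the driver rolls back: the volume gets its unmanaged name and
    # original comment back.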
@mock.patch.object(volume_types, 'get_volume_type')
def test_manage_existing_retype_exception(self, _mock_volume_types):
mock_client = self.setup_driver()
_mock_volume_types.return_value = {
'name': 'gold',
'id': 'gold-id',
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'tpvv': True,
'volume_type': self.volume_type}}
volume = {'display_name': None,
'host': 'stack1@3pariscsi#POOL1',
'volume_type': 'gold',
'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
mock_client.getTask.return_value = self.STATUS_DONE
mock_client.getCPG.side_effect = [
{'domain': 'domain1'},
{'domain': 'domain2'},
{'domain': 'domain3'},
]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
osv_matcher = common._get_3par_vol_name(volume['id'])
existing_ref = {'source-name': unm_matcher}
self.assertRaises(exception.Invalid3PARDomain,
self.driver.manage_existing,
volume=volume,
existing_ref=existing_ref)
expected = [
mock.call.getVolume(unm_matcher),
mock.call.modifyVolume(
unm_matcher, {
'newName': osv_matcher,
'comment': mock.ANY}),
mock.call.getCPG('OpenStackCPG'),
mock.call.getVolume(osv_matcher),
mock.call.getCPG('testUserCpg0'),
mock.call.getCPG('OpenStackCPG'),
mock.call.modifyVolume(
osv_matcher, {'newName': unm_matcher,
'comment': self.MANAGE_VOLUME_INFO
['comment']})
]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
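
    # manage_existing_get_size converts the array-reported size in MiB
    # to whole GiB (2048 MiB -> 2 GiB).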
def test_manage_existing_get_size(self):
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'sizeMiB': 2048}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
volume = {}
existing_ref = {'source-name': unm_matcher}
size = self.driver.manage_existing_get_size(volume, existing_ref)
expected_size = 2
expected = [mock.call.getVolume(existing_ref['source-name'])]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(expected_size, size)
def test_manage_existing_get_size_invalid_reference(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
volume = {}
existing_ref = {'source-name': self.VOLUME_3PAR_NAME}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
volume=volume,
existing_ref=existing_ref)
mock_client.assert_has_calls(
self.standard_login +
self.standard_logout)
existing_ref = {}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
volume=volume,
existing_ref=existing_ref)
mock_client.assert_has_calls(
self.standard_login +
self.standard_logout)
def test_manage_existing_get_size_invalid_input(self):
mock_client = self.setup_driver()
mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound('fake')
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
volume = {}
existing_ref = {'source-name': unm_matcher}
self.assertRaises(exception.InvalidInput,
self.driver.manage_existing_get_size,
volume=volume,
existing_ref=existing_ref)
expected = [mock.call.getVolume(existing_ref['source-name'])]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
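
    # Unmanaging renames the volume from its OpenStack 'osv-' name to
    # the 'unm-' name, leaving it on the array but unknown to Cinder.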
def test_unmanage(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
self.driver.unmanage(self.volume)
osv_matcher = common._get_3par_vol_name(self.volume['id'])
unm_matcher = common._get_3par_unm_name(self.volume['id'])
expected = [
mock.call.modifyVolume(osv_matcher, {'newName': unm_matcher})
]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
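
    # Hostnames longer than 31 characters are truncated to satisfy the
    # 3PAR host-name length limit.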
def test__safe_hostname(self):
long_hostname = "abc123abc123abc123abc123abc123abc123"
fixed_hostname = "abc123abc123abc123abc123abc123a"
common = hpcommon.HP3PARCommon(None)
safe_host = common._safe_hostname(long_hostname)
self.assertEqual(fixed_hostname, safe_host)
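
# Fibre Channel variant of the driver tests: host creation and lookup by
# WWN, VLUN handling, and FC zoning lookup-service interaction.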
class TestHP3PARFCDriver(HP3PARBaseDriver, test.TestCase):
properties = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_lun': 90,
'target_wwn': ['0987654321234', '123456789000987'],
'target_discovered': True,
'initiator_target_map': {'123456789012345':
['0987654321234', '123456789000987'],
'123456789054321':
['0987654321234', '123456789000987'],
}}}
def setup_driver(self, config=None, mock_conf=None):
self.ctxt = context.get_admin_context()
mock_client = self.setup_mock_client(
conf=config,
m_conf=mock_conf,
driver=hpfcdriver.HP3PARFCDriver)
expected = [
mock.call.getCPG(HP3PAR_CPG),
mock.call.getCPG(HP3PAR_CPG2)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
mock_client.reset_mock()
return mock_client
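
    # initialize_connection resolves the host by WWN (queryHost), exports
    # the volume with an auto VLUN, and returns the FC target WWNs plus
    # an initiator_target_map.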
def test_initialize_connection(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('fake'),
{'name': self.FAKE_HOST,
'FCPaths': [{'driverVersion': None,
'firmwareVersion': None,
'hostSpeed': 0,
'model': None,
'portPos': {'cardPort': 1, 'node': 1,
'slot': 2},
'vendor': None,
'wwn': self.wwn[0]},
{'driverVersion': None,
'firmwareVersion': None,
'hostSpeed': 0,
'model': None,
'portPos': {'cardPort': 1, 'node': 0,
'slot': 2},
'vendor': None,
'wwn': self.wwn[1]}]}]
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
mock_client.getHostVLUNs.return_value = [
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': 90, 'type': 0}]
location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
{'volume_name': self.VOLUME_3PAR_NAME,
'lun_id': 90,
'host': self.FAKE_HOST,
'nsp': 'something'})
mock_client.createVLUN.return_value = location
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
result = self.driver.initialize_connection(
self.volume,
self.connector)
expected = [
mock.call.getVolume(self.VOLUME_3PAR_NAME),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.queryHost(wwns=['123456789012345',
'123456789054321']),
mock.call.getHost(self.FAKE_HOST),
mock.call.getPorts(),
mock.call.createVLUN(
self.VOLUME_3PAR_NAME,
auto=True,
hostname=self.FAKE_HOST),
mock.call.getHostVLUNs(self.FAKE_HOST)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertDictMatch(result, self.properties)
@mock.patch('cinder.zonemanager.utils.create_lookup_service')
def test_initialize_connection_with_lookup_single_nsp(self, mock_lookup):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
class fake_lookup_object:
def get_device_mapping_from_network(self, connector, target_wwns):
fake_map = {
'FAB_1': {
'target_port_wwn_list': ['0987654321234'],
'initiator_port_wwn_list': ['123456789012345']
}
}
return fake_map
mock_lookup.return_value = fake_lookup_object()
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('fake'),
{'name': self.FAKE_HOST,
'FCPaths': [{'driverVersion': None,
'firmwareVersion': None,
'hostSpeed': 0,
'model': None,
'portPos': {'cardPort': 1, 'node': 1,
'slot': 2},
'vendor': None,
'wwn': self.wwn[0]}]}]
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
mock_client.getHostVLUNs.return_value = [
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': 90, 'type': 0}]
location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
{'volume_name': self.VOLUME_3PAR_NAME,
'lun_id': 90,
'host': self.FAKE_HOST,
'nsp': 'something'})
mock_client.createVLUN.return_value = location
connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'wwpns': [self.wwn[0]],
'wwnns': ["223456789012345"],
'host': self.FAKE_HOST}
expected_properties = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_lun': 90,
'target_wwn': ['0987654321234'],
'target_discovered': True,
'initiator_target_map': {'123456789012345':
['0987654321234']
}}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
result = self.driver.initialize_connection(self.volume, connector)
expected = [
mock.call.getVolume(self.VOLUME_3PAR_NAME),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.ANY,
mock.call.getHost(self.FAKE_HOST),
mock.call.getPorts(),
mock.call.getPorts(),
mock.call.createVLUN(
self.VOLUME_3PAR_NAME,
auto=True,
hostname=self.FAKE_HOST,
portPos={'node': 7, 'slot': 1, 'cardPort': 1}),
mock.call.getHostVLUNs(self.FAKE_HOST)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertDictMatch(result, expected_properties)
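
    # terminate_connection deletes the VLUN and, once the host has no
    # VLUNs left, deletes the host too; benign deleteHost conflicts
    # ("has exported VLUN", "host is a member of a set") are tolerated.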
def test_terminate_connection(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
effects = [
[{'active': True, 'volumeName': self.VOLUME_3PAR_NAME,
'lun': None, 'type': 0}],
hpexceptions.HTTPNotFound]
mock_client.getHostVLUNs.side_effect = effects
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
expected = [
mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.deleteVLUN(
self.VOLUME_3PAR_NAME,
None,
self.FAKE_HOST),
mock.call.deleteHost(self.FAKE_HOST),
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.getPorts()]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
conn_info = self.driver.terminate_connection(self.volume,
self.connector)
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertIn('data', conn_info)
self.assertIn('initiator_target_map', conn_info['data'])
mock_client.reset_mock()
mock_client.getHostVLUNs.side_effect = effects
# mock some deleteHost exceptions that are handled
delete_with_vlun = hpexceptions.HTTPConflict(
error={'message': "has exported VLUN"})
delete_with_hostset = hpexceptions.HTTPConflict(
error={'message': "host is a member of a set"})
mock_client.deleteHost = mock.Mock(
side_effect=[delete_with_vlun, delete_with_hostset])
conn_info = self.driver.terminate_connection(self.volume,
self.connector)
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
mock_client.reset_mock()
mock_client.getHostVLUNs.side_effect = effects
conn_info = self.driver.terminate_connection(self.volume,
self.connector)
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
@mock.patch('cinder.zonemanager.utils.create_lookup_service')
def test_terminate_connection_with_lookup(self, mock_lookup):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
class fake_lookup_object:
def get_device_mapping_from_network(self, connector, target_wwns):
fake_map = {
'FAB_1': {
'target_port_wwn_list': ['0987654321234'],
'initiator_port_wwn_list': ['123456789012345']
}
}
return fake_map
mock_lookup.return_value = fake_lookup_object()
mock_client = self.setup_driver()
effects = [
[{'active': True, 'volumeName': self.VOLUME_3PAR_NAME,
'lun': None, 'type': 0}],
hpexceptions.HTTPNotFound]
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
mock_client.getHostVLUNs.side_effect = effects
expected = [
mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.deleteVLUN(
self.VOLUME_3PAR_NAME,
None,
self.FAKE_HOST),
mock.call.deleteHost(self.FAKE_HOST),
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.getPorts()]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
conn_info = self.driver.terminate_connection(self.volume,
self.connector)
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertIn('data', conn_info)
self.assertIn('initiator_target_map', conn_info['data'])
mock_client.reset_mock()
mock_client.getHostVLUNs.side_effect = effects
# mock some deleteHost exceptions that are handled
delete_with_vlun = hpexceptions.HTTPConflict(
error={'message': "has exported VLUN"})
delete_with_hostset = hpexceptions.HTTPConflict(
error={'message': "host is a member of a set"})
mock_client.deleteHost = mock.Mock(
side_effect=[delete_with_vlun, delete_with_hostset])
conn_info = self.driver.terminate_connection(self.volume,
self.connector)
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
mock_client.reset_mock()
mock_client.getHostVLUNs.side_effect = effects
conn_info = self.driver.terminate_connection(self.volume,
self.connector)
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
def test_terminate_connection_more_vols(self):
mock_client = self.setup_driver()
        # Mock more than one VLUN on the host; the host itself must not
        # be removed.
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0},
            {'active': True,
             'volumeName': 'there-is-another-volume',
             'lun': None, 'type': 0},
        ]
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
expect_less = [
mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.deleteVLUN(
self.VOLUME_3PAR_NAME,
None,
self.FAKE_HOST),
mock.call.getHostVLUNs(self.FAKE_HOST)]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
conn_info = self.driver.terminate_connection(self.volume,
self.connector)
mock_client.assert_has_calls(
self.standard_login +
expect_less +
self.standard_logout)
self.assertNotIn('initiator_target_map', conn_info['data'])
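
    # Capacity reporting: the top-level totals stay 0 (the real numbers
    # live in 'pools'), and an SDGrowth limit caps a CPG's reported
    # capacity.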
def test_get_volume_stats(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getCPG.return_value = self.cpgs[0]
mock_client.getStorageSystemInfo.return_value = {
'serialNumber': '1234'
}
# cpg has no limit
mock_client.getCPGAvailableSpace.return_value = {
"capacityEfficiency": {u'compaction': 594.4},
"rawFreeMiB": 1024.0 * 6,
"usableFreeMiB": 1024.0 * 3
}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
stats = self.driver.get_volume_stats(True)
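            # MiB -> GiB conversion factor (1 / 1024).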
const = 0.0009765625
self.assertEqual(stats['storage_protocol'], 'FC')
self.assertEqual(stats['total_capacity_gb'], 0)
self.assertEqual(stats['free_capacity_gb'], 0)
self.assertEqual(stats['pools'][0]['total_capacity_gb'], 24.0)
self.assertEqual(stats['pools'][0]['free_capacity_gb'], 3.0)
expected = [
mock.call.getStorageSystemInfo(),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getCPGAvailableSpace(HP3PAR_CPG),
mock.call.getCPG(HP3PAR_CPG2),
mock.call.getCPGAvailableSpace(HP3PAR_CPG2)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
stats = self.driver.get_volume_stats(True)
self.assertEqual(stats['storage_protocol'], 'FC')
self.assertEqual(stats['total_capacity_gb'], 0)
self.assertEqual(stats['free_capacity_gb'], 0)
self.assertEqual(stats['pools'][0]['total_capacity_gb'], 24.0)
self.assertEqual(stats['pools'][0]['free_capacity_gb'], 3.0)
cpg2 = self.cpgs[0].copy()
cpg2.update({'SDGrowth': {'limitMiB': 8192}})
mock_client.getCPG.return_value = cpg2
stats = self.driver.get_volume_stats(True)
self.assertEqual(stats['storage_protocol'], 'FC')
total_capacity_gb = 8192 * const
self.assertEqual(stats['total_capacity_gb'], 0)
self.assertEqual(stats['pools'][0]['total_capacity_gb'],
total_capacity_gb)
free_capacity_gb = int(
(8192 - self.cpgs[0]['UsrUsage']['usedMiB']) * const)
self.assertEqual(stats['free_capacity_gb'], 0)
self.assertEqual(stats['pools'][0]['free_capacity_gb'],
free_capacity_gb)
common.client.deleteCPG(HP3PAR_CPG)
common.client.createCPG(HP3PAR_CPG, {})
def test_create_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('fake'),
{'name': self.FAKE_HOST,
'FCPaths': [{'driverVersion': None,
'firmwareVersion': None,
'hostSpeed': 0,
'model': None,
'portPos': {'cardPort': 1, 'node': 1,
'slot': 2},
'vendor': None,
'wwn': self.wwn[0]},
{'driverVersion': None,
'firmwareVersion': None,
'hostSpeed': 0,
'model': None,
'portPos': {'cardPort': 1, 'node': 0,
'slot': 2},
'vendor': None,
'wwn': self.wwn[1]}]}]
mock_client.queryHost.return_value = None
mock_client.getVLUN.return_value = {'lun': 186}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host = self.driver._create_host(
common,
self.volume,
self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.queryHost(wwns=['123456789012345',
'123456789054321']),
mock.call.createHost(
self.FAKE_HOST,
FCWwns=['123456789012345', '123456789054321'],
optional={'domain': None, 'persona': 2}),
mock.call.getHost(self.FAKE_HOST)]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
def test_create_invalid_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('Host not found.'), {
'name': 'fakehost.foo',
'FCPaths': [{'wwn': '123456789012345'}, {
'wwn': '123456789054321'}]}]
mock_client.queryHost.return_value = {
'members': [{
'name': 'fakehost.foo'
}]
}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host = self.driver._create_host(
common,
self.volume,
self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost('fakehost'),
mock.call.queryHost(wwns=['123456789012345',
'123456789054321']),
mock.call.getHost('fakehost.foo')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], 'fakehost.foo')
def test_create_modify_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [{
'name': self.FAKE_HOST, 'FCPaths': []},
{'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789012345'}, {
'wwn': '123456789054321'}]}]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host = self.driver._create_host(
common,
self.volume,
self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost('fakehost'),
mock.call.modifyHost(
'fakehost', {
'FCWWNs': ['123456789012345', '123456789054321'],
'pathOperation': 1}),
mock.call.getHost('fakehost')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(len(host['FCPaths']), 2)
def test_modify_host_with_new_wwn(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
getHost_ret1 = {
'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789054321'}]}
getHost_ret2 = {
'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789012345'},
{'wwn': '123456789054321'}]}
mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host = self.driver._create_host(
common,
self.volume,
self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost('fakehost'),
mock.call.modifyHost(
'fakehost', {
'FCWWNs': ['123456789012345'], 'pathOperation': 1}),
mock.call.getHost('fakehost')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(len(host['FCPaths']), 2)
def test_modify_host_with_unknown_wwn_and_new_wwn(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
getHost_ret1 = {
'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789054321'},
{'wwn': 'xxxxxxxxxxxxxxx'}]}
getHost_ret2 = {
'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789012345'},
{'wwn': '123456789054321'},
{'wwn': 'xxxxxxxxxxxxxxx'}]}
mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host = self.driver._create_host(
common,
self.volume,
self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost('fakehost'),
mock.call.modifyHost(
'fakehost', {
'FCWWNs': ['123456789012345'], 'pathOperation': 1}),
mock.call.getHost('fakehost')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(len(host['FCPaths']), 3)
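
# iSCSI variant of the driver tests: adds CHAP credential handling and
# least-used iSCSI port (NSP) selection on top of the common behavior.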
class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase):
TARGET_IQN = 'iqn.2000-05.com.3pardata:21810002ac00383d'
TARGET_LUN = 186
properties = {
'driver_volume_type': 'iscsi',
'data':
{'target_discovered': True,
'target_iqn': TARGET_IQN,
'target_lun': TARGET_LUN,
'target_portal': '1.1.1.2:1234'}}
def setup_driver(self, config=None, mock_conf=None):
self.ctxt = context.get_admin_context()
mock_client = self.setup_mock_client(
conf=config,
m_conf=mock_conf,
driver=hpdriver.HP3PARISCSIDriver)
expected_get_cpgs = [
mock.call.getCPG(HP3PAR_CPG),
mock.call.getCPG(HP3PAR_CPG2)]
expected_get_ports = [mock.call.getPorts()]
mock_client.assert_has_calls(
self.standard_login +
expected_get_cpgs +
self.standard_logout +
self.standard_login +
expected_get_ports +
self.standard_logout)
mock_client.reset_mock()
return mock_client
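
    # initialize_connection resolves the host by IQN, creates a VLUN on
    # the chosen NSP, and returns the target IQN, LUN, and portal.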
def test_initialize_connection(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('fake'),
{'name': self.FAKE_HOST}]
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
mock_client.getHostVLUNs.return_value = [
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': self.TARGET_LUN, 'type': 0}]
location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
{'volume_name': self.VOLUME_3PAR_NAME,
'lun_id': self.TARGET_LUN,
'host': self.FAKE_HOST,
'nsp': 'something'})
mock_client.createVLUN.return_value = location
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
result = self.driver.initialize_connection(
self.volume,
self.connector)
expected = [
mock.call.getVolume(self.VOLUME_3PAR_NAME),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
mock.call.getHost(self.FAKE_HOST),
mock.call.createVLUN(
self.VOLUME_3PAR_NAME,
auto=True,
hostname='fakehost',
portPos={'node': 8, 'slot': 1, 'cardPort': 1}),
mock.call.getHostVLUNs(self.FAKE_HOST)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertDictMatch(result, self.properties)
def test_get_volume_stats(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getCPG.return_value = self.cpgs[0]
mock_client.getStorageSystemInfo.return_value = {
'serialNumber': '1234'
}
# cpg has no limit
mock_client.getCPGAvailableSpace.return_value = {
"capacityEfficiency": {u'compaction': 594.4},
"rawFreeMiB": 1024.0 * 6,
"usableFreeMiB": 1024.0 * 3
}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
stats = self.driver.get_volume_stats(True)
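            # MiB -> GiB conversion factor (1 / 1024).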
const = 0.0009765625
self.assertEqual(stats['storage_protocol'], 'iSCSI')
self.assertEqual(stats['total_capacity_gb'], 0)
self.assertEqual(stats['free_capacity_gb'], 0)
self.assertEqual(stats['pools'][0]['total_capacity_gb'], 24.0)
self.assertEqual(stats['pools'][0]['free_capacity_gb'], 3.0)
expected = [
mock.call.getStorageSystemInfo(),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getCPGAvailableSpace(HP3PAR_CPG),
mock.call.getCPG(HP3PAR_CPG2),
mock.call.getCPGAvailableSpace(HP3PAR_CPG2)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
cpg2 = self.cpgs[0].copy()
cpg2.update({'SDGrowth': {'limitMiB': 8192}})
mock_client.getCPG.return_value = cpg2
stats = self.driver.get_volume_stats(True)
self.assertEqual(stats['storage_protocol'], 'iSCSI')
total_capacity_gb = 8192 * const
self.assertEqual(stats['total_capacity_gb'], 0)
self.assertEqual(stats['pools'][0]['total_capacity_gb'],
total_capacity_gb)
free_capacity_gb = int(
(8192 - self.cpgs[0]['UsrUsage']['usedMiB']) * const)
self.assertEqual(stats['free_capacity_gb'], 0)
self.assertEqual(stats['pools'][0]['free_capacity_gb'],
free_capacity_gb)
def test_create_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('fake'),
{'name': self.FAKE_HOST}]
mock_client.queryHost.return_value = None
mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host, auth_username, auth_password = self.driver._create_host(
common, self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
mock.call.createHost(
self.FAKE_HOST,
optional={'domain': None, 'persona': 2},
iscsiNames=['iqn.1993-08.org.debian:01:222']),
mock.call.getHost(self.FAKE_HOST)]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(auth_username, None)
self.assertEqual(auth_password, None)
def test_create_host_chap_enabled(self):
# setup_mock_client drive with CHAP enabled configuration
# and return the mock HTTP 3PAR client
config = self.setup_configuration()
config.hp3par_iscsi_chap_enabled = True
mock_client = self.setup_driver(config=config)
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('fake'),
{'name': self.FAKE_HOST}]
mock_client.queryHost.return_value = None
mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN}
expected_mod_request = {
'chapOperation': mock_client.HOST_EDIT_ADD,
'chapOperationMode': mock_client.CHAP_INITIATOR,
'chapName': 'test-user',
'chapSecret': 'test-pass'
}
def get_side_effect(*args):
data = {'value': None}
if args[1] == CHAP_USER_KEY:
data['value'] = 'test-user'
elif args[1] == CHAP_PASS_KEY:
data['value'] = 'test-pass'
return data
mock_client.getVolumeMetaData.side_effect = get_side_effect
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host, auth_username, auth_password = self.driver._create_host(
common, self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY),
mock.call.getVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
mock.call.getHost(self.FAKE_HOST),
mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
mock.call.createHost(
self.FAKE_HOST,
optional={'domain': None, 'persona': 2},
iscsiNames=['iqn.1993-08.org.debian:01:222']),
mock.call.modifyHost(
'fakehost',
expected_mod_request),
mock.call.getHost(self.FAKE_HOST)
]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(auth_username, 'test-user')
self.assertEqual(auth_password, 'test-pass')
def test_create_invalid_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('Host not found.'),
{'name': 'fakehost.foo'}]
mock_client.queryHost.return_value = {
'members': [{
'name': 'fakehost.foo'
}]
}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host, auth_username, auth_password = self.driver._create_host(
common, self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
mock.call.getHost('fakehost.foo')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], 'fakehost.foo')
self.assertEqual(auth_username, None)
self.assertEqual(auth_password, None)
def test_create_invalid_host_chap_enabled(self):
# setup_mock_client drive with CHAP enabled configuration
# and return the mock HTTP 3PAR client
config = self.setup_configuration()
config.hp3par_iscsi_chap_enabled = True
mock_client = self.setup_driver(config=config)
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('Host not found.'),
{'name': 'fakehost.foo'}]
mock_client.queryHost.return_value = {
'members': [{
'name': 'fakehost.foo'
}]
}
def get_side_effect(*args):
data = {'value': None}
if args[1] == CHAP_USER_KEY:
data['value'] = 'test-user'
elif args[1] == CHAP_PASS_KEY:
data['value'] = 'test-pass'
return data
mock_client.getVolumeMetaData.side_effect = get_side_effect
expected_mod_request = {
'chapOperation': mock_client.HOST_EDIT_ADD,
'chapOperationMode': mock_client.CHAP_INITIATOR,
'chapName': 'test-user',
'chapSecret': 'test-pass'
}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host, auth_username, auth_password = self.driver._create_host(
common, self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY),
mock.call.getVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
mock.call.getHost(self.FAKE_HOST),
mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
mock.call.modifyHost(
'fakehost.foo',
expected_mod_request),
mock.call.getHost('fakehost.foo')
]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], 'fakehost.foo')
self.assertEqual(auth_username, 'test-user')
self.assertEqual(auth_password, 'test-pass')
def test_create_modify_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
{'name': self.FAKE_HOST, 'FCPaths': []},
{'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789012345'},
{'wwn': '123456789054321'}]}]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host, auth_username, auth_password = self.driver._create_host(
common, self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.modifyHost(
self.FAKE_HOST,
{'pathOperation': 1,
'iSCSINames': ['iqn.1993-08.org.debian:01:222']}),
mock.call.getHost(self.FAKE_HOST)]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(auth_username, None)
self.assertEqual(auth_password, None)
self.assertEqual(len(host['FCPaths']), 2)
def test_create_modify_host_chap_enabled(self):
# setup_mock_client drive with CHAP enabled configuration
# and return the mock HTTP 3PAR client
config = self.setup_configuration()
config.hp3par_iscsi_chap_enabled = True
mock_client = self.setup_driver(config=config)
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
{'name': self.FAKE_HOST, 'FCPaths': []},
{'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789012345'},
{'wwn': '123456789054321'}]}]
def get_side_effect(*args):
data = {'value': None}
if args[1] == CHAP_USER_KEY:
data['value'] = 'test-user'
elif args[1] == CHAP_PASS_KEY:
data['value'] = 'test-pass'
return data
mock_client.getVolumeMetaData.side_effect = get_side_effect
expected_mod_request = {
'chapOperation': mock_client.HOST_EDIT_ADD,
'chapOperationMode': mock_client.CHAP_INITIATOR,
'chapName': 'test-user',
'chapSecret': 'test-pass'
}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host, auth_username, auth_password = self.driver._create_host(
common, self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY),
mock.call.getVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
mock.call.getHost(self.FAKE_HOST),
mock.call.modifyHost(
self.FAKE_HOST,
{'pathOperation': 1,
'iSCSINames': ['iqn.1993-08.org.debian:01:222']}),
mock.call.modifyHost(
self.FAKE_HOST,
expected_mod_request
),
mock.call.getHost(self.FAKE_HOST)]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(auth_username, 'test-user')
self.assertEqual(auth_password, 'test-pass')
self.assertEqual(len(host['FCPaths']), 2)
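
    # The NSP ("node:slot:cardPort") selection tests below feed canned
    # getPorts()/getVLUNs() data (defined at module scope below) into
    # initialize_iscsi_ports and check which port each host is given.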
def test_get_least_used_nsp_for_host_single(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
            # Set up a single iSCSI IP.
iscsi_ips = ["10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports(common)
nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
self.assertEqual(nsp, "1:8:1")
def test_get_least_used_nsp_for_host_new(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
            # Set up two iSCSI IPs.
iscsi_ips = ["10.10.220.252", "10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports(common)
            # Host 'newhost' does not yet have any iSCSI paths,
            # so the least-used port is returned.
nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
self.assertEqual(nsp, "1:8:2")
def test_get_least_used_nsp_for_host_reuse(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
            # Set up two iSCSI IPs.
iscsi_ips = ["10.10.220.252", "10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports(common)
            # Hosts 'foo' and 'bar' already have active iSCSI paths;
            # each should be handed the port it is already using.
nsp = self.driver._get_least_used_nsp_for_host(common, 'foo')
self.assertEqual(nsp, "1:8:2")
nsp = self.driver._get_least_used_nsp_for_host(common, 'bar')
self.assertEqual(nsp, "1:8:1")
    def test_get_least_used_nsp_for_host_fc(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS1_RET
mock_client.getVLUNs.return_value = VLUNS5_RET
        # Set up two iSCSI IPs.
iscsi_ips = ["10.10.220.252", "10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
self.driver.initialize_iscsi_ports(common)
nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
self.assertNotEqual(nsp, "0:6:3")
self.assertEqual(nsp, "1:8:1")
def test_invalid_iscsi_ip(self):
config = self.setup_configuration()
config.hp3par_iscsi_ips = ['10.10.220.250', '10.10.220.251']
config.iscsi_ip_address = '10.10.10.10'
mock_conf = {
'getPorts.return_value': {
'members': [
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2},
'protocol': 2,
'IPAddr': '10.10.220.252',
'linkState': 4,
'device': [],
'iSCSIName': self.TARGET_IQN,
'mode': 2,
'HWAddr': '2C27D75375D2',
'type': 8},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
'protocol': 2,
'IPAddr': '10.10.220.253',
'linkState': 4,
'device': [],
'iSCSIName': self.TARGET_IQN,
'mode': 2,
'HWAddr': '2C27D75375D6',
'type': 8}]}}
        # None of the configured iSCSI IPs match the array's ports, so
        # driver setup must raise InvalidInput.
self.assertRaises(exception.InvalidInput,
self.setup_driver,
config=config,
mock_conf=mock_conf)
def test_get_least_used_nsp(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
ports = [
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 2}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}]
mock_client.getVLUNs.return_value = {'members': ports}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
            # Tally active VLUNs per port; the least-used candidate wins.
vluns = common.client.getVLUNs()
nsp = self.driver._get_least_used_nsp(common, vluns['members'],
['0:2:1', '1:8:1'])
self.assertEqual(nsp, '1:8:1')
ports = [
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True}]
mock_client.getVLUNs.return_value = {'members': ports}
        # Tally active VLUNs per port; the least-used candidate wins.
common = self.driver._login()
vluns = common.client.getVLUNs()
nsp = self.driver._get_least_used_nsp(common, vluns['members'],
['0:2:1', '1:2:1'])
self.assertEqual(nsp, '1:2:1')
ports = [
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True}]
mock_client.getVLUNs.return_value = {'members': ports}
        # Tally active VLUNs per port; the least-used candidate wins.
common = self.driver._login()
vluns = common.client.getVLUNs()
nsp = self.driver._get_least_used_nsp(common, vluns['members'],
['1:1:1', '1:2:1'])
self.assertEqual(nsp, '1:1:1')
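
    # _set_3par_chaps is a no-op while hp3par_iscsi_chap_enabled is off;
    # with CHAP on, it pushes the credentials to the host via modifyHost.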
def test_set_3par_chaps(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
expected = []
self.driver._set_3par_chaps(
common, 'test-host', 'test-vol', 'test-host', 'pass')
mock_client.assert_has_calls(expected)
# setup_mock_client drive with CHAP enabled configuration
# and return the mock HTTP 3PAR client
config = self.setup_configuration()
config.hp3par_iscsi_chap_enabled = True
mock_client = self.setup_driver(config=config)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
expected_mod_request = {
'chapOperation': mock_client.HOST_EDIT_ADD,
'chapOperationMode': mock_client.CHAP_INITIATOR,
'chapName': 'test-host',
'chapSecret': 'fake'
}
expected = [
mock.call.modifyHost('test-host', expected_mod_request)
]
self.driver._set_3par_chaps(
common, 'test-host', 'test-vol', 'test-host', 'fake')
mock_client.assert_has_calls(expected)
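
    # _do_export returns CHAP provider_auth only when CHAP is enabled,
    # persisting the generated credentials as volume metadata on the
    # array.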
@mock.patch('cinder.volume.utils.generate_password')
def test_do_export(self, mock_utils):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
volume = {'host': 'test-host@3pariscsi',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
mock_utils.return_value = 'random-pass'
mock_client.getHostVLUNs.return_value = [
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': None, 'type': 0,
'remoteName': 'iqn.1993-08.org.debian:01:222'}
]
mock_client.getHost.return_value = {
'name': 'osv-0DM4qZEVSKON-DXN-NwVpw',
'initiatorChapEnabled': True
}
mock_client.getVolumeMetaData.return_value = {
'value': 'random-pass'
}
expected = []
expected_model = {'provider_auth': None}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
model = self.driver._do_export(common, volume)
mock_client.assert_has_calls(expected)
self.assertEqual(expected_model, model)
mock_client.reset_mock()
# setup_mock_client drive with CHAP enabled configuration
# and return the mock HTTP 3PAR client
config = self.setup_configuration()
config.hp3par_iscsi_chap_enabled = True
mock_client = self.setup_driver(config=config)
volume = {'host': 'test-host@3pariscsi',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
mock_utils.return_value = 'random-pass'
mock_client.getHostVLUNs.return_value = [
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': None, 'type': 0,
'remoteName': 'iqn.1993-08.org.debian:01:222'}
]
mock_client.getHost.return_value = {
'name': 'osv-0DM4qZEVSKON-DXN-NwVpw',
'initiatorChapEnabled': True
}
mock_client.getVolumeMetaData.return_value = {
'value': 'random-pass'
}
expected = [
mock.call.getHostVLUNs('test-host'),
mock.call.getHost('test-host'),
mock.call.getVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
mock.call.setVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
mock.call.setVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
]
expected_model = {'provider_auth': 'CHAP test-host random-pass'}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
model = self.driver._do_export(common, volume)
mock_client.assert_has_calls(expected)
self.assertEqual(expected_model, model)
@mock.patch('cinder.volume.utils.generate_password')
def test_do_export_host_not_found(self, mock_utils):
# setup_mock_client drive with CHAP enabled configuration
# and return the mock HTTP 3PAR client
config = self.setup_configuration()
config.hp3par_iscsi_chap_enabled = True
mock_client = self.setup_driver(config=config)
volume = {'host': 'test-host@3pariscsi',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
mock_utils.return_value = "random-pass"
mock_client.getHostVLUNs.side_effect = hpexceptions.HTTPNotFound(
'fake')
mock_client.getVolumeMetaData.return_value = {
'value': 'random-pass'
}
expected = [
mock.call.getHostVLUNs('test-host'),
mock.call.setVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
mock.call.setVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
]
expected_model = {'provider_auth': 'CHAP test-host random-pass'}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
model = self.driver._do_export(common, volume)
mock_client.assert_has_calls(expected)
self.assertEqual(expected_model, model)
@mock.patch('cinder.volume.utils.generate_password')
def test_do_export_host_chap_disabled(self, mock_utils):
# setup_mock_client drive with CHAP enabled configuration
# and return the mock HTTP 3PAR client
config = self.setup_configuration()
config.hp3par_iscsi_chap_enabled = True
mock_client = self.setup_driver(config=config)
volume = {'host': 'test-host@3pariscsi',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
mock_utils.return_value = 'random-pass'
mock_client.getHostVLUNs.return_value = [
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': None, 'type': 0,
'remoteName': 'iqn.1993-08.org.debian:01:222'}
]
mock_client.getHost.return_value = {
'name': 'fake-host',
'initiatorChapEnabled': False
}
mock_client.getVolumeMetaData.return_value = {
'value': 'random-pass'
}
expected = [
mock.call.getHostVLUNs('test-host'),
mock.call.getHost('test-host'),
mock.call.getVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
mock.call.setVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
mock.call.setVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
]
expected_model = {'provider_auth': 'CHAP test-host random-pass'}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
model = self.driver._do_export(common, volume)
mock_client.assert_has_calls(expected)
self.assertEqual(expected_model, model)
@mock.patch('cinder.volume.utils.generate_password')
def test_do_export_no_active_vluns(self, mock_utils):
# setup_mock_client drive with CHAP enabled configuration
# and return the mock HTTP 3PAR client
config = self.setup_configuration()
config.hp3par_iscsi_chap_enabled = True
mock_client = self.setup_driver(config=config)
volume = {'host': 'test-host@3pariscsi',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
mock_utils.return_value = "random-pass"
mock_client.getHostVLUNs.return_value = [
{'active': False,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': None, 'type': 0,
'remoteName': 'iqn.1993-08.org.debian:01:222'}
]
mock_client.getHost.return_value = {
'name': 'fake-host',
'initiatorChapEnabled': True
}
mock_client.getVolumeMetaData.return_value = {
'value': 'random-pass'
}
expected = [
mock.call.getHostVLUNs('test-host'),
mock.call.getHost('test-host'),
mock.call.setVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
mock.call.setVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
]
expected_model = {'provider_auth': 'CHAP test-host random-pass'}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
model = self.driver._do_export(common, volume)
mock_client.assert_has_calls(expected)
self.assertEqual(model, expected_model)
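
    # ensure_export rebuilds provider_auth from CHAP credentials stored
    # in volume metadata, or returns provider_auth None when none exist.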
def test_ensure_export(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
volume = {'host': 'test-host@3pariscsi',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
mock_client.getAllVolumeMetaData.return_value = {
'total': 0,
'members': []
}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
model = self.driver.ensure_export(None, volume)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw')
]
expected_model = {'provider_auth': None}
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(model, expected_model)
mock_client.getAllVolumeMetaData.return_value = {
'total': 2,
'members': [
{
'creationTimeSec': 1406074222,
'value': 'fake-host',
'key': CHAP_USER_KEY,
'creationTime8601': '2014-07-22T17:10:22-07:00'
},
{
'creationTimeSec': 1406074222,
'value': 'random-pass',
'key': CHAP_PASS_KEY,
'creationTime8601': '2014-07-22T17:10:22-07:00'
}
]
}
model = self.driver.ensure_export(None, volume)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw')
]
expected_model = {'provider_auth': "CHAP fake-host random-pass"}
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(model, expected_model)
def test_ensure_export_missing_volume(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
volume = {'host': 'test-host@3pariscsi',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound(
'fake')
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
model = self.driver.ensure_export(None, volume)
expected = [mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw')]
expected_model = None
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(model, expected_model)
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_volume_settings_default_pool(self, _mock_volume_types):
_mock_volume_types.return_value = {
'name': 'gold',
'id': 'gold-id',
'extra_specs': {}}
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume = {'host': 'test-host@3pariscsi#pool_foo',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
pool = volume_utils.extract_host(volume['host'], 'pool')
model = common.get_volume_settings_from_type_id('gold-id', pool)
self.assertEqual(model['cpg'], 'pool_foo')
def test_get_model_update(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
model_update = common._get_model_update('xxx@yyy#zzz', 'CPG')
self.assertEqual(model_update, {'host': 'xxx@yyy#CPG'})
VLUNS5_RET = ({'members':
[{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
'active': True},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
'active': True}]})
PORTS_RET = ({'members':
[{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2},
'protocol': 2,
'IPAddr': '10.10.220.252',
'linkState': 4,
'device': [],
'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d',
'mode': 2,
'HWAddr': '2C27D75375D2',
'type': 8},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
'protocol': 2,
'IPAddr': '10.10.220.253',
'linkState': 4,
'device': [],
'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
'mode': 2,
'HWAddr': '2C27D75375D6',
'type': 8}]})
VLUNS1_RET = ({'members':
[{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2},
'hostname': 'foo', 'active': True},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
'hostname': 'bar', 'active': True},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
'hostname': 'bar', 'active': True},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
'hostname': 'bar', 'active': True}]})
PORTS1_RET = ({'members':
[{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
'protocol': 2,
'IPAddr': '10.10.120.252',
'linkState': 4,
'device': [],
'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d',
'mode': 2,
'HWAddr': '2C27D75375D2',
'type': 8},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
'protocol': 2,
'IPAddr': '10.10.220.253',
'linkState': 4,
'device': [],
'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
'mode': 2,
'HWAddr': '2C27D75375D6',
'type': 8},
{'portWWN': '20210002AC00383D',
'protocol': 1,
'linkState': 4,
'mode': 2,
'device': ['cage2'],
'nodeWWN': '20210002AC00383D',
'type': 2,
'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]})
| blueboxgroup/cinder | cinder/tests/test_hp3par.py | Python | apache-2.0 | 178,661 |
# $HeadURL$
__RCSID__ = "$Id$"
import time
import copy
import os.path
import GSI
from DIRAC.Core.Utilities.ReturnValues import S_ERROR, S_OK
from DIRAC.Core.Utilities.Network import checkHostsMatch
from DIRAC.Core.Utilities.LockRing import LockRing
from DIRAC.Core.Security import Locations
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.FrameworkSystem.Client.Logger import gLogger
DEFAULT_SSL_CIPHERS = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS"
class SocketInfo:
__cachedCAsCRLs = False
__cachedCAsCRLsLastLoaded = 0
__cachedCAsCRLsLoadLock = LockRing().getLock()
def __init__( self, infoDict, sslContext = None ):
self.__retry = 0
self.infoDict = infoDict
if sslContext:
self.sslContext = sslContext
else:
if self.infoDict[ 'clientMode' ]:
if 'useCertificates' in self.infoDict and self.infoDict[ 'useCertificates' ]:
retVal = self.__generateContextWithCerts()
elif 'proxyString' in self.infoDict:
retVal = self.__generateContextWithProxyString()
else:
retVal = self.__generateContextWithProxy()
else:
retVal = self.__generateServerContext()
if not retVal[ 'OK' ]:
raise Exception( retVal[ 'Message' ] )
def __getValue( self, optName, default ):
if optName not in self.infoDict:
return default
return self.infoDict[ optName ]
def setLocalCredentialsLocation( self, credTuple ):
self.infoDict[ 'localCredentialsLocation' ] = credTuple
def getLocalCredentialsLocation( self ):
return self.infoDict[ 'localCredentialsLocation' ]
def gatherPeerCredentials( self ):
certList = self.sslSocket.get_peer_certificate_chain()
#Servers don't receive the whole chain, the last cert comes alone
if not self.infoDict[ 'clientMode' ]:
certList.insert( 0, self.sslSocket.get_peer_certificate() )
peerChain = X509Chain( certList = certList )
isProxyChain = peerChain.isProxy()['Value']
isLimitedProxyChain = peerChain.isLimitedProxy()['Value']
if isProxyChain:
identitySubject = peerChain.getIssuerCert()['Value'].getSubjectNameObject()[ 'Value' ]
else:
identitySubject = peerChain.getCertInChain( 0 )['Value'].getSubjectNameObject()[ 'Value' ]
credDict = { 'DN' : identitySubject.one_line(),
'CN' : identitySubject.commonName,
'x509Chain' : peerChain,
'isProxy' : isProxyChain,
'isLimitedProxy' : isLimitedProxyChain }
diracGroup = peerChain.getDIRACGroup()
if diracGroup[ 'OK' ] and diracGroup[ 'Value' ]:
credDict[ 'group' ] = diracGroup[ 'Value' ]
self.infoDict[ 'peerCredentials' ] = credDict
return credDict
def setSSLSocket( self, sslSocket ):
self.sslSocket = sslSocket
def getSSLSocket( self ):
return self.sslSocket
def getSSLContext( self ):
return self.sslContext
def clone( self ):
try:
return S_OK( SocketInfo( dict( self.infoDict ), self.sslContext ) )
except Exception, e:
return S_ERROR( str( e ) )
def verifyCallback( self, *args, **kwargs ):
#gLogger.debug( "verify Callback %s" % str( args ) )
if self.infoDict[ 'clientMode' ]:
return self._clientCallback( *args, **kwargs )
else:
return self._serverCallback( *args, **kwargs )
def __isSameHost( self, hostCN, hostConn ):
""" Guess if it is the same host or not
"""
hostCN_m = hostCN
if '/' in hostCN:
hostCN_m = hostCN.split( '/' )[1]
if hostCN_m == hostConn:
return True
result = checkHostsMatch( hostCN_m, hostConn )
if not result[ 'OK' ]:
return False
return result[ 'Value' ]
def _clientCallback( self, conn, cert, errnum, depth, ok ):
# This obviously has to be updated
if depth == 0 and ok == 1:
hostnameCN = cert.get_subject().commonName
#if hostnameCN in ( self.infoDict[ 'hostname' ], "host/%s" % self.infoDict[ 'hostname' ] ):
if self.__isSameHost( hostnameCN, self.infoDict['hostname'] ):
return 1
else:
gLogger.warn( "Server is not who it's supposed to be",
"Connecting to %s and it's %s" % ( self.infoDict[ 'hostname' ], hostnameCN ) )
return ok
return ok
def _serverCallback( self, conn, cert, errnum, depth, ok ):
return ok
def __getCAStore( self ):
SocketInfo.__cachedCAsCRLsLoadLock.acquire()
try:
if not SocketInfo.__cachedCAsCRLs or time.time() - SocketInfo.__cachedCAsCRLsLastLoaded > 900:
#Need to generate the CA Store
casDict = {}
crlsDict = {}
casPath = Locations.getCAsLocation()
if not casPath:
return S_ERROR( "No valid CAs location found" )
gLogger.debug( "CAs location is %s" % casPath )
casFound = 0
crlsFound = 0
SocketInfo.__caStore = GSI.crypto.X509Store()
for fileName in os.listdir( casPath ):
filePath = os.path.join( casPath, fileName )
if not os.path.isfile( filePath ):
continue
fObj = file( filePath, "rb" )
pemData = fObj.read()
fObj.close()
#Try to load CA Cert
try:
caCert = GSI.crypto.load_certificate( GSI.crypto.FILETYPE_PEM, pemData )
if caCert.has_expired():
continue
caID = ( caCert.get_subject().one_line(), caCert.get_issuer().one_line() )
caNotAfter = caCert.get_not_after()
if caID not in casDict:
casDict[ caID ] = ( caNotAfter, caCert )
casFound += 1
else:
if casDict[ caID ][0] < caNotAfter:
casDict[ caID ] = ( caNotAfter, caCert )
continue
except:
if fileName.find( ".0" ) == len( fileName ) - 2:
gLogger.exception( "LOADING %s" % filePath )
if 'IgnoreCRLs' not in self.infoDict or not self.infoDict[ 'IgnoreCRLs' ]:
#Try to load CRL
try:
crl = GSI.crypto.load_crl( GSI.crypto.FILETYPE_PEM, pemData )
if crl.has_expired():
continue
crlID = crl.get_issuer().one_line()
crlNotAfter = crl.get_not_after()
if crlID not in crlsDict:
crlsDict[ crlID ] = ( crlNotAfter, crl )
crlsFound += 1
else:
if crlsDict[ crlID ][0] < crlNotAfter:
crlsDict[ crlID ] = ( crlNotAfter, crl )
continue
except:
if fileName.find( ".r0" ) == len( fileName ) - 2:
gLogger.exception( "LOADING %s" % filePath )
gLogger.debug( "Loaded %s CAs [%s CRLs]" % ( casFound, crlsFound ) )
SocketInfo.__cachedCAsCRLs = ( [ casDict[k][1] for k in casDict ],
[ crlsDict[k][1] for k in crlsDict ] )
SocketInfo.__cachedCAsCRLsLastLoaded = time.time()
except:
gLogger.exception( "ASD" )
finally:
SocketInfo.__cachedCAsCRLsLoadLock.release()
#Generate CA Store
caStore = GSI.crypto.X509Store()
caList = SocketInfo.__cachedCAsCRLs[0]
for caCert in caList:
caStore.add_cert( caCert )
crlList = SocketInfo.__cachedCAsCRLs[1]
for crl in crlList:
caStore.add_crl( crl )
return S_OK( caStore )
def __createContext( self ):
clientContext = self.__getValue( 'clientMode', False )
# Initialize context
contextOptions = GSI.SSL.OP_ALL
if clientContext:
methodSuffix = "CLIENT_METHOD"
else:
methodSuffix = "SERVER_METHOD"
contextOptions |= GSI.SSL.OP_NO_SSLv2 | GSI.SSL.OP_NO_SSLv3
if 'sslMethod' in self.infoDict:
methodName = "%s_%s" % ( self.infoDict[ 'sslMethod' ], methodSuffix )
else:
methodName = "TLSv1_%s" % ( methodSuffix )
try:
method = getattr( GSI.SSL, methodName )
except:
return S_ERROR( "SSL method %s is not valid" % self.infoDict[ 'sslMethod' ] )
self.sslContext = GSI.SSL.Context( method )
self.sslContext.set_cipher_list( self.infoDict.get( 'sslCiphers', DEFAULT_SSL_CIPHERS ) )
if contextOptions:
self.sslContext.set_options( contextOptions )
#self.sslContext.set_read_ahead( 1 )
#Enable GSI?
gsiEnable = False
if not clientContext or self.__getValue( 'gsiEnable', False ):
gsiEnable = True
#DO CA Checks?
if not self.__getValue( 'skipCACheck', False ):
#self.sslContext.set_verify( SSL.VERIFY_PEER|SSL.VERIFY_FAIL_IF_NO_PEER_CERT, self.verifyCallback ) # Demand a certificate
self.sslContext.set_verify( GSI.SSL.VERIFY_PEER | GSI.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, None, gsiEnable ) # Demand a certificate
result = self.__getCAStore()
if not result[ 'OK' ]:
return result
caStore = result[ 'Value' ]
self.sslContext.set_cert_store( caStore )
else:
      self.sslContext.set_verify( GSI.SSL.VERIFY_NONE, None, gsiEnable ) # Don't demand a certificate
return S_OK()
def __generateContextWithCerts( self ):
certKeyTuple = Locations.getHostCertificateAndKeyLocation()
if not certKeyTuple:
return S_ERROR( "No valid certificate or key found" )
self.setLocalCredentialsLocation( certKeyTuple )
gLogger.debug( "Using certificate %s\nUsing key %s" % certKeyTuple )
retVal = self.__createContext()
if not retVal[ 'OK' ]:
return retVal
    #Set verify depth to 50 to ensure accepting proxies of proxies of proxies....
self.sslContext.set_verify_depth( 50 )
self.sslContext.use_certificate_chain_file( certKeyTuple[0] )
self.sslContext.use_privatekey_file( certKeyTuple[1] )
return S_OK()
def __generateContextWithProxy( self ):
if 'proxyLocation' in self.infoDict:
proxyPath = self.infoDict[ 'proxyLocation' ]
if not os.path.isfile( proxyPath ):
return S_ERROR( "Defined proxy is not a file" )
else:
proxyPath = Locations.getProxyLocation()
if not proxyPath:
return S_ERROR( "No valid proxy found" )
self.setLocalCredentialsLocation( ( proxyPath, proxyPath ) )
gLogger.debug( "Using proxy %s" % proxyPath )
retVal = self.__createContext()
if not retVal[ 'OK' ]:
return retVal
self.sslContext.use_certificate_chain_file( proxyPath )
self.sslContext.use_privatekey_file( proxyPath )
return S_OK()
def __generateContextWithProxyString( self ):
proxyString = self.infoDict[ 'proxyString' ]
self.setLocalCredentialsLocation( ( proxyString, proxyString ) )
gLogger.debug( "Using string proxy" )
retVal = self.__createContext()
if not retVal[ 'OK' ]:
return retVal
self.sslContext.use_certificate_chain_string( proxyString )
self.sslContext.use_privatekey_string( proxyString )
return S_OK()
def __generateServerContext( self ):
retVal = self.__generateContextWithCerts()
if not retVal[ 'OK' ]:
return retVal
self.sslContext.set_session_id( "DISETConnection%s" % str( time.time() ) )
#self.sslContext.get_cert_store().set_flags( GSI.crypto.X509_CRL_CHECK )
if 'SSLSessionTimeout' in self.infoDict:
timeout = int( self.infoDict['SSLSessionTimeout'] )
gLogger.debug( "Setting session timeout to %s" % timeout )
self.sslContext.set_session_timeout( timeout )
return S_OK()
def doClientHandshake( self ):
self.sslSocket.set_connect_state()
return self.__sslHandshake()
def doServerHandshake( self ):
self.sslSocket.set_accept_state()
return self.__sslHandshake()
#@gSynchro
def __sslHandshake( self ):
start = time.time()
timeout = self.infoDict[ 'timeout' ]
while True:
if timeout:
if time.time() - start > timeout:
return S_ERROR( "Handshake timeout exceeded" )
try:
self.sslSocket.do_handshake()
break
except GSI.SSL.WantReadError:
time.sleep( 0.001 )
except GSI.SSL.WantWriteError:
time.sleep( 0.001 )
except GSI.SSL.Error, v:
if self.__retry < 3:
self.__retry += 1
return self.__sslHandshake()
else:
# gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
gLogger.warn( "Error while handshaking", v )
return S_ERROR( "Error while handshaking" )
except Exception, v:
gLogger.warn( "Error while handshaking", v )
if self.__retry < 3:
self.__retry += 1
return self.__sslHandshake()
else:
# gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
gLogger.warn( "Error while handshaking", v )
return S_ERROR( "Error while handshaking" )
credentialsDict = self.gatherPeerCredentials()
if self.infoDict[ 'clientMode' ]:
hostnameCN = credentialsDict[ 'CN' ]
#if hostnameCN.split("/")[-1] != self.infoDict[ 'hostname' ]:
if not self.__isSameHost( hostnameCN, self.infoDict[ 'hostname' ] ):
gLogger.warn( "Server is not who it's supposed to be",
"Connecting to %s and it's %s" % ( self.infoDict[ 'hostname' ], hostnameCN ) )
gLogger.debug( "", "Authenticated peer (%s)" % credentialsDict[ 'DN' ] )
return S_OK( credentialsDict )
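
# A construction sketch for illustration only (the infoDict keys and values
# below are assumptions; real dictionaries are assembled by the DISET
# transport layer before handshaking):
#
#   info = { 'clientMode' : True, 'hostname' : 'dirac.example.org',
#            'timeout' : 60, 'proxyLocation' : '/tmp/x509up_u1000' }
#   sockInfo = SocketInfo( info )            # builds the SSL context
#   sockInfo.setSSLSocket( sslSocket )       # sslSocket: a GSI.SSL connection
#   result = sockInfo.doClientHandshake()    # S_OK( credDict ) on success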
| marcelovilaca/DIRAC | Core/DISET/private/Transports/SSL/SocketInfo.py | Python | gpl-3.0 | 13,388 |
'''----------------------------- Imports -----------------------------'''
# Hack computer
from ._x__components import *
'''------------------------- Program counter -------------------------'''
class ProgramCounterN_():
''' N bit program counter
if rst(t-1) : out(t) = 0
elif write(t-1) : out(t) = in(t-1)
elif inc(t-1) : out(t) = out(t-1) + 1
else : out(t) = out(t-1)
'''
def __init__( self, N ):
self.N = N
self.register = RegisterN_( N )
def doTheThing( self, clk, x, rst, write, inc ):
change = or3_( write, inc, rst )
d = muxN_(
# Reset
self.N,
zeroN_( self.N ),
muxN_(
# Jump
self.N,
             x[ - self.N : ], # truncate x to self.N bits by dropping its most significant bits
muxN_(
# Increment
self.N,
incrementN_( self.N, self.register.read() ),
self.register.read(),
inc
),
write
),
rst
)
self.register.write( clk, d, change )
def read( self ):
return self.register.readDecimal()
# out = self.register.readDecimal()
# print( out )
# return( out )
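
# Behaviour sketch (illustrative only; clk, x and the rst/write/inc signal
# encodings come from _x__components, so this is not runnable standalone):
#
#   pc = ProgramCounterN_( 16 )
#   pc.doTheThing( clk, x, rst = 0, write = 0, inc = 1 )  # out(t) = out(t-1) + 1
#   pc.doTheThing( clk, x, rst = 0, write = 1, inc = 0 )  # out(t) = x(t-1)
#   pc.doTheThing( clk, x, rst = 1, write = 0, inc = 0 )  # out(t) = 0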
| JetStarBlues/Nand-2-Tetris | OldArchitecture/v1.0/Components/_6__programCounter.py | Python | mit | 1,119 |
class Solution(object):
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
# related to 84.py
if not matrix:
return 0
m, n = len(matrix), len(matrix[0])
max_area = 0
for i in range(m):
stack = [(0, 0)]
res = 0
for j in range(n):
# update height
matrix[i][j] = (0, 1)[matrix[i][j] == '1']
if i > 0 and matrix[i][j]:
matrix[i][j] += matrix[i-1][j]
# max area in this level
index = j
while matrix[i][j] < stack[-1][1]:
index, height = stack.pop()
res = max(res, (j - index) * height)
if matrix[i][j] > stack[-1][1]:
stack.append((index, matrix[i][j]))
# update rest
while stack:
index, height = stack.pop()
res = max(res, (n - index) * height)
max_area = max(max_area, res)
return max_area
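
# A small self-check of the stack-based histogram approach above, using the
# classic example grid (illustrative; LeetCode normally supplies the harness):
if __name__ == '__main__':
    grid = [["1", "0", "1", "0", "0"],
            ["1", "0", "1", "1", "1"],
            ["1", "1", "1", "1", "1"],
            ["1", "0", "0", "1", "0"]]
    print(Solution().maximalRectangle(grid))  # expected: 6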
| sogapalag/problems | leetcode/85.Maximal-Rectangle.py | Python | mit | 1,148 |
number = int(input("Number: "))
i = 2
while number > 1:
if number % i == 0:
number //= i
print(i)
else:
i += 1
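
# Example session (illustrative): entering 84 prints its prime factors in
# non-decreasing order:
#   Number: 84
#   2
#   2
#   3
#   7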
| JoachimVandersmissen/CodingSolutions | python/PythonForEveryone/chapter4/16.py | Python | apache-2.0 | 143 |
# -*- coding: utf-8 -*-
"""
Sycamore - "create a new wiki" action
This action allows you to create a new wiki in your wiki farm if you:
1) Have wiki_farm = True set in your config
2) Have allow_web_based_wiki_creation = True set in your config
@copyright: 2006 Philip Neustrom <philipn@gmail.com>
@license: GNU GPL, see COPYING for details.
"""
# Imports
import random
import base64
import sha
import time
from Sycamore import config
from Sycamore import farm
from Sycamore import wikiutil
from Sycamore.util import mail
from Sycamore.Page import Page
from Sycamore.formatter.text_html import Formatter
do_email_auth = True
if do_email_auth:
from Sycamore.action import captcha
WIKI_PENDING_TIME = 60*30 # how long is our email good for?
def _createCode(request):
wikiname = request.form['wikiname'][0].lower()
ourcode = str(random.random())
written_time = time.time()
d = {'wiki_name':wikiname, 'code':ourcode, 'written_time':written_time}
request.cursor.execute(
"""INSERT into wikisPending (wiki_name, code, written_time) values
(%(wiki_name)s, %(code)s, %(written_time)s)""", d, isWrite=True)
return ourcode
def _isValidCode(request, given_wiki_name, given_code):
state = False
timenow = time.time()
d = {'wiki_name':given_wiki_name, 'code':given_code,
'timevalid': (timenow - WIKI_PENDING_TIME)}
request.cursor.execute("""SELECT written_time from wikisPending
where code=%(code)s and
wiki_name=%(wiki_name)s and
written_time > %(timevalid)s""", d)
result = request.cursor.fetchone()
if result and result[0]:
state = True
# decent place to clear out expired wikis
request.cursor.execute("""DELETE from wikisPending
where written_time <= %(timevalid)s""",
d, isWrite=True)
return state
def _clearAuthCode(request, wikiname, code):
d = {'wiki_name':wikiname, 'code':code}
request.cursor.execute("""DELETE from wikisPending
where code=%(code)s and
wiki_name=%(wiki_name)s""",
d, isWrite=True)
def send_validation_email(wikiname, request):
if not config.mail_smarthost:
msg = ("This wiki is not enabled for mail processing. "
"Contact the owner of the wiki, who can enable email.")
elif not request.isPOST():
msg = "Use the interactive interface to change settings!"
# check whether the user has an email address
elif not request.user.email:
msg = ("You didn't enter an email address in your profile. "
"Select settings in the upper right corner and enter a "
"valid email address.")
else:
code = _createCode(request)
text = ("To create your wiki, %s, follow go to this URL: "
"%s?action=new_wiki&wikiname=%s&code=%s . "
"Note that this magic wiki-creating URL will expire in "
"30 minutes." % (wikiname, farm.getBaseFarmURL(request),
wikiname, code))
mailok, msg = mail.sendmail(request, [request.user.email],
"Creating your wiki..", text,
mail_from=config.mail_from)
msg = ("An email with instructions has been sent to your email "
"address, %s. Check your mail!" % request.user.email)
return msg
def has_valid_email_link(request):
if (request.form.has_key('wikiname') and request.form['wikiname'][0] and
request.form.has_key('code') and request.form['code'][0]):
wikiname = request.form['wikiname'][0]
code = request.form['code'][0]
if _isValidCode(request, wikiname, code):
return code
return False
def execute(pagename, request):
from Sycamore.PageEditor import PageEditor
_ = request.getText
actname = __name__.split('.')[-1]
page = Page(pagename, request)
msg = None
form = request.form
wikiname = None
if not config.wiki_farm or not config.allow_web_based_wiki_creation:
return page.send_page(msg='You are not allowed to create new wikis.')
if not request.user.valid:
return page.send_page(msg='You must be logged in to create new wikis.')
if do_email_auth:
if request.form.has_key('send_email') and request.form['send_email'][0]:
if (not request.form.has_key('wikiname') or not
request.form['wikiname'][0]):
return page.send_page(msg='Missing wiki name.')
wikiname = request.form['wikiname'][0].lower()
if not farm.isValidWikiName(wikiname):
msg = ('Wiki creation failed because the wiki name "%s" is '
'invalid. You may only use the numbers 0-9, the '
'letters a-z, and the dash "-" in a wiki name.' %
wikiname)
elif wikiutil.isInFarm(wikiname, request):
msg = 'Wiki "%s" already exists!' % wikiname
else:
msg = send_validation_email(wikiname, request)
return page.send_page(msg=msg)
email_code = has_valid_email_link(request)
if not email_code:
return page.send_page(msg=("Invalid email link. "
"To create a wiki you must follow the link send to your email account."))
if form.has_key('wikiname') and form['wikiname'][0]:
can_create_wiki = False
wikiname = form['wikiname'][0].lower()
if do_email_auth:
if not config.captcha_support:
can_create_wiki = True
elif form.has_key('captcha_id') and form.has_key('captcha_code'):
this_captcha = captcha.Captcha(page, id=form['captcha_id'][0])
if this_captcha.check(form['captcha_code'][0]):
can_create_wiki = True
else:
msg = ('Human verification was incorrect. '
'Please try again!')
else:
if form.has_key('audio'):
type = 'wav'
else:
type = 'png'
captcha.send_captcha(page, wikiname, actname, email_code, type)
return
else:
can_create_wiki = True
if can_create_wiki:
msg = farm.create_wiki(wikiname, request.user.name, request)
if do_email_auth:
_clearAuthCode(request, wikiname, email_code)
if msg:
# there was a problem
return page.send_page(msg=msg)
farm.add_wiki_to_watch(wikiname, request)
formatter = Formatter(request)
wiki_location = farm.link_to_wiki(wikiname, formatter)
msg = (
'Wiki "%s" created successfully! '
'Follow this link to get to your wiki:\n'
'<p>\n'
'%s\n'
'</p>\n'
'<p>\n'
'The wiki was added to your list of watched wikis '
'(change in '
'<a href="%sUser_Settings">your account settings</a>).\n'
'</p>' %
(wikiname, wiki_location, farm.getBaseFarmURL(request)))
return page.send_page(msg=msg)
| philipn/sycamore | Sycamore/action/new_wiki.py | Python | gpl-2.0 | 7,553 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cStringIO
import select
import socket
import time
import warnings
from tempest import exceptions
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import paramiko
class Client(object):
def __init__(self, host, username, password=None, timeout=300, pkey=None,
channel_timeout=10, look_for_keys=False, key_filename=None):
self.host = host
self.username = username
self.password = password
if isinstance(pkey, basestring):
pkey = paramiko.RSAKey.from_private_key(
cStringIO.StringIO(str(pkey)))
self.pkey = pkey
self.look_for_keys = look_for_keys
self.key_filename = key_filename
self.timeout = int(timeout)
self.channel_timeout = float(channel_timeout)
self.buf_size = 1024
def _get_ssh_connection(self, sleep=1.5, backoff=1.01):
"""Returns an ssh connection to the specified host."""
_timeout = True
bsleep = sleep
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
_start_time = time.time()
while not self._is_timed_out(_start_time):
try:
ssh.connect(self.host, username=self.username,
password=self.password,
look_for_keys=self.look_for_keys,
key_filename=self.key_filename,
timeout=self.timeout, pkey=self.pkey)
_timeout = False
break
except (socket.error,
paramiko.AuthenticationException):
time.sleep(bsleep)
bsleep *= backoff
continue
if _timeout:
raise exceptions.SSHTimeout(host=self.host,
user=self.username,
password=self.password)
return ssh
def _is_timed_out(self, start_time):
return (time.time() - self.timeout) > start_time
def connect_until_closed(self):
"""Connect to the server and wait until connection is lost."""
try:
ssh = self._get_ssh_connection()
_transport = ssh.get_transport()
_start_time = time.time()
_timed_out = self._is_timed_out(_start_time)
while _transport.is_active() and not _timed_out:
time.sleep(5)
_timed_out = self._is_timed_out(_start_time)
ssh.close()
except (EOFError, paramiko.AuthenticationException, socket.error):
return
def exec_command(self, cmd):
"""
Execute the specified command on the server.
        Note that this method reads the whole command output into memory, so
        it shouldn't be used for commands that produce large outputs.
:returns: data read from standard output of the command.
:raises: SSHExecCommandFailed if command returns nonzero
status. The exception contains command status stderr content.
"""
ssh = self._get_ssh_connection()
transport = ssh.get_transport()
channel = transport.open_session()
channel.fileno() # Register event pipe
channel.exec_command(cmd)
channel.shutdown_write()
out_data = []
err_data = []
poll = select.poll()
poll.register(channel, select.POLLIN)
start_time = time.time()
while True:
ready = poll.poll(self.channel_timeout)
if not any(ready):
if not self._is_timed_out(start_time):
continue
raise exceptions.TimeoutException(
"Command: '{0}' executed on host '{1}'.".format(
cmd, self.host))
if not ready[0]: # If there is nothing to read.
continue
out_chunk = err_chunk = None
if channel.recv_ready():
out_chunk = channel.recv(self.buf_size)
out_data += out_chunk,
if channel.recv_stderr_ready():
err_chunk = channel.recv_stderr(self.buf_size)
err_data += err_chunk,
if channel.closed and not err_chunk and not out_chunk:
break
exit_status = channel.recv_exit_status()
if 0 != exit_status:
raise exceptions.SSHExecCommandFailed(
command=cmd, exit_status=exit_status,
strerror=''.join(err_data))
return ''.join(out_data)
def test_connection_auth(self):
"""Returns true if ssh can connect to server."""
try:
connection = self._get_ssh_connection()
connection.close()
except paramiko.AuthenticationException:
return False
return True
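
# Illustrative usage (the host and credentials are assumptions; real callers
# take these values from the tempest configuration):
#
#   client = Client('192.0.2.10', 'cirros', password='secret',
#                   channel_timeout=20)
#   if client.test_connection_auth():
#       print client.exec_command('uname -a')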
| citrix-openstack-build/tempest | tempest/common/ssh.py | Python | apache-2.0 | 5,573 |
# Copyright (c) 2016-2017, Parallels International GmbH
# Copyright (c) 2017-2022, Virtuozzo International GmbH, All rights reserved
#
# This file is part of OpenVZ. OpenVZ is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
# Our contact details: Virtuozzo International GmbH, Vordergasse 59, 8200
# Schaffhausen, Switzerland.
import copy
import optparse
# borrowed from chromium
class OptionWithMemsize(optparse.Option):
@staticmethod
def _CheckMemsize(option, opt, value):
# Note: purposely no 'b' suffix, since that makes 0x12b ambiguous.
multiplier_table = [
('g', 1024 * 1024 * 1024),
('m', 1024 * 1024),
('k', 1024),
('', 1),
]
for (suffix, multiplier) in multiplier_table:
if value.lower().endswith(suffix):
new_value = value
if suffix:
new_value = new_value[:-len(suffix)]
try:
# Convert w/ base 0 (handles hex, binary, octal, etc)
return int(new_value, 0) * multiplier
except ValueError:
# Pass and try other suffixes; not useful now, but may be
# useful later if we ever allow B vs. GB vs. GiB.
pass
raise optparse.OptionValueError("option {}: invalid memsize value: "
"{}".format(opt, value))
TYPES = optparse.Option.TYPES + ('memsize',)
TYPE_CHECKER = copy.copy(optparse.Option.TYPE_CHECKER)
OptionWithMemsize.TYPE_CHECKER['memsize'] = OptionWithMemsize._CheckMemsize
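
# Illustrative usage sketch (the option name is hypothetical); the class plugs
# into optparse through the standard option_class hook, after which
# add_option() accepts type='memsize':
#
#   parser = optparse.OptionParser(option_class=OptionWithMemsize)
#   parser.add_option('--mem-limit', type='memsize')
#   opts, _ = parser.parse_args(['--mem-limit', '2g'])
#   assert opts.mem_limit == 2 * 1024 * 1024 * 1024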
| OpenVZ/vcmmd | vcmmd/util/optparse.py | Python | lgpl-2.1 | 2,269 |
# Django settings for example_project project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_ROOT, 'stockExchange.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'h51vphv5#0957l2o(jrdsai!l54h(kh&-m^4-1xdd7nwa6=1^^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example_project.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'south',
'kombu.transport.django',
'djcelery',
'dynamic_scraper',
'open_news',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# django-celery settings
import djcelery
djcelery.setup_loader()
BROKER_HOST = "localhost"
BROKER_PORT = 5672
BROKER_BACKEND = "django"
BROKER_USER = "guest"
BROKER_PASSWORD = "guest"
BROKER_VHOST = "/"
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
| caspyyy/SCRAPY_DDS | example_project/example_project/settings.py | Python | bsd-3-clause | 5,820 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
import django.contrib.auth.models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(default=False, verbose_name='superuser status', help_text='Designates that this user has all permissions without explicitly assigning them.')),
('username', models.CharField(max_length=30, unique=True, error_messages={'unique': 'A user with that username already exists.'}, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')], verbose_name='username')),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=254, verbose_name='email address')),
('phone_number', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator(regex='^\\+?\\d{5,15}$', message='Phone number is invalid.')], verbose_name='phone number', blank=True)),
('is_staff', models.BooleanField(default=False, verbose_name='staff status', help_text='Designates whether the user can log into this admin site.')),
('is_active', models.BooleanField(default=False, verbose_name='active', help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(related_query_name='user', help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', to='auth.Group', verbose_name='groups', blank=True)),
('user_permissions', models.ManyToManyField(related_query_name='user', help_text='Specific permissions for this user.', related_name='user_set', to='auth.Permission', verbose_name='user permissions', blank=True)),
],
options={
'verbose_name_plural': 'users',
'verbose_name': 'user',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| crypotex/taas | taas/user/migrations/0001_create_user_model.py | Python | gpl-2.0 | 2,973 |
from enum import Enum
from abc import ABCMeta
from kivy.uix.widget import WidgetMetaclass
class AbstractWidget(ABCMeta, WidgetMetaclass):
""" Necessary because python meta classes do not support multiple
inheritance. """
pass
class Type(Enum):
ANY = 0.9, 0.9, 0.9
DATAFRAME = .667, .224, .224
CLASSIFICATOR = .667, .424, .224
CROSS_VALIDATOR = .133, .4, .4
STATE = .667, .667, .224
STR = .408, .624, .608
class BlockType(Enum):
IO = .667, .224, .224
CLASSIFICATOR = .667, .424, .224
MODEL_SELECTION = .176, .533, .176
CROSS_VALIDATOR = .133, .4, .4
STATE = .667, .667, .224
FIT_AND_PREDICT = .345, .165, .447
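
# The enum values double as RGB colour tuples, presumably consumed when
# drawing in the Kivy UI; e.g. (illustrative):
#
#   r, g, b = Type.DATAFRAME.value   # (.667, .224, .224)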
| AlvarBer/Persimmon | persimmon/view/util/types.py | Python | mit | 673 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Module for OpenERP
# Copyright (C) 2014 Akretion (http://www.akretion.com).
# @author Sébastien BEAU <sebastien.beau@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp.osv import orm
class AutomaticWorkflowJob(orm.Model):
_inherit = 'automatic.workflow.job'
def _get_domain_for_sale_validation(self, cr, uid, context=None):
res = super(AutomaticWorkflowJob, self).\
_get_domain_for_sale_validation(cr, uid, context=context)
res.append(('exceptions_ids', '=', False))
return res
| Antiun/sale-workflow | sale_automatic_workflow_exception/automatic_workflow_job.py | Python | agpl-3.0 | 1,376 |
#coding=utf-8
import csv
class Robot:
    ## Statistics: 80% of sessions contain fewer than 25 exchanges
    ## 80% of sentences are shorter than 59 "words" (in practice ~20 Chinese words)
def __init__(self):
self.in_path = 'skin.csv'
self.out_path = 'skin.txt'
def run(self):
length_static ={}
sent_len_count = {}
with open(self.in_path,'rb') as csvfile:
#spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
spamreader = csv.reader(csvfile)
current_length = 0
first = True
cache_id = ""
current_id = ""
row_counter =0
sentence_counter=0
for row in spamreader:
if first:
first = False
continue
current_id = row[2]
content = row[6]
content_len = len(content)
sent_len_count[content_len] = sent_len_count.get(content_len,0)+1
sentence_counter+=1
if current_id !=cache_id:
length_static[current_length] = length_static.get(current_length,0)+1
row_counter+=1
current_length =0
cache_id = current_id
current_length+=1
print (",".join(row))
print (row[6])
#print(length_static)
sum=0
print("各个session中对话数的分布")
for key in length_static:
sum+=length_static[key]
print(key,sum,sum*1.0/row_counter)
sum =0
print("每条语句中的字符数量")
for key in sent_len_count:
sum+=sent_len_count[key]
print(key,sum*1.0/sentence_counter)
#print ', '.join(row)
if __name__ == '__main__':
temp = Robot()
temp.run()
| liangjz92/deeplearning4chatbot | seqs2seq/dialogue/data/scipts/dataset_length_static.py | Python | gpl-3.0 | 1,457 |
from twisted.internet.protocol import Factory, Protocol
class Graphite(Protocol):
def sendMessage(self, msg):
self.transport.write(msg)
class GraphiteFactory(Factory):
def buildProtocol(self, addr):
return Graphite()
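
# Minimal connection sketch using the standard Twisted reactor (the host and
# port are assumptions; 2003 is Graphite's conventional plaintext port):
#
#   from twisted.internet import reactor
#   reactor.connectTCP('localhost', 2003, GraphiteFactory())
#   reactor.run()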
| cmek/homeserv | uservices/graphite/graphite.py | Python | bsd-2-clause | 244 |
from django.contrib import admin
from signbank.video.models import Video, GlossVideo
#admin.site.register(Video)
class GlossVideoAdmin(admin.ModelAdmin):
search_fields = ['^gloss__idgloss']
admin.site.register(GlossVideo, GlossVideoAdmin) | Woseseltops/signbank | signbank/video/admin.py | Python | bsd-3-clause | 250 |
from zope.interface.verify import verifyObject, verifyClass
from zope.interface.exceptions import DoesNotImplement
from IOperation import IOperation
from utils import Plus
assert verifyClass(IOperation, Plus) == True
assert verifyObject(IOperation, Plus()) == True
try:
verifyObject(IOperation, object())
except DoesNotImplement:
pass
else:
raise RuntimeError("Should raise DoesNotImplement because there is no implementation")
| duboviy/zca | ifaces/verifier.py | Python | mit | 444 |
import sys # pragma: no cover
from remoteappmanager.command_line_config import (
CommandLineConfig) # pragma: no cover
from remoteappmanager.environment_config import (
EnvironmentConfig) # pragma: no cover
from remoteappmanager.file_config import FileConfig # pragma: no cover
from tornado.options import print_help # pragma: no cover
from remoteappmanager.application import Application # pragma: no cover
def main(): # pragma: no cover
try:
command_line_config = CommandLineConfig()
command_line_config.parse_config()
file_config = FileConfig()
if command_line_config.config_file:
file_config.parse_config(command_line_config.config_file)
environment_config = EnvironmentConfig()
environment_config.parse_config()
except Exception as e:
print_help()
print("Error: {}".format(e))
sys.exit(1)
app = Application(command_line_config, file_config, environment_config)
app.start()
| simphony/simphony-remote | remoteappmanager/cli/remoteappmanager/__main__.py | Python | bsd-3-clause | 1,009 |
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import ipaddr
import time
from glanceclient import exc as exc
from maas_common import get_auth_ref
from maas_common import get_glance_client
from maas_common import metric
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
IMAGE_STATUSES = ['active', 'queued', 'killed']
def check(auth_ref, args):
GLANCE_ENDPOINT = (
'http://{ip}:9292/v1'.format(ip=args.ip)
)
try:
if args.ip:
glance = get_glance_client(endpoint=GLANCE_ENDPOINT)
else:
glance = get_glance_client()
is_up = True
except exc.HTTPException:
is_up = False
# Any other exception presumably isn't an API error
except Exception as e:
status_err(str(e))
else:
# time something arbitrary
start = time.time()
glance.images.list(search_opts={'all_tenants': 1})
end = time.time()
milliseconds = (end - start) * 1000
# gather some metrics
images = glance.images.list(search_opts={'all_tenants': 1})
status_count = collections.Counter([s.status for s in images])
status_ok()
metric_bool('glance_api_local_status', is_up)
# only want to send other metrics if api is up
if is_up:
metric('glance_api_local_response_time',
'double',
'%.3f' % milliseconds,
'ms')
for status in IMAGE_STATUSES:
metric('glance_%s_images' % status,
'uint32',
status_count[status],
'images')
def main(args):
auth_ref = get_auth_ref()
check(auth_ref, args)
if __name__ == "__main__":
with print_output():
parser = argparse.ArgumentParser(description="Check Glance API against"
" local or remote address")
parser.add_argument('ip', nargs='?',
type=ipaddr.IPv4Address,
help='Optional Glance API server address')
args = parser.parse_args()
main(args)
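
# Example invocation (the address is illustrative):
#   python glance_api_local_check.py 10.240.0.2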
| claco/rpc-openstack | maas/plugins/glance_api_local_check.py | Python | apache-2.0 | 2,771 |
import urllib2
import json
APP_NAME = "GDGDevFest-Android-App"
API_KEY = "AIzaSyCsAij0bSMlGHdta3snhfxD4rAOw9WeSDg" #from the APIs console
CLIENT_ID = "903246180582.apps.googleusercontent.com" #from the APIs console
#gdgdevfest
# Conference API - specific config
# NOTE: the backend used for the Google I / O 2012 Android app is not currently open source, so
# you should modify these fields to reflect your own backend.
CONFERENCE_API_KEY = "AIzaSyA2MhtOhocnrkFvc_uyavMbrLj_Qi36Vak"
ROOT_EVENT_ID = "devfest2012"
BASE_URL = "https://google-developers.appspot.com/_ah/api/resources/v0.1"
# BASE_URL = "https://kupriyanov7/_ah/api/resources/v0.1";
# BASE_URL = "https://kupriyanov7:8080/api";
BASE_URL = 'http://www.devfest.info'
#BASE_URL = 'http://localhost:8080'
GET_ALL_EVENTS_URL = BASE_URL + "/json/events?parent_event=" + ROOT_EVENT_ID + "&api_key=" + CONFERENCE_API_KEY
GET_EVENT_DATA_URL = BASE_URL + '/json/event/%s?parent_event=' + ROOT_EVENT_ID + '&api_key=' + CONFERENCE_API_KEY
GET_ALL_TRACKS_URL = BASE_URL + "/json/event/%s/tracks?parent_event=" + ROOT_EVENT_ID + "&api_key=" + CONFERENCE_API_KEY
GET_ALL_SESSIONS_URL = BASE_URL + "/json/event/%s/sessions?parent_event=" + ROOT_EVENT_ID + "&api_key=" + CONFERENCE_API_KEY
GET_ALL_SPEAKERS_URL = BASE_URL + "/json/event/%s/speakers?event_id=" + ROOT_EVENT_ID + "&api_key=" + CONFERENCE_API_KEY
GET_ALL_SPONSORS_URL = BASE_URL + "/json/event/%s/sponsors?event_id=" + ROOT_EVENT_ID + "&api_key=" + CONFERENCE_API_KEY
GET_ALL_ANNOUNCEMENTS_URL = BASE_URL + "/json/event/%s/announcements?parent_event=" + ROOT_EVENT_ID + "&api_key=" + CONFERENCE_API_KEY
EDIT_MY_SCHEDULE_URL = BASE_URL + "/editmyschedule/o/"
# Static file host for the sandbox data
GET_SANDBOX_URL = "https://developers.google.com/events/io/sandbox-data"
#
#('^/json/events$', JsonEventListPage),
#('^/json/event/(.*)$', JsonEventPage),
#('^/json/event/(.*)/tracks$', JsonTrackListPage),
#('^/json/event/(.*)/sessions$', JsonSessionListPage),
#('^/json/event/(.*)/speakers$', JsonSpeakerListPage),
#('^/json/event/(.*)/sponsors$', JsonSponsorListPage),
#http://www.devfest.info/json/events
def validateEvents():
req = urllib2.Request(GET_ALL_EVENTS_URL)
response = urllib2.urlopen(req)
the_page = response.read()
events = json.loads(the_page)
# print events
for event in events:
print 'city:%s' % event['city']
validateEvent(event['event_id'])
#break
def validateEvent(event_id):
url = GET_EVENT_DATA_URL % event_id
print 'validateEvent:%s' % url
req = urllib2.Request(url)
response = urllib2.urlopen(req)
the_page = response.read()
#print the_page
try:
print 'tracks:%s' % validateTracks(event_id)
except Exception as e:
print e
try:
print 'sessions:%s' % validateSessions(event_id)
except Exception as e:
print e
try:
print 'speakers:%s' % validateSpeakers(event_id)
except Exception as e:
print e
try:
print 'sponsors:%s' % validateSponsors(event_id)
except Exception as e:
print e
def validateSessions(event_id):
url = GET_ALL_SESSIONS_URL % event_id
print 'validateSessions:%s' % url
req = urllib2.Request(url)
response = urllib2.urlopen(req)
the_page = response.read()
# print the_page
sessions = json.loads(the_page)
return len(sessions)
def validateTracks(event_id):
url = GET_ALL_TRACKS_URL % event_id
print 'validateTracks:%s' % url
req = urllib2.Request(url)
response = urllib2.urlopen(req)
the_page = response.read()
# print the_page
tracks = json.loads(the_page)
return len(tracks)
def validateSpeakers(event_id):
url = GET_ALL_SPEAKERS_URL % event_id
print 'validateSpeakers:%s' % url
req = urllib2.Request(url)
response = urllib2.urlopen(req)
the_page = response.read()
# print the_page
tracks = json.loads(the_page)
return len(tracks)
def validateSponsors(event_id):
url = GET_ALL_SPONSORS_URL % event_id
print 'validateSponsors:%s' % url
req = urllib2.Request(url)
response = urllib2.urlopen(req)
the_page = response.read()
# print the_page
tracks = json.loads(the_page)
return len(tracks)
validateEvents()
| printminion/gdgsched | scripts/checkAgenda.py | Python | apache-2.0 | 4,422 |
import hashlib
import csv
import os
import sys
class Log(object):
def __init__(self, f):
self.log_name = f
self.data = self.get_data(f)
self.log_dict = self.log_parser(self.data)
def get_data(self, f):
"""pulls a csv reader from the file"""
data = []
csvfile = open(f, 'rb')
reader = csv.reader(csvfile)
return reader
def log_parser(self, log):
"""parses the reader, and extracts the salient item data"""
self.log_dict = {}
for row in log:
key = os.path.join(row[4], row[5])
self.log_dict[key] = [row[4], row[5], row[8]]
return self.log_dict
class Tools(object):
def __init__(self):
pass
def compare_hashes(self, a, b):
"""returns the ans to does MD5 a == MD5 b"""
m = hashlib.md5()
m.update(a)
a_hash = m.hexdigest()
m = hashlib.md5()
m.update(b)
b_hash = m.hexdigest()
return a_hash == b_hash
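
# Quick illustration of Tools.compare_hashes (not exercised by main() below):
#   Tools().compare_hashes('abc', 'abc')   # -> True
#   Tools().compare_hashes('abc', 'abd')   # -> False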
class Checks(object):
def __init__(self, a, b):
self.a = a.log_dict
self.a_label = a.log_name
self.b = b.log_dict
self.b_label = b.log_name
self.all_checks()
def all_checks(self):
"""performs the item check by iterating through both lists per item looking for the item in the other list, and item delta"""
self.delta_check = None
for item, value, in self.a.items():
item_check = self.missing_check(item, self.b)
if item_check:
if value == self.b[item]:
pass
else:
print "'{}' has a different hash".format(item)
else:
print "'{}' is missing from '{}'".format(item, self.b_label)
print "Iteration 1 complete"
for item, value, in self.b.items():
item_check = self.missing_check(item, self.a)
if item_check:
if value == self.a[item]:
pass
else:
print "'{}' has a different hash".format(item)
else:
print "'{}' is missing from '{}'".format(item, self.a_label)
print "Iteration 2 complete"
def missing_check(self, item, my_dict):
"""returns the logic test if a is in list b"""
if item in my_dict.keys():
return True
else:
return False
def main(a, b):
tools = Tools()
log_a = Log(a)
log_b = Log(b)
checks = Checks(log_a, log_b)
if __name__ == '__main__':
######## editable block #########
"""Log file A location"""
log_a = r"tests\test_log_master.csv"
"""Log file B location"""
log_b = r"tests\test_log_all_bad_cases.csv"
#################################
if len(sys.argv) == 3:
try:
log_a = sys.argv[1]
log_b = sys.argv[2]
except:
quit("Too many, or not enough arguments")
main(log_a, log_b)
| jayGattusoNLNZ/Safe_mover | log_compare.py | Python | gpl-2.0 | 2,515 |
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test code for the Face layer of RPC Framework."""
import abc
import contextlib
import itertools
import threading
import unittest
from concurrent import futures
# test_interfaces is referenced from specification in this module.
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.face import face
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
from tests.unit.framework.common import test_coverage
from tests.unit.framework.interfaces.face import _3069_test_constant
from tests.unit.framework.interfaces.face import _digest
from tests.unit.framework.interfaces.face import _stock_service
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class _PauseableIterator(object):
def __init__(self, upstream):
self._upstream = upstream
self._condition = threading.Condition()
self._paused = False
@contextlib.contextmanager
def pause(self):
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
def __iter__(self):
return self
def next(self):
with self._condition:
while self._paused:
self._condition.wait()
return next(self._upstream)
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._called = False
self._passed_future = None
self._passed_other_stuff = None
def __call__(self, *args, **kwargs):
with self._condition:
self._called = True
if args:
self._passed_future = args[0]
if 1 < len(args) or kwargs:
self._passed_other_stuff = tuple(args[1:]), dict(kwargs)
self._condition.notify_all()
def future(self):
with self._condition:
while True:
if self._passed_other_stuff is not None:
raise ValueError(
'Test callback passed unexpected values: %s',
self._passed_other_stuff)
elif self._called:
return self._passed_future
else:
self._condition.wait()
class TestCase(test_coverage.Coverage, unittest.TestCase):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must have an "implementation" attribute of type
test_interfaces.Implementation and an "invoker_constructor" attribute of type
_invocation.InvokerConstructor.
"""
__metaclass__ = abc.ABCMeta
NAME = 'FutureInvocationAsynchronousEventServiceTest'
def setUp(self):
"""See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
self._control = test_control.PauseFailControl()
self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
self._digest = _digest.digest(
_stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool)
generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
self._digest.methods, self._digest.event_method_implementations, None)
self._invoker = self.invoker_constructor.construct_invoker(
generic_stub, dynamic_stubs, self._digest.methods)
def tearDown(self):
"""See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
self._invoker = None
self.implementation.destantiate(self._memo)
self._digest_pool.shutdown(wait=True)
def testSuccessfulUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
response = response_future.result()
test_messages.verify(request, response, self)
self.assertIs(callback.future(), response_future)
def testSuccessfulUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(request, responses, self)
def testSuccessfulStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
callback = _Callback()
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_future = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
future_passed_to_callback = callback.future()
response = future_passed_to_callback.result()
test_messages.verify(requests, response, self)
self.assertIs(future_passed_to_callback, response_future)
def testSuccessfulStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_iterator = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(requests, responses, self)
def testSequentialInvocations(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
test_messages.verify(first_request, first_response, self)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
second_response = second_response_future.result()
test_messages.verify(second_request, second_response, self)
def testParallelInvocations(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
second_response = second_response_future.result()
test_messages.verify(first_request, first_response, self)
test_messages.verify(second_request, second_response, self)
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = []
response_futures = []
for _ in range(test_constants.PARALLELISM):
request = test_messages.request()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
requests.append(request)
response_futures.append(response_future)
responses = [
response_future.result() for response_future in response_futures]
for request, response in zip(requests, responses):
test_messages.verify(request, response, self)
def testWaitingForSomeButNotAllParallelInvocations(self):
pool = logging_pool.pool(test_constants.PARALLELISM)
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = []
response_futures_to_indices = {}
for index in range(test_constants.PARALLELISM):
request = test_messages.request()
inner_response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
outer_response_future = pool.submit(inner_response_future.result)
requests.append(request)
response_futures_to_indices[outer_response_future] = index
some_completed_response_futures_iterator = itertools.islice(
futures.as_completed(response_futures_to_indices),
test_constants.PARALLELISM / 2)
for response_future in some_completed_response_futures_iterator:
index = response_futures_to_indices[response_future]
test_messages.verify(requests[index], response_future.result(), self)
pool.shutdown(wait=True)
def testCancelledUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
def testCancelledUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testCancelledStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
def testCancelledStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testExpiredUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(
group, method)(request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testExpiredUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testExpiredStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testExpiredStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testFailedUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
        # Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it, its failure is
        # indistinguishable from simply not having called its
        # response_callback before the expiration of the RPC.
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testFailedUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
        # Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it, its failure is indistinguishable
        # from simply not having called its response_consumer before the
        # expiration of the RPC.
with self._control.fail(), self.assertRaises(face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
def testFailedStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
        # Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it, its failure is
        # indistinguishable from simply not having called its
        # response_callback before the expiration of the RPC.
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testFailedStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
        # Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it, its failure is indistinguishable
        # from simply not having called its response_consumer before the
        # expiration of the RPC.
with self._control.fail(), self.assertRaises(face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
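# Hedged sketch (comment only): per the TestCase docstring, a concrete
# subclass wires in the two required attributes. The names below are
# hypothetical stand-ins, not classes defined in this package:
#
#   class _ExampleFaceTest(TestCase):
#     implementation = _MyImplementation()  # a test_interfaces.Implementation
#     invoker_constructor = _MyInvokerConstructor()  # an _invocation.InvokerConstructor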
| msiedlarek/grpc | src/python/grpcio/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py | Python | bsd-3-clause | 20,180 |
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mesonbuild.templates.sampleimpl import SampleImpl
import re
lib_h_template = '''#pragma once
#if defined _WIN32 || defined __CYGWIN__
#ifdef BUILDING_{utoken}
#define {utoken}_PUBLIC __declspec(dllexport)
#else
#define {utoken}_PUBLIC __declspec(dllimport)
#endif
#else
#ifdef BUILDING_{utoken}
#define {utoken}_PUBLIC __attribute__ ((visibility ("default")))
#else
#define {utoken}_PUBLIC
#endif
#endif
int {utoken}_PUBLIC {function_name}();
'''
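# Illustrative rendering (comment only): with utoken='FOO' and
# function_name='foo_func', the header template above expands so that
# compiling the library itself (-DBUILDING_FOO) exports the symbol while
# consumers of the installed header import it:
#
#   int FOO_PUBLIC foo_func();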
lib_objcpp_template = '''#import <{header_file}>
/* This function will not be exported and is not
* directly callable by users of this library.
*/
int internal_function() {{
return 0;
}}
int {function_name}() {{
return internal_function();
}}
'''
lib_objcpp_test_template = '''#import <{header_file}>
#import <iostream>
int main(int argc, char **argv) {{
if(argc != 1) {{
std::cout << argv[0] << " takes no arguments." << std::endl;
return 1;
}}
return {function_name}();
}}
'''
lib_objcpp_meson_template = '''project('{project_name}', 'objcpp',
version : '{version}',
default_options : ['warning_level=3'])
# These arguments are only used to build the shared library
# not the executables that use the library.
lib_args = ['-DBUILDING_{utoken}']
shlib = shared_library('{lib_name}', '{source_file}',
install : true,
objcpp_args : lib_args,
gnu_symbol_visibility : 'hidden',
)
test_exe = executable('{test_exe_name}', '{test_source_file}',
link_with : shlib)
test('{test_name}', test_exe)
# Make this library usable as a Meson subproject.
{ltoken}_dep = declare_dependency(
include_directories: include_directories('.'),
link_with : shlib)
# Make this library usable from the system's
# package manager.
install_headers('{header_file}', subdir : '{header_dir}')
pkg_mod = import('pkgconfig')
pkg_mod.generate(
name : '{project_name}',
filebase : '{ltoken}',
description : 'Meson sample project.',
subdirs : '{header_dir}',
libraries : shlib,
version : '{version}',
)
'''
hello_objcpp_template = '''#import <iostream>
#define PROJECT_NAME "{project_name}"
int main(int argc, char **argv) {{
if(argc != 1) {{
std::cout << argv[0] << " takes no arguments." << std::endl;
return 1;
}}
std::cout << "This is project " << PROJECT_NAME << "." << std::endl;
return 0;
}}
'''
hello_objcpp_meson_template = '''project('{project_name}', 'objcpp',
version : '{version}',
default_options : ['warning_level=3'])
exe = executable('{exe_name}', '{source_name}',
install : true)
test('basic', exe)
'''
class ObjCppProject(SampleImpl):
def __init__(self, options):
super().__init__()
self.name = options.name
self.version = options.version
def create_executable(self) -> None:
lowercase_token = re.sub(r'[^a-z0-9]', '_', self.name.lower())
source_name = lowercase_token + '.mm'
open(source_name, 'w', encoding='utf-8').write(hello_objcpp_template.format(project_name=self.name))
open('meson.build', 'w', encoding='utf-8').write(
hello_objcpp_meson_template.format(project_name=self.name,
exe_name=lowercase_token,
source_name=source_name,
version=self.version))
def create_library(self) -> None:
lowercase_token = re.sub(r'[^a-z0-9]', '_', self.name.lower())
uppercase_token = lowercase_token.upper()
function_name = lowercase_token[0:3] + '_func'
test_exe_name = lowercase_token + '_test'
lib_h_name = lowercase_token + '.h'
lib_objcpp_name = lowercase_token + '.mm'
test_objcpp_name = lowercase_token + '_test.mm'
kwargs = {'utoken': uppercase_token,
'ltoken': lowercase_token,
'header_dir': lowercase_token,
'function_name': function_name,
'header_file': lib_h_name,
'source_file': lib_objcpp_name,
'test_source_file': test_objcpp_name,
'test_exe_name': test_exe_name,
'project_name': self.name,
'lib_name': lowercase_token,
'test_name': lowercase_token,
'version': self.version,
}
open(lib_h_name, 'w', encoding='utf-8').write(lib_h_template.format(**kwargs))
open(lib_objcpp_name, 'w', encoding='utf-8').write(lib_objcpp_template.format(**kwargs))
open(test_objcpp_name, 'w', encoding='utf-8').write(lib_objcpp_test_template.format(**kwargs))
open('meson.build', 'w', encoding='utf-8').write(lib_objcpp_meson_template.format(**kwargs))
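# Illustrative sketch (comment only): ObjCppProject is driven with an options
# object exposing ``name`` and ``version`` attributes, as read in __init__
# above. In Meson itself that object comes from ``meson init`` argument
# parsing; a hypothetical stand-in would be:
#
#   class _FakeOptions:
#       name = 'demo'
#       version = '0.1'
#
#   ObjCppProject(_FakeOptions()).create_executable()  # writes demo.mm, meson.build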
| jpakkane/meson | mesonbuild/templates/objcpptemplates.py | Python | apache-2.0 | 5,358 |
import numpy as np
from matplotlib.pylab import imread
from scipy import io as sio
from scipy import optimize
import os, sys
from base import image as sig
from base import data as dm
from base import files as f
from emmetrop.database import Database as db
class Images(object):
"""Compute the amplitude spectrum of an image.
Currently working on using a database of images.
.. todo::
Better image analysis routines.
"""
def __init__(self):
"""
Creates a database instance
"""
self.Dbase = db.Database()
if os.path.basename(os.getcwd()) == 'emmetrop':
p = ''
else:
p = './emmetrop/'
try:
self.Dbase.OpenDatabase(p + 'ImageDatabase.h5')
except db.DatabaseError:
self.Dbase.CreateDatabase('ImageDatabase')
print 'created new image database'
self.amp_mean = None
self.getData()
# multiply by 2 to correct for only taking half of spectrum
# 46 pixels / degree as reported in Garrigan et al.
self.imagexval = np.arange(1,self.amp_mean.shape[0] + 1) / 46.0 * 2.0
self.powerlaw = self._PowerLaw(self.imagexval[10:300],
self.amp_mean[10:300] ** 2.0)
def returnImageData(self):
"""
"""
imageData = {
'totalImages': len(self.ampSpecs),
#'ampSpecs': self.ampSpecs,
'rawAmp': self.rawAmp,
'ampMean': self.amp_mean ** 2.0, # make it power
'decibels': sig.decibels(self.amp_mean ** 2.0),
'powerlaw': self.powerlaw,
'imagexval': self.imagexval
}
return imageData
def getData(self, Directory = None, GroupName = None):
"""Find data in a database or import it if it does not exist.
:param Directory: list of directories of images to analyze.
:type Directory: list
:param GroupName: name of groups in directory list. If None a name \
is approximated based on directory paths
:type GroupName: str
:returns: Amplitude spectrum
        :rtype: numpy.array
This function will find all jpg files in the list of directories
check to see if they are in the HDF5 ImageDatabase, import them if
they are not, and return the amplitude spectrum for the group.
.. todo::
* ``high prior`` Compute Information for images as well.
* ``low prior`` make a gui - use the database gui. Introduce images.
"""
if not Directory:
if sys.platform == 'darwin':
Directory = ['/Users/brianschmidt/Documents/cd01A/']
index = [-6, -1]
if sys.platform == 'win32':
Directory = ['C:/Data/UPenn_Images/Images/cd01A',
'C:/Data/UPenn_Images/Images/cd02A',
'C:/Data/UPenn_Images/Images/cd06A',
'C:/Data/UPenn_Images/Images/cd32A',
'C:/Data/UPenn_Images/Images/cd38A',
'C:/Data/UPenn_Images/Images/cd41A',
'C:/Data/UPenn_Images/Images/cd58A']
index = [-5, len(Directory[0])]
self.ampSpecs = []
        self.rawAmp = []
for group in Directory:
GroupName = group[index[0]:index[1]]
files = f.getAllFiles(group, suffix='.JPG', subdirectories=1)
if self.Dbase.Exists(GroupName) == False:
self.Dbase.CreateGroup(GroupName)
for path in files:
img = None
imgBW = None
amplitude = None
name = os.path.basename(path[:-4]) # remove .jpg
if self.Dbase.Exists(name, GroupName) == False:
self.Dbase.CreateGroup(name, GroupName)
# subnode == Image
if self.Dbase.Exists('Image',
GroupName + '.' + name) == False:
self.Dbase.CreateGroup('Image', GroupName + '/' + name)
if self.Dbase.Exists('raw_image',
GroupName + '.' + name + '.Image') == False:
img = imread(path)
self.Dbase.AddData2Database('raw_image', img,
GroupName + '.' + name + '.Image')
if self.Dbase.Exists('grayscale',
GroupName + '.' + name + '.Image') == False:
                    if img is None:
img = self.Dbase.QueryDatabase(GroupName,
name + '.' + 'Image',
'raw_image')
imgBW = sig.rgb2gray(img)
                    self.Dbase.AddData2Database('grayscale', imgBW,
                                                GroupName + '.' + name
                                                + '.Image')
# metadata stuff:
if self.Dbase.Exists('path', GroupName + '.' + name) == False:
self.Dbase.AddData2Database('path',
np.array([path], dtype=str),
GroupName + '.' + name)
if self.Dbase.Exists('goodFile',
GroupName + '.' + name) == False:
goodFile = self.getGoodFiles(path)
self.Dbase.AddData2Database('goodFile',
np.array([goodFile],
dtype=bool),
GroupName + '.' + name)
else:
goodFile = self.Dbase.QueryDatabase(GroupName, name,
'goodFile')
# subnode == Amplitude spectrum
if self.Dbase.Exists('amplitude',
GroupName + '.' + name) == False:
self.Dbase.CreateGroup('amplitude', GroupName + '/' + name)
# raw amplitude spectrum first
if self.Dbase.Exists('raw_amplitude',
GroupName + '.' + name
+ '.' + 'amplitude') == False:
                    if imgBW is None:
imgBW = self.Dbase.QueryDatabase(GroupName,
name + '.' 'Image',
'grayscale')
amplitude = sig.welch2d(imgBW[500:2000, 500:2000])
self.Dbase.AddData2Database('raw_amplitude', amplitude,
GroupName + '.' + name
+ '.' + 'amplitude')
                    if goodFile:
                        self.rawAmp.append(amplitude)
else:
                    if goodFile:
self.rawAmp.append(self.Dbase.QueryDatabase(GroupName,
name + '.' + 'amplitude',
'raw_amplitude'))
# then amplitude density
if self.Dbase.Exists('amplitude_density',
GroupName + '.' + name +
'.' + 'amplitude') == False:
                    if amplitude is None:
amplitude = self.Dbase.QueryDatabase(GroupName,
name + '.' + 'amplitude',
'raw_amplitude')
amplitude = sig.Density(amplitude)
self.Dbase.AddData2Database('amplitude_density', amplitude,
GroupName + '.' + name + '.'
+ 'amplitude')
                    if goodFile:
self.ampSpecs.append(amplitude)
# if already exists, query database to get amplitude spectrums:
else:
if goodFile:
self.ampSpecs.append(
self.Dbase.QueryDatabase(GroupName,
name + '.' + 'amplitude',
'amplitude_density'))
self.Dbase.file.flush()
self.Dbase.CloseDatabase()
self.amp_mean = np.zeros((self.ampSpecs[0].shape))
total_images = len(self.ampSpecs)
print 'number of images: ', total_images
for amp in self.ampSpecs:
self.amp_mean += amp / total_images
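    # Illustrative map (comment only) of the HDF5 layout getData() builds,
    # inferred from the CreateGroup/AddData2Database calls above:
    #
    #   /<GroupName>/<image name>/path, goodFile
    #   /<GroupName>/<image name>/Image/raw_image, grayscale
    #   /<GroupName>/<image name>/amplitude/raw_amplitude, amplitude_density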
    def computeImageHist(self, img):
"""Compute the histogram of a grayscale (for now) image
:param img: input image.
:type img: np.array
this function is not complete.
"""
#image_shape = img.shape
#BINS = np.linspace(0,4 * 10**4, BIN_RESOLUTION + 1)
pass
#preallocate memory
def getGoodFiles(self, path):
"""Find files that have warnings and skip them. Marks good files (no
warning) with a 1 and bad files with a 0. Usually a file has a warning
        if greater than 5% of its pixels are saturated.
:param path: path to .jpg file. Will remove .jpg and append '_AUX.mat'
:type path: dir
:returns: bool indicating whether the current image has a warning.
This function is called by getData(). The results will be stored in
the image database. Only images that return True will be used in
analysis.
"""
# First find an index of images that contain pixel saturations:
try:
AUX_file = sio.loadmat(path[:-4] + '_AUX.mat')
        except ValueError:
            print "No Auxiliary files found. Check directory path or download \
from: ftp://tofu.psych.upenn.edu/"
            raise
if AUX_file['Image']['warning'][0][0].shape[0] > 0:
goodFile = False
        else:
goodFile = True
return goodFile
def _PowerLaw(self, xdata, ydata):
"""Create an array according to a power law for plotting.
:param xdata: x values at which to fit a power law.
:type xdata: numpy.array
:param ydata: y values used to fit power law.
:type ydata: numpy.array
:returns: creates a handle to the lambda function to compute a \
        power law fit to the input data.
:rtype: function handle
.. note:: * Previously used only for drawing a power law, \
no fitting.
* Uncomment code below to reintroduce that functionality.
* Fitting code taken from `Scipy`_
* See above reference to introduce error bar to fit.
.. _Scipy: http://www.scipy.org/Cookbook/FittingData
"""
logx = np.log10(xdata)
logy = np.log10(ydata)
#logyerr = yerr / ydata
# define our (line) fitting function
fitfunc = lambda p, x: p[0] + p[1] * x
errfunc = lambda p, x, y: (y - fitfunc(p, x))
pinit = [1.0, -1.0]
out = optimize.leastsq(errfunc, pinit,
args=(logx, logy), full_output=1)
pfinal = out[0]
index = pfinal[1]
print '1/f alpha', index
amp = 10.0**pfinal[0]
        return lambda x: amp * (x ** index)
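    # Hedged example (comment only, synthetic data): the log-log least-squares
    # fit above is equivalent to a first-order polynomial fit in log space.
    # For y = 3 * x ** -1.2 the recovered coefficients give back the power law:
    #
    #   x = np.arange(1, 101, dtype=float)
    #   y = 3.0 * x ** -1.2
    #   slope, intercept = np.polyfit(np.log10(x), np.log10(y), 1)
    #   # slope ~= -1.2 and 10 ** intercept ~= 3.0, matching _PowerLaw's output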
| bps10/emmetrop | emmetrop/scene/Images.py | Python | mit | 13,121 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.core.urlresolvers import reverse
from mox import IsA
from horizon import api
from horizon import test
from .workflows import CreateProject, UpdateProject
from .views import QUOTA_FIELDS
INDEX_URL = reverse('horizon:admin:projects:index')
class TenantsViewTests(test.BaseAdminViewTests):
def test_index(self):
self.mox.StubOutWithMock(api.keystone, 'tenant_list')
api.keystone.tenant_list(IsA(http.HttpRequest), admin=True) \
.AndReturn(self.tenants.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/projects/index.html')
self.assertItemsEqual(res.context['table'].data, self.tenants.list())
class CreateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_project_info(self, project):
project_info = {"tenant_name": project.name,
"description": project.description,
"enabled": project.enabled}
return project_info
def _get_workflow_fields(self, project):
project_info = {"name": project.name,
"description": project.description,
"enabled": project.enabled}
return project_info
def _get_quota_info(self, quota):
quota_data = {}
for field in QUOTA_FIELDS:
quota_data[field] = int(getattr(quota, field, None))
return quota_data
def _get_workflow_data(self, project, quota):
project_info = self._get_workflow_fields(project)
quota_data = self._get_quota_info(quota)
project_info.update(quota_data)
return project_info
@test.create_stubs({api: ('tenant_quota_defaults',
'get_default_role',),
api.keystone: ('user_list',
'role_list',)})
def test_add_project_get(self):
quota = self.quotas.first()
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
api.tenant_quota_defaults(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(quota)
# init
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
self.mox.ReplayAll()
url = reverse('horizon:admin:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/projects/create.html')
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name, CreateProject.name)
step = workflow.get_step("createprojectinfoaction")
self.assertEqual(step.action.initial['ram'], quota.ram)
self.assertEqual(step.action.initial['injected_files'],
quota.injected_files)
self.assertQuerysetEqual(workflow.steps,
['<CreateProjectInfo: createprojectinfoaction>',
'<UpdateProjectMembers: update_members>',
'<UpdateProjectQuota: update_quotas>'])
@test.create_stubs({api: ('get_default_role',
'tenant_quota_defaults',
'add_tenant_user_role',),
api.keystone: ('tenant_create',
'user_list',
'role_list'),
api.nova: ('tenant_quota_update',)})
def test_add_project_post(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
# init
api.tenant_quota_defaults(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(quota)
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# contribute
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
workflow_data = {}
for role in roles:
if "role_" + role.id in workflow_data:
ulist = workflow_data["role_" + role.id]
for user in ulist:
api.add_tenant_user_role(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
user_id=user,
role_id=role.id)
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**quota_data)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:admin:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api: ('tenant_quota_defaults',
'get_default_role',),
api.keystone: ('user_list',
'role_list',)})
def test_add_project_quota_defaults_error(self):
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
# init
api.tenant_quota_defaults(IsA(http.HttpRequest), self.tenant.id) \
.AndRaise(self.exceptions.nova)
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
self.mox.ReplayAll()
url = reverse('horizon:admin:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/projects/create.html')
self.assertContains(res, "Unable to retrieve default quota values")
@test.create_stubs({api: ('get_default_role',
'tenant_quota_defaults',),
api.keystone: ('tenant_create',
'user_list',
'role_list',)})
def test_add_project_tenant_create_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
# init
api.tenant_quota_defaults(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(quota)
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# contribute
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# handle
project_details = self._get_project_info(project)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(project, quota)
url = reverse('horizon:admin:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api: ('get_default_role',
'tenant_quota_defaults',
'add_tenant_user_role',),
api.keystone: ('tenant_create',
'user_list',
'role_list'),
api.nova: ('tenant_quota_update',)})
def test_add_project_quota_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
# init
api.tenant_quota_defaults(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(quota)
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# contribute
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
workflow_data = {}
for role in roles:
if "role_" + role.id in workflow_data:
ulist = workflow_data["role_" + role.id]
for user in ulist:
api.add_tenant_user_role(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
user_id=user,
role_id=role.id)
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**quota_data) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:admin:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api: ('get_default_role',
'tenant_quota_defaults',
'add_tenant_user_role',),
api.keystone: ('tenant_create',
'user_list',
'role_list',),
api.nova: ('tenant_quota_update',)})
def test_add_project_user_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
# init
api.tenant_quota_defaults(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(quota)
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# contribute
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
workflow_data = {}
for role in roles:
if "role_" + role.id in workflow_data:
ulist = workflow_data["role_" + role.id]
for user in ulist:
api.add_tenant_user_role(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
user_id=user,
role_id=role.id) \
.AndRaise(self.exceptions.keystone)
break
break
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**quota_data)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:admin:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api: ('get_default_role',
'tenant_quota_defaults',),
api.keystone: ('user_list',
'role_list',)})
def test_add_project_missing_field_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
# init
api.tenant_quota_defaults(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(quota)
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# contribute
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(project, quota)
workflow_data["name"] = ""
url = reverse('horizon:admin:projects:create')
res = self.client.post(url, workflow_data)
self.assertContains(res, "field is required")
class UpdateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_quota_info(self, quota):
quota_data = {}
for field in QUOTA_FIELDS:
quota_data[field] = int(getattr(quota, field, None))
return quota_data
@test.create_stubs({api: ('get_default_role',
'roles_for_user',
'tenant_get',
'tenant_quota_get',),
api.keystone: ('user_list',
'role_list',)})
def test_update_project_get(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
api.tenant_get(IsA(http.HttpRequest), self.tenant.id, admin=True) \
.AndReturn(project)
api.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(quota)
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
for user in users:
api.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
self.mox.ReplayAll()
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/projects/update.html')
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name, UpdateProject.name)
step = workflow.get_step("update_info")
self.assertEqual(step.action.initial['ram'], quota.ram)
self.assertEqual(step.action.initial['injected_files'],
quota.injected_files)
self.assertEqual(step.action.initial['name'], project.name)
self.assertEqual(step.action.initial['description'],
project.description)
self.assertQuerysetEqual(workflow.steps,
['<UpdateProjectInfo: update_info>',
'<UpdateProjectMembers: update_members>',
'<UpdateProjectQuota: update_quotas>'])
@test.create_stubs({api: ('tenant_get',
'tenant_quota_get',
'tenant_update',
'tenant_quota_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role'),
api.keystone: ('user_list',
'role_list',)})
def test_update_project_post(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
current_roles = self.roles.list()
# get/init
api.tenant_get(IsA(http.HttpRequest), self.tenant.id, admin=True) \
.AndReturn(project)
api.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(quota)
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
workflow_data = {}
for user in users:
api.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
role_ids = [role.id for role in roles]
if role_ids:
workflow_data.setdefault("role_" + role_ids[0], []) \
.append(user.id)
# update some fields
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"tenant_name": project._info["name"],
"tenant_id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# contribute
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# handle
api.tenant_update(IsA(http.HttpRequest), **updated_project) \
.AndReturn(project)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
api.keystone.user_list(IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(users)
for user in users:
api.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id) \
.AndReturn(current_roles)
for role in roles:
if "role_" + role.id in workflow_data:
ulist = workflow_data["role_" + role.id]
if role not in current_roles:
api.add_tenant_user_role(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
user_id=user,
role_id=role.id)
else:
current_roles.pop(current_roles.index(role))
for to_delete in current_roles:
api.remove_tenant_user_role(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
user_id=user.id,
role_id=to_delete.id)
for role in roles:
if "role_" + role.id in workflow_data:
ulist = workflow_data["role_" + role.id]
for user in ulist:
if not filter(lambda x: user == x.id, users):
api.add_tenant_user_role(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
user_id=user,
role_id=role.id)
api.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**updated_quota)
self.mox.ReplayAll()
# submit form data
project_data = {"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api: ('tenant_get',)})
def test_update_project_get_error(self):
api.tenant_get(IsA(http.HttpRequest), self.tenant.id, admin=True) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api: ('tenant_get',
'tenant_quota_get',
'tenant_update',
'tenant_quota_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user',
'add_tenant_user_role'),
api.keystone: ('user_list',
'role_list',)})
def test_update_project_tenant_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
# get/init
api.tenant_get(IsA(http.HttpRequest), self.tenant.id, admin=True) \
.AndReturn(project)
api.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(quota)
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
workflow_data = {}
for user in users:
api.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
role_ids = [role.id for role in roles]
if role_ids:
workflow_data.setdefault("role_" + role_ids[0], []) \
.append(user.id)
# update some fields
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"tenant_name": project._info["name"],
"tenant_id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# contribute
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# handle
api.tenant_update(IsA(http.HttpRequest), **updated_project) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
# submit form data
project_data = {"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api: ('tenant_get',
'tenant_quota_get',
'tenant_update',
'tenant_quota_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role'),
api.keystone: ('user_list',
'role_list',)})
def test_update_project_quota_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
current_roles = self.roles.list()
# get/init
api.tenant_get(IsA(http.HttpRequest), self.tenant.id, admin=True) \
.AndReturn(project)
api.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(quota)
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
workflow_data = {}
for user in users:
api.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
role_ids = [role.id for role in roles]
if role_ids:
workflow_data.setdefault("role_" + role_ids[0], []) \
.append(user.id)
# update some fields
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"tenant_name": project._info["name"],
"tenant_id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# contribute
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
        # handle
api.tenant_update(IsA(http.HttpRequest), **updated_project) \
.AndReturn(project)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
api.keystone.user_list(IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(users)
for user in users:
api.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id) \
.AndReturn(current_roles)
for role in roles:
if "role_" + role.id in workflow_data:
ulist = workflow_data["role_" + role.id]
if role not in current_roles:
api.add_tenant_user_role(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
user_id=user,
role_id=role.id)
else:
current_roles.pop(current_roles.index(role))
for to_delete in current_roles:
api.remove_tenant_user_role(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
user_id=user.id,
role_id=to_delete.id)
for role in roles:
if "role_" + role.id in workflow_data:
ulist = workflow_data["role_" + role.id]
for user in ulist:
if not filter(lambda x: user == x.id, users):
api.add_tenant_user_role(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
user_id=user,
role_id=role.id)
api.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**updated_quota).AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
# submit form data
project_data = {"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api: ('tenant_get',
'tenant_quota_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role'),
api.keystone: ('user_list',
'role_list',)})
def test_update_project_member_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
users = self.users.list()
roles = self.roles.list()
current_roles = self.roles.list()
# get/init
api.tenant_get(IsA(http.HttpRequest), self.tenant.id, admin=True) \
.AndReturn(project)
api.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(quota)
api.get_default_role(IsA(http.HttpRequest)).AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest)).AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
workflow_data = {}
for user in users:
api.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
role_ids = [role.id for role in roles]
if role_ids:
workflow_data.setdefault("role_" + role_ids[0], []) \
.append(user.id)
# update some fields
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"tenant_name": project._info["name"],
"tenant_id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# contribute
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
# handle
api.tenant_update(IsA(http.HttpRequest), **updated_project) \
.AndReturn(project)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
api.keystone.user_list(IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(users)
for user in users:
api.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id) \
.AndReturn(current_roles)
for role in roles:
if "role_" + role.id in workflow_data:
if role not in current_roles:
api.add_tenant_user_role(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
user_id=user,
role_id=role.id)
else:
current_roles.pop(current_roles.index(role))
for to_delete in current_roles:
api.remove_tenant_user_role(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
user_id=user.id,
role_id=to_delete.id) \
.AndRaise(self.exceptions.nova)
break
break
self.mox.ReplayAll()
# submit form data
project_data = {"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
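# Illustrative note (comment only): every test above follows the same mox
# record/replay lifecycle:
#
#   1. @test.create_stubs replaces the listed api functions with mocks;
#   2. api.foo(...).AndReturn(...) / .AndRaise(...) record expected calls;
#   3. self.mox.ReplayAll() switches the mocks from record to replay mode;
#   4. exercising the view then verifies the calls occur exactly as recorded
#      (verification happens in the test harness teardown).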
| 1ukash/horizon | horizon/dashboards/admin/projects/tests.py | Python | apache-2.0 | 34,446 |
# -*- coding: utf-8 -*-
"""Tests for certificates views. """
import datetime
import json
from collections import OrderedDict
from uuid import uuid4
import ddt
import six
from django.conf import settings
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from mock import patch
from six.moves import range
from six.moves.urllib.parse import urlencode # pylint: disable=import-error
from course_modes.models import CourseMode
from lms.djangoapps.badges.events.course_complete import get_completion_badge
from lms.djangoapps.badges.tests.factories import (
BadgeAssertionFactory,
BadgeClassFactory,
CourseCompleteImageConfigurationFactory
)
from lms.djangoapps.certificates.api import get_certificate_url
from lms.djangoapps.certificates.models import (
CertificateGenerationCourseSetting,
CertificateHtmlViewConfiguration,
CertificateSocialNetworks,
CertificateStatuses,
CertificateTemplate,
CertificateTemplateAsset,
GeneratedCertificate
)
from lms.djangoapps.certificates.tests.factories import (
CertificateHtmlViewConfigurationFactory,
GeneratedCertificateFactory,
LinkedInAddToProfileConfigurationFactory
)
from lms.djangoapps.grades.tests.utils import mock_passing_grade
from openedx.core.djangoapps.certificates.config import waffle
from openedx.core.djangoapps.dark_lang.models import DarkLangConfig
from openedx.core.djangoapps.site_configuration.tests.test_util import (
with_site_configuration,
with_site_configuration_context
)
from openedx.core.djangolib.js_utils import js_escaped_string
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from openedx.core.lib.tests.assertions.events import assert_event_matches
from student.roles import CourseStaffRole
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from track.tests import EventTrackingTestCase
from util import organizations_helpers as organizations_api
from util.date_utils import strftime_localized
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True
FEATURES_WITH_BADGES_ENABLED = FEATURES_WITH_CERTS_ENABLED.copy()
FEATURES_WITH_BADGES_ENABLED['ENABLE_OPENBADGES'] = True
FEATURES_WITH_CERTS_DISABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_DISABLED['CERTIFICATES_HTML_VIEW'] = False
FEATURES_WITH_CUSTOM_CERTS_ENABLED = {
"CUSTOM_CERTIFICATE_TEMPLATES_ENABLED": True
}
FEATURES_WITH_CUSTOM_CERTS_ENABLED.update(FEATURES_WITH_CERTS_ENABLED)
class CommonCertificatesTestCase(ModuleStoreTestCase):
"""
Common setUp and utility methods for Certificate tests
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
super(CommonCertificatesTestCase, self).setUp()
self.client = Client()
self.course = CourseFactory.create(
org='testorg',
number='run1',
display_name='refundable course',
certificate_available_date=datetime.datetime.today() - datetime.timedelta(days=1),
)
self.course_id = self.course.location.course_key
self.user = UserFactory.create(
email='joe_user@edx.org',
username='joeuser',
password='foo'
)
self.user.profile.name = "Joe User"
self.user.profile.save()
self.client.login(username=self.user.username, password='foo')
self.request = RequestFactory().request()
self.linkedin_url = u'http://www.linkedin.com/profile/add?{params}'
self.cert = GeneratedCertificateFactory.create(
user=self.user,
course_id=self.course_id,
download_uuid=uuid4(),
download_url="http://www.example.com/certificates/download",
grade="0.95",
key='the_key',
distinction=True,
status='downloadable',
mode='honor',
name=self.user.profile.name,
)
CourseEnrollmentFactory.create(
user=self.user,
course_id=self.course_id,
mode=CourseMode.HONOR,
)
CertificateHtmlViewConfigurationFactory.create()
LinkedInAddToProfileConfigurationFactory.create()
CourseCompleteImageConfigurationFactory.create()
def _add_course_certificates(self, count=1, signatory_count=0, is_active=True):
"""
Create certificate for the course.
"""
signatories = [
{
'name': 'Signatory_Name ' + str(i),
'title': 'Signatory_Title ' + str(i),
'organization': 'Signatory_Organization ' + str(i),
'signature_image_path': u'/static/certificates/images/demo-sig{}.png'.format(i),
'id': i
} for i in range(signatory_count)
]
certificates = [
{
'id': i,
'name': 'Name ' + str(i),
'description': 'Description ' + str(i),
'course_title': 'course_title_' + str(i),
'org_logo_path': u'/t4x/orgX/testX/asset/org-logo-{}.png'.format(i),
'signatories': signatories,
'version': 1,
'is_active': is_active
} for i in range(count)
]
self.course.certificates = {'certificates': certificates}
self.course.cert_html_view_enabled = True
self.course.save()
self.store.update_item(self.course, self.user.id)
def _create_custom_template(self, org_id=None, mode=None, course_key=None, language=None):
"""
Creates a custom certificate template entry in DB.
"""
template_html = u"""
<%namespace name='static' file='static_content.html'/>
<html>
<body>
lang: ${LANGUAGE_CODE}
course name: ${accomplishment_copy_course_name}
mode: ${course_mode}
${accomplishment_copy_course_description}
${twitter_url}
<img class="custom-logo" src="${static.certificate_asset_url('custom-logo')}" />
</body>
</html>
"""
template = CertificateTemplate(
name='custom template',
template=template_html,
organization_id=org_id,
course_key=course_key,
mode=mode,
is_active=True,
language=language
)
template.save()
def _create_custom_named_template(self, template_name, org_id=None, mode=None, course_key=None, language=None):
"""
Creates a custom certificate template entry in DB.
"""
template_html = u"""
<%namespace name='static' file='static_content.html'/>
<html>
<body>
lang: ${LANGUAGE_CODE}
course name: """ + template_name + u"""
mode: ${course_mode}
${accomplishment_copy_course_description}
${twitter_url}
<img class="custom-logo" src="${static.certificate_asset_url('custom-logo')}" />
</body>
</html>
"""
template = CertificateTemplate(
name=template_name,
template=template_html,
organization_id=org_id,
course_key=course_key,
mode=mode,
is_active=True,
language=language
)
template.save()
def _create_custom_template_with_hours_of_effort(self, org_id=None, mode=None, course_key=None, language=None):
"""
Creates a custom certificate template entry in DB that includes hours of effort.
"""
template_html = u"""
<%namespace name='static' file='static_content.html'/>
<html>
<body>
lang: ${LANGUAGE_CODE}
course name: ${accomplishment_copy_course_name}
mode: ${course_mode}
% if hours_of_effort:
hours of effort: ${hours_of_effort}
% endif
${accomplishment_copy_course_description}
${twitter_url}
<img class="custom-logo" src="${static.certificate_asset_url('custom-logo')}" />
</body>
</html>
"""
template = CertificateTemplate(
name='custom template',
template=template_html,
organization_id=org_id,
course_key=course_key,
mode=mode,
is_active=True,
language=language
)
template.save()
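    # Illustrative sketch (comment only): after _add_course_certificates(
    # count=1, signatory_count=1), course.certificates is shaped roughly like
    #
    #   {'certificates': [{'id': 0, 'name': 'Name 0',
    #                      'description': 'Description 0',
    #                      'course_title': 'course_title_0',
    #                      'org_logo_path': u'/t4x/orgX/testX/asset/org-logo-0.png',
    #                      'signatories': [{'name': 'Signatory_Name 0', ...}],
    #                      'version': 1, 'is_active': True}]}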
@ddt.ddt
class CertificatesViewsTests(CommonCertificatesTestCase, CacheIsolationTestCase):
"""
Tests for the certificates web/html views
"""
def setUp(self):
super(CertificatesViewsTests, self).setUp()
self.mock_course_run_details = {
'content_language': 'en',
'weeks_to_complete': '4',
'max_effort': '10'
}
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_linkedin_share_url(self):
"""
Test: LinkedIn share URL.
"""
self._add_course_certificates(count=1, signatory_count=1, is_active=True)
test_url = get_certificate_url(course_id=self.course.id, uuid=self.cert.verify_uuid)
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
params = OrderedDict([
('_ed', '0_0dPSPyS070e0HsE9HNz_13_d11_',),
('pfCertificationName', u'{platform_name} Honor Code Certificate for {course_name}'.format(
platform_name=settings.PLATFORM_NAME,
course_name=self.course.display_name,
).encode('utf-8'),),
('pfCertificationUrl', self.request.build_absolute_uri(test_url),),
])
self.assertContains(
response,
js_escaped_string(self.linkedin_url.format(params=urlencode(params))),
)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
@with_site_configuration(
configuration={
'platform_name': 'My Platform Site', 'LINKEDIN_COMPANY_ID': 'test_linkedin_my_site',
},
)
def test_linkedin_share_url_site(self):
"""
Test: LinkedIn share URL should be visible when called from within a site.
"""
self._add_course_certificates(count=1, signatory_count=1, is_active=True)
test_url = get_certificate_url(course_id=self.cert.course_id, uuid=self.cert.verify_uuid)
response = self.client.get(test_url, HTTP_HOST='test.localhost')
self.assertEqual(response.status_code, 200)
# the linkedIn share URL with appropriate parameters should be present
params = OrderedDict([
('_ed', 'test_linkedin_my_site',),
('pfCertificationName', u'My Platform Site Honor Code Certificate for {course_name}'.format(
course_name=self.course.display_name,
).encode('utf-8'),),
('pfCertificationUrl', 'http://test.localhost' + test_url,),
])
self.assertContains(
response,
js_escaped_string(self.linkedin_url.format(params=urlencode(params))),
)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
@patch.dict("django.conf.settings.SOCIAL_SHARING_SETTINGS", {"CERTIFICATE_FACEBOOK": True})
@with_site_configuration(
configuration={'FACEBOOK_APP_ID': 'test_facebook_my_site'},
)
def test_facebook_share_url_site(self):
"""
Test: Facebook share URL should be visible when web cert called from within a white label
site and it should use white label site's FACEBOOK_APP_ID.
"""
self._add_course_certificates(count=1, signatory_count=1, is_active=True)
test_url = get_certificate_url(course_id=self.cert.course_id, uuid=self.cert.verify_uuid)
response = self.client.get(test_url, HTTP_HOST='test.localhost')
self.assertContains(response, "Post on Facebook")
self.assertContains(response, 'test_facebook_my_site')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
@ddt.data(
(False, False, False),
(False, False, True),
(False, True, True),
(True, True, True),
(True, True, False),
)
@ddt.unpack
def test_social_sharing_availability_site(self, facebook_sharing, twitter_sharing, linkedin_sharing):
"""
Test: Facebook, Twitter and LinkedIn sharing availability for sites.
"""
self._add_course_certificates(count=1, signatory_count=1, is_active=True)
test_url = get_certificate_url(course_id=self.cert.course_id, uuid=self.cert.verify_uuid)
social_sharing_settings = dict(
CERTIFICATE_FACEBOOK=facebook_sharing,
CERTIFICATE_TWITTER=twitter_sharing,
CERTIFICATE_LINKEDIN=linkedin_sharing,
)
with with_site_configuration_context(
configuration={
'platform_name': 'My Platform Site',
'SOCIAL_SHARING_SETTINGS': social_sharing_settings,
},
):
response = self.client.get(test_url, HTTP_HOST='test.localhost')
self.assertEqual(response.status_code, 200)
self.assertEqual("Post on Facebook" in response.content.decode('utf-8'), facebook_sharing)
self.assertEqual("Share on Twitter" in response.content.decode('utf-8'), twitter_sharing)
self.assertEqual("Add to LinkedIn Profile" in response.content.decode('utf-8'), linkedin_sharing)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_facebook_default_text_site(self):
"""
Test: Facebook sharing default text for sites.
"""
self._add_course_certificates(count=1, signatory_count=1, is_active=True)
test_url = get_certificate_url(course_id=self.cert.course_id, uuid=self.cert.verify_uuid)
facebook_text = "Facebook text on Test Site"
social_sharing_settings = dict(
CERTIFICATE_FACEBOOK=True,
CERTIFICATE_FACEBOOK_TEXT=facebook_text,
)
with with_site_configuration_context(
configuration={
'SOCIAL_SHARING_SETTINGS': social_sharing_settings,
},
):
response = self.client.get(test_url, HTTP_HOST='test.localhost')
self.assertContains(response, facebook_text)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_twitter_default_text_site(self):
"""
Test: Twitter sharing default text for sites.
"""
self._add_course_certificates(count=1, signatory_count=1, is_active=True)
test_url = get_certificate_url(course_id=self.cert.course_id, uuid=self.cert.verify_uuid)
twitter_text = "Twitter text on Test Site"
social_sharing_settings = dict(
CERTIFICATE_TWITTER=True,
CERTIFICATE_TWITTER_TEXT=twitter_text,
)
with with_site_configuration_context(
configuration={
'SOCIAL_SHARING_SETTINGS': social_sharing_settings,
},
):
response = self.client.get(test_url, HTTP_HOST='test.localhost')
self.assertContains(response, twitter_text)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_rendering_course_organization_data(self):
"""
Test: organization data should render on certificate web view if course has organization.
"""
test_organization_data = {
'name': 'test organization',
'short_name': 'test_organization',
'description': 'Test Organization Description',
'active': True,
'logo': '/logo_test1.png/'
}
test_org = organizations_api.add_organization(organization_data=test_organization_data)
organizations_api.add_organization_course(organization_data=test_org, course_id=six.text_type(self.course.id))
self._add_course_certificates(count=1, signatory_count=1, is_active=True)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertContains(
response,
'a course of study offered by test_organization, an online learning initiative of test organization',
)
self.assertNotContains(response, 'a course of study offered by testorg')
        self.assertContains(response, u'<title>test_organization {} Certificate |'.format(self.course.number))
self.assertContains(response, 'logo_test1.png')
@ddt.data(True, False)
@patch('lms.djangoapps.certificates.views.webview.get_completion_badge')
def test_fetch_badge_info(self, issue_badges, mock_get_completion_badge):
"""
Test: Fetch badge class info if badges are enabled.
"""
if issue_badges:
features = FEATURES_WITH_BADGES_ENABLED
else:
features = FEATURES_WITH_CERTS_ENABLED
with override_settings(FEATURES=features):
badge_class = BadgeClassFactory(course_id=self.course_id, mode=self.cert.mode)
mock_get_completion_badge.return_value = badge_class
self._add_course_certificates(count=1, signatory_count=1, is_active=True)
test_url = get_certificate_url(user_id=self.user.id, course_id=self.cert.course_id,
uuid=self.cert.verify_uuid)
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
if issue_badges:
mock_get_completion_badge.assert_called()
else:
mock_get_completion_badge.assert_not_called()
@override_settings(FEATURES=FEATURES_WITH_BADGES_ENABLED)
@patch.dict("django.conf.settings.SOCIAL_SHARING_SETTINGS", {
"CERTIFICATE_TWITTER": True,
"CERTIFICATE_FACEBOOK": True,
})
@with_site_configuration(
configuration=dict(
platform_name='My Platform Site',
SITE_NAME='test_site.localhost',
urls=dict(
ABOUT='http://www.test-site.org/about-us',
),
),
)
def test_rendering_maximum_data(self):
"""
Tests at least one data item from different context update methods to
make sure every context update method is invoked while rendering certificate template.
"""
long_org_name = 'Long org name'
short_org_name = 'short_org_name'
test_organization_data = {
'name': long_org_name,
'short_name': short_org_name,
'description': 'Test Organization Description',
'active': True,
'logo': '/logo_test1.png'
}
test_org = organizations_api.add_organization(organization_data=test_organization_data)
organizations_api.add_organization_course(organization_data=test_org, course_id=six.text_type(self.course.id))
self._add_course_certificates(count=1, signatory_count=1, is_active=True)
badge_class = get_completion_badge(course_id=self.course_id, user=self.user)
BadgeAssertionFactory.create(
user=self.user, badge_class=badge_class,
)
self.course.cert_html_view_overrides = {
"logo_src": "/static/certificates/images/course_override_logo.png"
}
self.course.save()
self.store.update_item(self.course, self.user.id)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url, HTTP_HOST='test.localhost')
# Test an item from basic info
self.assertContains(response, 'Terms of Service & Honor Code')
self.assertContains(response, 'Certificate ID Number')
# Test an item from html cert configuration
self.assertContains(response, '<a class="logo" href="http://test_site.localhost">')
# Test an item from course info
self.assertContains(response, 'course_title_0')
# Test an item from user info
self.assertContains(response, u"{fullname}, you earned a certificate!".format(fullname=self.user.profile.name))
# Test an item from social info
self.assertContains(response, "Post on Facebook")
self.assertContains(response, "Share on Twitter")
# Test an item from certificate/org info
self.assertContains(
response,
u"a course of study offered by {partner_short_name}, "
"an online learning initiative of "
"{partner_long_name}.".format(
partner_short_name=short_org_name,
partner_long_name=long_org_name,
),
)
# Test item from badge info
self.assertContains(response, "Add to Mozilla Backpack")
# Test item from site configuration
self.assertContains(response, "http://www.test-site.org/about-us")
# Test course overrides
self.assertContains(response, "/static/certificates/images/course_override_logo.png")
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_valid_certificate(self):
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertContains(response, str(self.cert.verify_uuid))
# Hit any "verified" mode-specific branches
self.cert.mode = 'verified'
self.cert.save()
response = self.client.get(test_url)
self.assertContains(response, str(self.cert.verify_uuid))
# Hit any 'xseries' mode-specific branches
self.cert.mode = 'xseries'
self.cert.save()
response = self.client.get(test_url)
self.assertContains(response, str(self.cert.verify_uuid))
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_certificate_only_for_downloadable_status(self):
"""
        Tests that Certificate HTML Web View returns Certificate only if certificate status is 'downloadable',
for other statuses it should return "Invalid Certificate".
"""
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
# Validate certificate
response = self.client.get(test_url)
self.assertContains(response, str(self.cert.verify_uuid))
# Change status to 'generating' and validate that Certificate Web View returns "Invalid Certificate"
self.cert.status = CertificateStatuses.generating
self.cert.save()
response = self.client.get(test_url)
self.assertContains(response, "Invalid Certificate")
self.assertContains(response, "Cannot Find Certificate")
self.assertContains(response, "We cannot find a certificate with this URL or ID number.")
@ddt.data(
(CertificateStatuses.downloadable, True),
(CertificateStatuses.audit_passing, False),
(CertificateStatuses.audit_notpassing, False),
)
@ddt.unpack
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_audit_certificate_display(self, status, eligible_for_certificate):
"""
Ensure that audit-mode certs are only shown in the web view if they
are eligible for a certificate.
"""
# Convert the cert to audit, with the specified eligibility
self.cert.mode = 'audit'
self.cert.status = status
self.cert.save()
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
if eligible_for_certificate:
self.assertContains(response, str(self.cert.verify_uuid))
else:
self.assertContains(response, "Invalid Certificate")
self.assertContains(response, "Cannot Find Certificate")
self.assertContains(response, "We cannot find a certificate with this URL or ID number.")
self.assertNotContains(response, str(self.cert.verify_uuid))
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_html_view_for_invalid_certificate(self):
"""
Tests that Certificate HTML Web View returns "Cannot Find Certificate" if certificate has been invalidated.
"""
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
# Validate certificate
response = self.client.get(test_url)
self.assertContains(response, str(self.cert.verify_uuid))
# invalidate certificate and verify that "Cannot Find Certificate" is returned
self.cert.invalidate()
response = self.client.get(test_url)
self.assertContains(response, "Invalid Certificate")
self.assertContains(response, "Cannot Find Certificate")
self.assertContains(response, "We cannot find a certificate with this URL or ID number.")
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_html_lang_attribute_is_dynamic_for_invalid_certificate_html_view(self):
"""
Tests that Certificate HTML Web View's lang attribute is based on user language.
"""
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
self.cert.invalidate()
user_language = 'fr'
self.client.cookies[settings.LANGUAGE_COOKIE] = user_language
response = self.client.get(test_url)
self.assertContains(response, '<html class="no-js" lang="fr">')
user_language = 'ar'
self.client.cookies[settings.LANGUAGE_COOKIE] = user_language
response = self.client.get(test_url)
self.assertContains(response, '<html class="no-js" lang="ar">')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_html_lang_attribute_is_dynamic_for_certificate_html_view(self):
"""
Tests that Certificate HTML Web View's lang attribute is based on user language.
"""
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
user_language = 'fr'
self.client.cookies[settings.LANGUAGE_COOKIE] = user_language
response = self.client.get(test_url)
self.assertContains(response, '<html class="no-js" lang="fr">')
user_language = 'ar'
self.client.cookies[settings.LANGUAGE_COOKIE] = user_language
response = self.client.get(test_url)
self.assertContains(response, '<html class="no-js" lang="ar">')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_html_view_for_non_viewable_certificate_and_for_student_user(self):
"""
Tests that Certificate HTML Web View returns "Cannot Find Certificate" if certificate is not viewable yet.
"""
test_certificates = [
{
'id': 0,
'name': 'Certificate Name 0',
'signatories': [],
'version': 1,
'is_active': True
}
]
self.course.certificates = {'certificates': test_certificates}
self.course.cert_html_view_enabled = True
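        # An availability date in the future makes the certificate not yet viewable.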
self.course.certificate_available_date = datetime.datetime.today() + datetime.timedelta(days=1)
self.course.save()
self.store.update_item(self.course, self.user.id)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertContains(response, "Invalid Certificate")
self.assertContains(response, "Cannot Find Certificate")
self.assertContains(response, "We cannot find a certificate with this URL or ID number.")
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_with_valid_signatories(self):
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertContains(response, 'course_title_0')
self.assertContains(response, 'Signatory_Name 0')
self.assertContains(response, 'Signatory_Title 0')
self.assertContains(response, 'Signatory_Organization 0')
self.assertContains(response, '/static/certificates/images/demo-sig0.png')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_course_display_name_not_override_with_course_title(self):
        # If the certificate in the descriptor has no course_title, the course name should not be overridden with that title.
test_certificates = [
{
'id': 0,
'name': 'Name 0',
'description': 'Description 0',
'signatories': [],
'version': 1,
                'is_active': True
}
]
self.course.certificates = {'certificates': test_certificates}
self.course.cert_html_view_enabled = True
self.course.save()
self.store.update_item(self.course, self.user.id)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertNotContains(response, 'test_course_title_0')
self.assertContains(response, 'refundable course')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_course_display_overrides(self):
"""
        Tests that if `Course Number Display String` or `Course Organization Display` is set for a course
        in advanced settings, then the web certificate displays that course number and course org
        instead of the original course number and course org.
"""
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
self.course.display_coursenumber = "overridden_number"
self.course.display_organization = "overridden_org"
self.store.update_item(self.course, self.user.id)
response = self.client.get(test_url)
self.assertContains(response, 'overridden_number')
self.assertContains(response, 'overridden_org')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_certificate_view_without_org_logo(self):
test_certificates = [
{
'id': 0,
'name': 'Certificate Name 0',
'signatories': [],
'version': 1,
'is_active': True
}
]
self.course.certificates = {'certificates': test_certificates}
self.course.cert_html_view_enabled = True
self.course.save()
self.store.update_item(self.course, self.user.id)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
# make sure response html has only one organization logo container for edX
self.assertContains(response, "<li class=\"wrapper-organization\">", 1)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_without_signatories(self):
self._add_course_certificates(count=1, signatory_count=0)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertNotContains(response, 'Signatory_Name 0')
self.assertNotContains(response, 'Signatory_Title 0')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_is_html_escaped(self):
test_certificates = [
{
'id': 0,
'name': 'Certificate Name',
'description': '<script>Description</script>',
'course_title': '<script>course_title</script>',
'org_logo_path': '/t4x/orgX/testX/asset/org-logo-1.png',
'signatories': [],
'version': 1,
'is_active': True
}
]
self.course.certificates = {'certificates': test_certificates}
self.course.cert_html_view_enabled = True
self.course.save()
self.store.update_item(self.course, self.user.id)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertNotContains(response, '<script>')
        self.assertContains(response, '&lt;script&gt;course_title&lt;/script&gt;')
@override_settings(FEATURES=FEATURES_WITH_CERTS_DISABLED)
def test_render_html_view_disabled_feature_flag_returns_static_url(self):
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
self.assertIn(str(self.cert.download_url), test_url)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_invalid_course(self):
test_url = "/certificates/user/{user_id}/course/{course_id}".format(
user_id=self.user.id,
course_id="missing/course/key"
)
response = self.client.get(test_url)
self.assertContains(response, 'invalid')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_invalid_user(self):
self._add_course_certificates(count=1, signatory_count=0)
test_url = get_certificate_url(
user_id=111,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertContains(response, 'invalid')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_non_int_user(self):
self._add_course_certificates(count=1, signatory_count=0)
test_url = get_certificate_url(
user_id="Good tests make good neighbors",
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertEqual(response.status_code, 404)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_invalid_user_certificate(self):
self._add_course_certificates(count=1, signatory_count=0)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
self.cert.delete()
self.assertListEqual(list(GeneratedCertificate.eligible_certificates.all()), [])
response = self.client.get(test_url)
self.assertContains(response, 'invalid')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED, PLATFORM_NAME=u'Űńíćődé Űńívéŕśítӳ')
def test_render_html_view_with_unicode_platform_name(self):
self._add_course_certificates(count=1, signatory_count=0)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_with_preview_mode(self):
"""
        test certificate web view should render properly along with its signatories information when accessing it in
        preview mode, whether or not the certificate is marked active.
"""
self.cert.delete()
self.assertListEqual(list(GeneratedCertificate.eligible_certificates.all()), [])
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url + '?preview=honor')
# accessing certificate web view in preview mode without
# staff or instructor access should show invalid certificate
self.assertContains(response, 'Cannot Find Certificate')
CourseStaffRole(self.course.id).add_users(self.user)
response = self.client.get(test_url + '?preview=honor')
self.assertNotContains(response, self.course.display_name.encode('utf-8'))
self.assertContains(response, 'course_title_0')
self.assertContains(response, 'Signatory_Title 0')
# mark certificate inactive but accessing in preview mode.
self._add_course_certificates(count=1, signatory_count=2, is_active=False)
response = self.client.get(test_url + '?preview=honor')
self.assertNotContains(response, self.course.display_name.encode('utf-8'))
self.assertContains(response, 'course_title_0')
self.assertContains(response, 'Signatory_Title 0')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_with_preview_mode_when_user_already_has_cert(self):
"""
        test certificate web view should render properly in
        preview mode even if the user who is previewing already has a
        certificate generated with a different mode.
"""
self._add_course_certificates(count=1, signatory_count=2)
CourseStaffRole(self.course.id).add_users(self.user)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
        # The user already has a certificate generated for 'honor' mode,
        # so let's try to preview in 'verified' mode.
response = self.client.get(test_url + '?preview=verified')
self.assertNotContains(response, self.course.display_name.encode('utf-8'))
self.assertContains(response, 'course_title_0')
self.assertContains(response, 'Signatory_Title 0')
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
@ddt.data(
(-2, True),
(-2, False)
)
@ddt.unpack
def test_html_view_certificate_available_date_for_instructor_paced_courses(self, cert_avail_delta, self_paced):
"""
        test certificate web view should display the certificate available date
        as the issued date for instructor-paced courses (self-paced courses show today's date)
"""
self.course.self_paced = self_paced
today = datetime.datetime.utcnow()
self.course.certificate_available_date = today + datetime.timedelta(cert_avail_delta)
self.store.update_item(self.course, self.user.id)
self._add_course_certificates(count=1, signatory_count=1, is_active=True)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
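        # With AUTO_CERTIFICATE_GENERATION active, self-paced courses and
        # courses whose availability date is still in the future show today's
        # date; otherwise the (past) certificate_available_date is shown.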
if self_paced or self.course.certificate_available_date > today:
expected_date = today
else:
expected_date = self.course.certificate_available_date
with waffle.waffle().override(waffle.AUTO_CERTIFICATE_GENERATION, active=True):
response = self.client.get(test_url)
date = u'{month} {day}, {year}'.format(
month=strftime_localized(expected_date, "%B"),
day=expected_date.day,
year=expected_date.year
)
self.assertContains(response, date)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_invalid_certificate_configuration(self):
self.course.cert_html_view_enabled = True
self.course.save()
self.store.update_item(self.course, self.user.id)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertContains(response, "Invalid Certificate")
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_500_view_invalid_certificate_configuration(self):
self._add_course_certificates(count=1, signatory_count=2)
CertificateHtmlViewConfiguration.objects.all().update(enabled=False)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url + "?preview=honor")
self.assertContains(response, "Invalid Certificate Configuration")
# Verify that Exception is raised when certificate is not in the preview mode
with self.assertRaises(Exception):
self.client.get(test_url)
@override_settings(FEATURES=FEATURES_WITH_CERTS_DISABLED)
def test_request_certificate_without_passing(self):
self.cert.status = CertificateStatuses.unavailable
self.cert.save()
request_certificate_url = reverse('request_certificate')
response = self.client.post(request_certificate_url, {'course_id': six.text_type(self.course.id)})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content.decode('utf-8'))
self.assertEqual(CertificateStatuses.notpassing, response_json['add_status'])
@override_settings(FEATURES=FEATURES_WITH_CERTS_DISABLED)
@override_settings(CERT_QUEUE='test-queue')
def test_request_certificate_after_passing(self):
self.cert.status = CertificateStatuses.unavailable
self.cert.save()
request_certificate_url = reverse('request_certificate')
with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_queue:
mock_queue.return_value = (0, "Successfully queued")
with mock_passing_grade():
response = self.client.post(request_certificate_url, {'course_id': six.text_type(self.course.id)})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content.decode('utf-8'))
self.assertEqual(CertificateStatuses.generating, response_json['add_status'])
    # TEMPLATES WITHOUT LANGUAGE TESTS
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@override_settings(LANGUAGE_CODE='fr')
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
def test_certificate_custom_template_with_org_mode_and_course_key(self, mock_get_course_run_details):
"""
Tests custom template search and rendering.
This test should check template matching when org={org}, course={course}, mode={mode}.
"""
mock_get_course_run_details.return_value = self.mock_course_run_details
self._add_course_certificates(count=1, signatory_count=2)
self._create_custom_named_template(
'test_template_1_course', org_id=1, mode='honor', course_key=six.text_type(self.course.id),
)
self._create_custom_named_template(
'test_template_2_course', org_id=1, mode='verified', course_key=six.text_type(self.course.id),
)
self._create_custom_named_template('test_template_3_course', org_id=2, mode='honor')
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
with patch('lms.djangoapps.certificates.api.get_course_organization_id') as mock_get_org_id:
mock_get_org_id.side_effect = [1, 2]
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'lang: fr')
self.assertContains(response, 'course name: test_template_1_course')
# test with second organization template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'lang: fr')
self.assertContains(response, 'course name: test_template_3_course')
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
def test_certificate_custom_template_with_org_and_mode(self, mock_get_course_run_details):
"""
Tests custom template search if no template matches course_key, but a template does
match org and mode.
This test should check template matching when org={org}, course=Null, mode={mode}.
"""
mock_get_course_run_details.return_value = self.mock_course_run_details
othercourse = CourseFactory.create(
org='cstX', number='cst_22', display_name='custom template course'
)
self._add_course_certificates(count=1, signatory_count=2)
self._create_custom_named_template('test_template_1_course', org_id=1, mode='honor') # Correct template
self._create_custom_named_template( # wrong course key
'test_template_2_course',
org_id=1,
mode='honor',
course_key=six.text_type(othercourse.id)
)
self._create_custom_named_template('test_template_3_course', org_id=1, mode='verified') # wrong mode
self._create_custom_named_template('test_template_4_course', org_id=2, mode='honor') # wrong org
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
with patch('lms.djangoapps.certificates.api.get_course_organization_id') as mock_get_org_id:
mock_get_org_id.side_effect = [1]
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_template_1_course')
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
def test_certificate_custom_template_with_org(self, mock_get_course_run_details):
"""
        Tests custom template search when we have a single template for an organization.
This test should check template matching when org={org}, course=Null, mode=null.
"""
mock_get_course_run_details.return_value = self.mock_course_run_details
self._add_course_certificates(count=1, signatory_count=2)
self._create_custom_named_template('test_template_1_course', org_id=1, mode=None) # Correct template
self._create_custom_named_template('test_template_2_course', org_id=1, mode='verified') # wrong mode
self._create_custom_named_template('test_template_3_course', org_id=2, mode=None) # wrong org
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
with patch('lms.djangoapps.certificates.api.get_course_organization_id') as mock_get_org_id:
mock_get_org_id.side_effect = [1]
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_template_1_course')
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
def test_certificate_custom_template_with_mode(self, mock_get_course_run_details):
"""
Tests custom template search if we have a single template for a course mode.
This test should check template matching when org=null, course=Null, mode={mode}.
"""
mock_get_course_run_details.return_value = self.mock_course_run_details
mode = 'honor'
self._add_course_certificates(count=1, signatory_count=2)
self._create_custom_named_template('test_template_1_course', org_id=None, mode=mode) # Correct template
self._create_custom_named_template('test_template_2_course', org_id=None, mode='verified') # wrong mode
self._create_custom_named_template('test_template_3_course', org_id=2, mode=mode) # wrong org
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
with patch('lms.djangoapps.certificates.api.get_course_organization_id') as mock_get_org_id:
mock_get_org_id.return_value = None
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, u'mode: {}'.format(mode))
self.assertContains(response, 'course name: test_template_1_course')
## Templates With Language tests
#1
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@override_settings(LANGUAGE_CODE='fr')
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_language_template_with_org_mode_and_course_key(
self,
mock_get_org_id,
mock_get_course_run_details,
):
"""
Tests custom template search and rendering.
This test should check template matching when org={org}, course={course}, mode={mode}.
"""
DarkLangConfig(released_languages='es-419, fr', changed_by=self.user, enabled=True).save()
right_language = 'es'
wrong_language = 'fr'
mock_get_org_id.return_value = 1
course_run_details = self.mock_course_run_details
course_run_details.update({'content_language': 'es'})
mock_get_course_run_details.return_value = course_run_details
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'language_specific_templates_enabled': True
}
)
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
        #create an org_mode_and_coursekey template language=null
self._create_custom_named_template(
'test_null_lang_template', org_id=1, mode='honor', course_key=six.text_type(self.course.id), language=None,
)
#verify return template lang = null
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_null_lang_template')
        #create an org_mode_and_coursekey template language=wrong_language
self._create_custom_named_template(
'test_wrong_lang_template',
org_id=1,
mode='honor',
course_key=six.text_type(self.course.id),
language=wrong_language,
)
#verify returns null lang template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_null_lang_template')
#create an org_mode_and_coursekey template language=''
self._create_custom_named_template(
'test_all_languages_template',
org_id=1,
mode='honor',
course_key=six.text_type(self.course.id),
language='',
)
#verify returns null lang template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_all_languages_template')
        #create an org_mode_and_coursekey template language=lang
self._create_custom_named_template(
'test_right_lang_template',
org_id=1,
mode='honor',
course_key=six.text_type(self.course.id),
language=right_language,
)
# verify return right_language template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_right_lang_template')
#2
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_language_template_with_org_and_mode(self, mock_get_org_id, mock_get_course_run_details):
"""
Tests custom template search if no template matches course_key, but a template does
match org and mode.
This test should check template matching when org={org}, course=Null, mode={mode}.
"""
DarkLangConfig(released_languages='es-419, fr', changed_by=self.user, enabled=True).save()
right_language = 'es'
wrong_language = 'fr'
mock_get_org_id.return_value = 1
course_run_details = self.mock_course_run_details
course_run_details.update({'content_language': 'es'})
mock_get_course_run_details.return_value = course_run_details
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'language_specific_templates_enabled': True
}
)
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
        #create an org and mode template language=null
self._create_custom_named_template('test_null_lang_template', org_id=1, mode='honor', language=None)
#verify return template lang = null
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_null_lang_template')
        #create an org and mode template language=wrong_language
self._create_custom_named_template('test_wrong_lang_template', org_id=1, mode='honor', language=wrong_language)
#verify returns null lang template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_null_lang_template')
#create an org and mode template language=''
self._create_custom_named_template('test_all_languages_template', org_id=1, mode='honor', language='')
#verify returns All Languages template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_all_languages_template')
        #create an org and mode template language=lang
self._create_custom_named_template('test_right_lang_template', org_id=1, mode='honor', language=right_language)
# verify return right_language template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_right_lang_template')
#3
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_language_template_with_org(self, mock_get_org_id, mock_get_course_run_details):
"""
        Tests custom template search when we have a single template for an organization.
This test should check template matching when org={org}, course=Null, mode=null.
"""
DarkLangConfig(released_languages='es-419, fr', changed_by=self.user, enabled=True).save()
right_language = 'es'
wrong_language = 'fr'
mock_get_org_id.return_value = 1
course_run_details = self.mock_course_run_details
course_run_details.update({'content_language': 'es'})
mock_get_course_run_details.return_value = course_run_details
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'language_specific_templates_enabled': True
}
)
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
        #create an org template language=null
self._create_custom_named_template('test_null_lang_template', org_id=1, language=None)
#verify return template lang = null
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_null_lang_template')
        #create an org template language=wrong_language
self._create_custom_named_template('test_wrong_lang_template', org_id=1, language=wrong_language)
#verify returns null lang template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_null_lang_template')
#create an org template language=''
self._create_custom_named_template('test_all_languages_template', org_id=1, language='')
#verify returns All Languages template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_all_languages_template')
        #create an org template language=lang
self._create_custom_named_template('test_right_lang_template', org_id=1, language=right_language)
# verify return right_language template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_right_lang_template')
#4
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_language_template_with_mode(self, mock_get_org_id, mock_get_course_run_details):
"""
Tests custom template search if we have a single template for a course mode.
This test should check template matching when org=null, course=Null, mode={mode}.
"""
DarkLangConfig(released_languages='es-419, fr', changed_by=self.user, enabled=True).save()
right_language = 'es'
wrong_language = 'fr'
mock_get_org_id.return_value = 1
course_run_details = self.mock_course_run_details
course_run_details.update({'content_language': 'es'})
mock_get_course_run_details.return_value = course_run_details
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'language_specific_templates_enabled': True
}
)
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
#create a mode template language=null
self._create_custom_named_template('test_null_lang_template', mode='honor', language=None)
#verify return template with lang = null
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_null_lang_template')
#create a mode template language=wrong_language
self._create_custom_named_template('test_wrong_lang_template', mode='honor', language=wrong_language)
#verify returns null lang template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_null_lang_template')
#create a mode template language=''
self._create_custom_named_template('test_all_languages_template', mode='honor', language='')
#verify returns All Languages template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_all_languages_template')
#create a mode template language=lang
self._create_custom_named_template('test_right_lang_template', mode='honor', language=right_language)
# verify return right_language template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_right_lang_template')
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_language_template_with_locale_language_from_catalogue(
self,
mock_get_org_id,
mock_get_course_run_details,
):
"""
        Tests custom template search when the course run's content language is a
        locale (es-419); template matching should fall back to a template in the
        generic language (es).
DarkLangConfig(released_languages='es-419, fr', changed_by=self.user, enabled=True).save()
right_language = 'es'
wrong_language = 'fr'
mock_get_org_id.return_value = 1
course_run_details = self.mock_course_run_details
course_run_details.update({'content_language': 'es-419'})
mock_get_course_run_details.return_value = course_run_details
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'language_specific_templates_enabled': True
}
)
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
#create a mode template language=null
self._create_custom_named_template('test_null_lang_template', org_id=1, mode='honor', language=None)
#verify return template with lang = null
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_null_lang_template')
#create a mode template language=wrong_language
self._create_custom_named_template('test_wrong_lang_template', org_id=1, mode='honor', language=wrong_language)
#verify returns null lang template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_null_lang_template')
#create a mode template language=''
self._create_custom_named_template('test_all_languages_template', org_id=1, mode='honor', language='')
#verify returns All Languages template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_all_languages_template')
#create a mode template language=lang
self._create_custom_named_template('test_right_lang_template', org_id=1, mode='honor', language=right_language)
# verify return right_language template
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'course name: test_right_lang_template')
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@ddt.data(True, False)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
@patch('lms.djangoapps.certificates.api.get_course_organization_id')
def test_certificate_custom_template_with_hours_of_effort(
self,
include_effort,
mock_get_org_id,
mock_get_course_run_details,
):
"""
Tests custom template properly retrieves and calculates Hours of Effort when the feature is enabled
"""
# mock the response data from Discovery that updates the context for template lookup and rendering
mock_get_course_run_details.return_value = self.mock_course_run_details
mock_get_org_id.return_value = 1
CertificateGenerationCourseSetting.objects.update_or_create(
course_key=self.course.id,
defaults={
'include_hours_of_effort': include_effort
}
)
self._add_course_certificates(count=1, signatory_count=2)
self._create_custom_template_with_hours_of_effort(org_id=1, language=None)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
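        # 40 hours = weeks_to_complete (4) * max_effort (10) from the mocked
        # course run details.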
if include_effort:
self.assertContains(response, 'hours of effort: 40')
else:
self.assertNotContains(response, 'hours of effort')
@ddt.data(True, False)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
def test_certificate_custom_template_with_unicode_data(self, custom_certs_enabled, mock_get_course_run_details):
"""
Tests custom template renders properly with unicode data.
"""
mock_get_course_run_details.return_value = self.mock_course_run_details
mode = 'honor'
self._add_course_certificates(count=1, signatory_count=2)
self._create_custom_template(mode=mode)
with patch.dict("django.conf.settings.FEATURES", {
"CERTIFICATES_HTML_VIEW": True,
"CUSTOM_CERTIFICATE_TEMPLATES_ENABLED": custom_certs_enabled
}):
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
with patch.dict("django.conf.settings.SOCIAL_SHARING_SETTINGS", {
"CERTIFICATE_TWITTER": True,
"CERTIFICATE_TWITTER_TEXT": u"nền tảng học tập"
}):
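                # Simulate an absolute URI containing a non-ASCII character to
                # exercise unicode handling when building the share URLs.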
with patch('django.http.HttpRequest.build_absolute_uri') as mock_abs_uri:
mock_abs_uri.return_value = '='.join(['http://localhost/?param', u'é'])
with patch('lms.djangoapps.certificates.api.get_course_organization_id') as mock_get_org_id:
mock_get_org_id.return_value = None
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
if custom_certs_enabled:
self.assertContains(response, u'mode: {}'.format(mode))
else:
self.assertContains(response, "Tweet this Accomplishment")
self.assertContains(response, 'https://twitter.com/intent/tweet')
@override_settings(FEATURES=FEATURES_WITH_CUSTOM_CERTS_ENABLED)
@patch('lms.djangoapps.certificates.views.webview.get_course_run_details')
def test_certificate_asset_by_slug(self, mock_get_course_run_details):
"""
Tests certificate template asset display by slug using static.certificate_asset_url method.
"""
mock_get_course_run_details.return_value = self.mock_course_run_details
self._add_course_certificates(count=1, signatory_count=2)
self._create_custom_template(mode='honor')
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
# render certificate without template asset
with patch('lms.djangoapps.certificates.api.get_course_organization_id') as mock_get_org_id:
mock_get_org_id.return_value = None
response = self.client.get(test_url)
self.assertContains(response, '<img class="custom-logo" src="" />')
template_asset = CertificateTemplateAsset(
description='custom logo',
asset='certificate_template_assets/32/test_logo.png',
asset_slug='custom-logo',
)
template_asset.save()
# render certificate with template asset
with patch('lms.djangoapps.certificates.api.get_course_organization_id') as mock_get_org_id:
mock_get_org_id.return_value = None
response = self.client.get(test_url)
self.assertContains(
response, u'<img class="custom-logo" src="{}certificate_template_assets/32/test_logo.png" />'.format(
settings.MEDIA_URL
)
)
class CertificateEventTests(CommonCertificatesTestCase, EventTrackingTestCase):
"""
Test events emitted by certificate handling.
"""
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_certificate_evidence_event_emitted(self):
self.client.logout()
self._add_course_certificates(count=1, signatory_count=2)
self.recreate_tracker()
test_url = get_certificate_url(
user_id=self.user.id,
course_id=six.text_type(self.course.id)
)
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
actual_event = self.get_event()
self.assertEqual(actual_event['name'], 'edx.certificate.evidence_visited')
assert_event_matches(
{
'user_id': self.user.id,
'certificate_id': six.text_type(self.cert.verify_uuid),
'enrollment_mode': self.cert.mode,
'certificate_url': test_url,
'course_id': six.text_type(self.course.id),
'social_network': CertificateSocialNetworks.linkedin
},
actual_event['data']
)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_evidence_event_sent(self):
self._add_course_certificates(count=1, signatory_count=2)
cert_url = get_certificate_url(
user_id=self.user.id,
course_id=self.course_id
)
test_url = '{}?evidence_visit=1'.format(cert_url)
self.recreate_tracker()
badge_class = get_completion_badge(self.course_id, self.user)
assertion = BadgeAssertionFactory.create(
user=self.user, badge_class=badge_class,
backend='DummyBackend',
image_url='http://www.example.com/image.png',
assertion_url='http://www.example.com/assertion.json',
data={
'issuer': 'http://www.example.com/issuer.json',
}
)
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
assert_event_matches(
{
'name': 'edx.badge.assertion.evidence_visited',
'data': {
'course_id': 'testorg/run1/refundable_course',
'assertion_id': assertion.id,
'badge_generator': u'DummyBackend',
'badge_name': u'refundable course',
'issuing_component': u'',
'badge_slug': u'testorgrun1refundable_course_honor_432f164',
'assertion_json_url': 'http://www.example.com/assertion.json',
'assertion_image_url': 'http://www.example.com/image.png',
'user_id': self.user.id,
'issuer': 'http://www.example.com/issuer.json',
'enrollment_mode': 'honor',
},
},
self.get_event()
)
| cpennington/edx-platform | lms/djangoapps/certificates/tests/test_webview_views.py | Python | agpl-3.0 | 73,129 |
from regularregion import RegularRegion
from region import Region
from icoolobject import *
class SubRegion(RegularRegion):
"""
A SubRegion is a:
(1) IRREG r-region number;
(2) RLOW Innter radius of this r subregion;
(3) RHIGH Outer radius of this r subregion;
(4) Field object; and
(5) Material object.
"""
num_params = 5
for001_format = {'line_splits': [3, 1, 1]}
command_params = {
'irreg': {'desc': 'R-Region Number',
'doc': '',
'type': 'Integer',
'req': True,
'pos': 1},
'rlow': {'desc': 'Inner radius of this r subregion',
'doc': '',
'type': 'Float',
'req': True,
'pos': 2},
'rhigh': {'desc': 'Outer radius of this r subregion',
'doc': '',
'type': 'Float',
'req': True,
'pos': 3},
'field': {'desc': 'Field object',
'doc': '',
'type': 'Field',
'req': True,
'pos': 4},
'material': {'desc': 'Material object',
'doc': '',
'type': 'Material',
'req': True,
'pos': 5}
}
def __init__(self, **kwargs):
ICoolObject.check_command_params_init(self, SubRegion.command_params, **kwargs)
def __setattr__(self, name, value):
self.__icool_setattr__(name, value, SubRegion.command_params)
def __str__(self):
return 'SubRegion:\n' + 'irreg=' + str(self.irreg) + '\n' + 'rlow=' + str(self.rlow) + '\n' + \
'rhigh=' + str(self.rhigh) + '\n' + 'Field=' + \
str(self.field) + '\n' + \
'Material=' + str(self.material)
def __repr__(self):
return 'SubRegion:\n' + 'irreg=' + str(self.irreg) + '\n' + 'rlow=' + str(self.rlow) + '\n' + \
'rhigh=' + str(self.rhigh) + '\n' + 'Field=' + \
str(self.field) + '\n' + \
'Material=' + str(self.material)
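    # Illustrative construction (sketch only; some_field and some_material are
    # hypothetical Field and Material instances, not defined in this module):
    # sr = SubRegion(irreg=1, rlow=0.0, rhigh=0.25,
    #                field=some_field, material=some_material)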
def gen_for001(self, file):
Region.gen_for001(self, file) | jon2718/ipycool_2.0 | subregion.py | Python | mit | 2,296 |
from pycwt import * | ElOceanografo/PyCWT | pycwt/__init__.py | Python | gpl-2.0 | 19 |
"""
run_SIM_machine_generated.py
Script to generate model SIM from [G&L 2012] from text equation specification.
"""
import sfc_models.deprecated.iterative_machine_generator as generator
from sfc_models.examples.Quick2DPlot import Quick2DPlot
filename = 'SIM_model.py'
eqn = """# Model SIM - Simplest model with government money.
# Chapter 3 of [G&L 2012], pages 91-92.
#
# [G&L 2012] "Monetary Economics: An Integrated Approach to credit, Money, Income, Production
# and Wealth; Second Edition", by Wynne Godley and Marc Lavoie, Palgrave Macmillan, 2012.
# ISBN 978-0-230-30184-9
Cs = Cd # (3.1)
Gs = Gd # (3.2)
Ts = Td # (3.3)
Ns = Nd # (3.4)
YD = W*Nd - Ts # (3.5) [Need to specify Nd instead of N]
Td = theta*W*Ns # (3.6)
Cd = alpha1*YD + alpha2*LAG_Hh # (3.7)
# -----------------------
# Where - lagged variables
LAG_Hh = Hh(k-1) # Def'n
LAG_Hs = Hs(k-1) # Def'n
# ---------------------------------------------
# Need to clean up the following
Hs = LAG_Hs + Gd - Td # (3.8) Was: delta_Hs = Hs - LAG_Hs = Gd - Td
# Government-supplied money
# Note that there is no dependence upon Hs;
# redundant equation.
Hh = LAG_Hh + YD - Cd # (3.9) Was: delta_Hh = Hh - LAG_Hh = YD - Cd
#-----------------------------------------------
Y = Cs + Gs # (3.10)
Nd = Y/W # (3.11)
# Params
alpha1 = 0.6
alpha2 = 0.4
theta = 0.2
W = 1.0
# Initial conditions
Hh(0) = 80.0
Hs(0) = 80.0
# Exogenous variable
Gd = [20., ] * 35 + [25., ] * 66
# Length of simulation
MaxTime = 100
"""
obj = generator.IterativeMachineGenerator(eqn)
obj.main(filename)
# Can only import now...
import SIM_model
obj = SIM_model.SFCModel()
obj._main_deprecated()
# Lop off t = 0 because it represents hard-coded initial conditions
Quick2DPlot(obj.t[1:], obj.Y[1:], 'Y - National Production')
Quick2DPlot(obj.t[1:], obj.Gd[1:], 'G - Government Consumption')
print("Validate that Hh = Hs")
Quick2DPlot(obj.t[1:], obj.Hh[1:], 'Hh - Household Money Holdings')
Quick2DPlot(obj.t[1:], obj.Hs[1:], 'Hs - Money Supplied by the Gummint')
| brianr747/SFC_models | sfc_models/examples/scripts/deprecated/run_SIM_machine_solved_equations.py | Python | apache-2.0 | 2,388 |
from django.contrib import admin
from activities.models import *
# Register your models here.
admin.site.register(Activity)
| studentisgss/booking | activities/admin.py | Python | gpl-3.0 | 126 |
from itertools import product
from unittest.mock import PropertyMock, patch
from django.test import TestCase, override_settings, tag
from ..factories import CountryRegionFactory
@tag('models', 'subregions')
class CountryRegionModelTests(TestCase):
def test_field_max_lengths(self):
region = CountryRegionFactory.build()
self.assertEqual(region._meta.get_field('iso_code').max_length, 4)
self.assertEqual(region._meta.get_field('latin_code').max_length, 70)
self.assertEqual(region._meta.get_field('latin_name').max_length, 70)
self.assertEqual(region._meta.get_field('local_code').max_length, 70)
self.assertEqual(region._meta.get_field('local_name').max_length, 70)
self.assertEqual(region._meta.get_field('esperanto_name').max_length, 70)
def test_translated_name(self):
# For region with no Esperanto name, value is expected to be empty string.
region = CountryRegionFactory.build(esperanto_name="")
with override_settings(LANGUAGE_CODE='eo'):
self.assertEqual(region.translated_name, "")
with override_settings(LANGUAGE_CODE='en'):
self.assertEqual(region.translated_name, "")
# For region with an Esperanto name, value is expected to be that name when
# locale is Esperanto and an empty string for any other locale.
region = CountryRegionFactory.build()
with override_settings(LANGUAGE_CODE='eo'):
self.assertEqual(region.translated_name, region.esperanto_name)
with override_settings(LANGUAGE_CODE='en'):
self.assertEqual(region.translated_name, "")
def test_translated_or_latin_name(self):
region_with_eo_and_code = CountryRegionFactory.build(short_code=True)
region_with_eo_and_name = CountryRegionFactory.build(short_code=False)
region_without_eo_with_code = CountryRegionFactory.build(short_code=True, esperanto_name="")
region_without_eo_with_name = CountryRegionFactory.build(short_code=False, esperanto_name="")
with override_settings(LANGUAGE_CODE='eo'):
# For region with an Esperanto name, value is expected to be that name for Esperanto locale.
self.assertEqual(
region_with_eo_and_code.translated_or_latin_name, region_with_eo_and_code.esperanto_name)
self.assertEqual(
region_with_eo_and_name.translated_or_latin_name, region_with_eo_and_name.esperanto_name)
# For region with no Esperanto name,
# value is expected to be the latin name or code for Esperanto locale.
self.assertEqual(
region_without_eo_with_code.translated_or_latin_name, region_without_eo_with_code.latin_name)
self.assertEqual(
region_without_eo_with_name.translated_or_latin_name, region_without_eo_with_name.latin_code)
with override_settings(LANGUAGE_CODE='en'):
# For region with an Esperanto name,
# value is expected to be the latin name or code for non-Esperanto locale.
self.assertEqual(
region_with_eo_and_code.translated_or_latin_name, region_with_eo_and_code.latin_name)
self.assertEqual(
region_with_eo_and_name.translated_or_latin_name, region_with_eo_and_name.latin_code)
# For region with no Esperanto name,
# value is expected to be the latin name or code for non-Esperanto locale.
self.assertEqual(
region_without_eo_with_code.translated_or_latin_name, region_without_eo_with_code.latin_name)
self.assertEqual(
region_without_eo_with_name.translated_or_latin_name, region_without_eo_with_name.latin_code)
def test_display_value(self):
test_data = [
(
# For region with latin code and latin name, value is expected to be the latin name.
dict(latin_code="ABC", latin_name="Appa Balwant Chowk"),
"Appa Balwant Chowk"
), (
# For region with only latin code and no latin name, value is expected to be the latin code.
dict(latin_code="Shaniwar Peth", latin_name=""),
"Shaniwar Peth"
), (
# For region with latin code equal to the local code, value is expected to be only one of them.
dict(latin_code="Aundh", latin_name="", local_code="Aundh"),
"Aundh"
), (
# For region with local code similar to the latin code, value is expected to be the local code.
dict(latin_code="Balewadi", latin_name="", local_code="Báłěwàďı"),
"Báłěwàďı"
), (
# For region with local code, value is expected to be latin code with the local code.
dict(latin_code="Baner", latin_name="", local_code="बाणेर"),
"Baner (बाणेर)"
), (
# For region with both local code and name, value is expected to be latin code with the local name.
dict(latin_code="Baner", latin_name="", local_code="BNR", local_name="बाणेर"),
"Baner (बाणेर)"
), (
# For region with latin code equal to local code with addition of prefix or suffix,
# value is expected to be only the local code.
dict(latin_code="Neighbourhood of Bavdhan", latin_name="", local_code="Bavdhan"),
"Bavdhan"
), (
# For region with latin code equal to local code minus a prefix or a suffix,
# value is expected to be only the local code.
dict(latin_code="Bavdhan", latin_name="", local_code="Bavdhan Locality"),
"Bavdhan Locality"
), (
# For region with latin code similar to local code and with addition of prefix or suffix,
# value is expected to be both latin code with the local code.
dict(latin_code="Neighbourhood of Bavdhan", latin_name="", local_code="Bāvdhān"),
"Neighbourhood of Bavdhan (Bāvdhān)"
), (
# For region with latin code similar to local code and minus a prefix or a suffix,
# value is expected to be both latin code with the local code.
dict(latin_code="Bavdhan", latin_name="", local_code="Bāvdhān Locality"),
"Bavdhan (Bāvdhān Locality)"
), (
# For region with both latin code and name, and both local code and name,
# value is expected to be both latin name with the local name.
dict(latin_code="BH5", latin_name="Bhosari", local_code="05", local_name="भोसरी"),
"Bhosari (भोसरी)"
)
]
for kwargs, expected_value in test_data:
for esperanto_on, esperanto_name in product((False, True), ("", "Najbarejo", "Bavdhan")):
with self.subTest(**kwargs, esperanto=esperanto_name, include_esperanto=esperanto_on):
region = CountryRegionFactory.build(**kwargs, esperanto_name=esperanto_name)
self.assertEqual(
region.get_display_value(with_esperanto=esperanto_on),
f"{esperanto_name} \xa0\u2013\xa0 {expected_value}" if esperanto_on and esperanto_name
else expected_value
)
# The value is expected to be calculated only once and memoized.
region = CountryRegionFactory.build(short_code=False)
with patch('hosting.models.CountryRegion.latin_code', new_callable=PropertyMock) as mock_name:
result1 = region.get_display_value()
result2 = region.get_display_value()
mock_name.assert_called_once()
self.assertEqual(id(result1), id(result2))
region.latin_code = "Charholi Budruk"
mock_name.reset_mock()
result3 = region.get_display_value()
mock_name.assert_not_called()
self.assertEqual(id(result1), id(result3))
result4 = region.get_display_value(with_esperanto=True)
mock_name.assert_called_once()
self.assertNotEqual(id(result1), id(result4))
def test_str(self):
region = CountryRegionFactory.build(short_code=False)
self.assertEqual(
str(region),
f"{region.country.code}: {region.latin_code}"
)
region = CountryRegionFactory.build(short_code=True)
self.assertEqual(
str(region),
f"{region.country.code}: {region.latin_name} ({region.latin_code})"
)
| tejo-esperanto/pasportaservo | tests/models/test_countryregion_model.py | Python | agpl-3.0 | 8,821 |
# This file is part of Peach-Py package and is licensed under the Simplified BSD license.
# See license.rst for the full text of the license.
from peachpy.x86_64 import *
from peachpy import *
matrix = Argument(ptr(float_))
with Function("transpose4x4_opt", (matrix,)):
reg_matrix = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_matrix, matrix)
xmm_rows = [XMMRegister() for _ in range(4)]
for i, xmm_row in enumerate(xmm_rows):
MOVUPS(xmm_row, [reg_matrix + i * XMMRegister.size])
xmm_temps = [XMMRegister() for _ in range(2)]
# xmm_temps[0] = ( m00, m01, m02, m03 )
MOVAPS(xmm_temps[0], xmm_rows[0])
# xmm_temps[1] = ( m20, m21, m22, m23 )
MOVAPS(xmm_temps[1], xmm_rows[2])
# xmm_rows[0] = ( m00, m10, m01, m11 )
UNPCKLPS(xmm_rows[0], xmm_rows[1])
# xmm_rows[2] = ( m20, m30, m21, m31 )
UNPCKLPS(xmm_rows[2], xmm_rows[3])
# xmm_rows[1] = ( m02, m12, m03, m13 )
UNPCKHPS(xmm_temps[0], xmm_rows[1])
xmm_rows[1] = xmm_temps[0]
# xmm_rows[3] = ( m22, m32, m23, m33 )
UNPCKHPS(xmm_temps[1], xmm_rows[3])
xmm_rows[3] = xmm_temps[1]
xmm_temps = [XMMRegister() for _ in range(2)]
# xmm_temps[0] = ( m00, m10, m01, m11 )
MOVAPS(xmm_temps[0], xmm_rows[0])
# xmm_temps[1] = ( m02, m12, m03, m13 )
MOVAPS(xmm_temps[1], xmm_rows[1])
# xmm_rows[0] = ( m00, m10, m20, m30 )
MOVLHPS(xmm_rows[0], xmm_rows[2])
MOVUPS([reg_matrix], xmm_rows[0])
# xmm_rows[2] = ( m01, m11, m21, m31 )
MOVHLPS(xmm_rows[2], xmm_temps[0])
MOVUPS([reg_matrix + 16], xmm_rows[2])
# xmm_rows[1] = ( m02, m12, m22, m32 )
MOVLHPS(xmm_rows[1], xmm_rows[3])
MOVUPS([reg_matrix + 32], xmm_rows[1])
# xmm_rows[3] = ( m03, m13, m23, m33 )
MOVHLPS(xmm_rows[3], xmm_temps[1])
MOVUPS([reg_matrix + 48], xmm_rows[3])
RETURN()
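# Hedged reference sketch (plain Python, separate from the generated kernel):
# the same transpose on a flat row-major 4x4 list, useful for sanity-checking
# the register comments above.
def transpose4x4_ref(m):
    # element (row, col) of the output is element (col, row) of the input
    return [m[4 * col + row] for row in range(4) for col in range(4)]
assert transpose4x4_ref(list(range(16)))[1] == 4  # m10 lands at index 1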
| silky/PeachPy | examples/nmake/transpose4x4-opt.py | Python | bsd-2-clause | 1,848 |
# Copyright (C) 2016 The OpenTimestamps developers
#
# This file is part of python-opentimestamps.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-opentimestamps including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.
"""Timestamp signature verification"""
import opentimestamps.core.serialize
class VerificationError(Exception):
"""Attestation verification errors"""
class TimeAttestation:
"""Time-attesting signature"""
TAG = None
TAG_SIZE = 8
# FIXME: What should this be?
MAX_PAYLOAD_SIZE = 8192
"""Maximum size of a attestation payload"""
def _serialize_payload(self, ctx):
raise NotImplementedError
def serialize(self, ctx):
ctx.write_bytes(self.TAG)
payload_ctx = opentimestamps.core.serialize.BytesSerializationContext()
self._serialize_payload(payload_ctx)
ctx.write_varbytes(payload_ctx.getbytes())
def __eq__(self, other):
"""Implementation of equality operator
WARNING: The exact behavior of this isn't yet well-defined enough to be
used for consensus-critical applications.
"""
if isinstance(other, TimeAttestation):
assert self.__class__ is not other.__class__ # should be implemented by subclass
return False
else:
return NotImplemented
def __lt__(self, other):
"""Implementation of less than operator
WARNING: The exact behavior of this isn't yet well-defined enough to be
used for consensus-critical applications.
"""
if isinstance(other, TimeAttestation):
assert self.__class__ is not other.__class__ # should be implemented by subclass
return self.TAG < other.TAG
else:
return NotImplemented
@classmethod
def deserialize(cls, ctx):
tag = ctx.read_bytes(cls.TAG_SIZE)
serialized_attestation = ctx.read_varbytes(cls.MAX_PAYLOAD_SIZE)
payload_ctx = opentimestamps.core.serialize.BytesDeserializationContext(serialized_attestation)
# FIXME: probably a better way to do this...
import opentimestamps.core.dubious.notary
if tag == PendingAttestation.TAG:
r = PendingAttestation.deserialize(payload_ctx)
elif tag == BitcoinBlockHeaderAttestation.TAG:
r = BitcoinBlockHeaderAttestation.deserialize(payload_ctx)
elif tag == opentimestamps.core.dubious.notary.EthereumBlockHeaderAttestation.TAG:
r = opentimestamps.core.dubious.notary.EthereumBlockHeaderAttestation.deserialize(payload_ctx)
else:
return UnknownAttestation(tag, serialized_attestation)
# If attestations want to have unspecified fields for future
# upgradability they should do so explicitly.
payload_ctx.assert_eof()
return r
class UnknownAttestation(TimeAttestation):
"""Placeholder for attestations that don't support"""
def __init__(self, tag, payload):
if tag.__class__ != bytes:
raise TypeError("tag must be bytes instance; got %r" % tag.__class__)
elif len(tag) != self.TAG_SIZE:
raise ValueError("tag must be exactly %d bytes long; got %d" % (self.TAG_SIZE, len(tag)))
if payload.__class__ != bytes:
raise TypeError("payload must be bytes instance; got %r" % tag.__class__)
elif len(payload) > self.MAX_PAYLOAD_SIZE:
raise ValueError("payload must be <= %d bytes long; got %d" % (self.MAX_PAYLOAD_SIZE, len(payload)))
# FIXME: we should check that tag != one of the tags that we do know
# about; if it does the operators < and =, and hash() will likely act
# strangely
self.TAG = tag
self.payload = payload
def __repr__(self):
return 'UnknownAttestation(%r, %r)' % (self.TAG, self.payload)
    def __eq__(self, other):
        if other.__class__ is UnknownAttestation:
            return self.TAG == other.TAG and self.payload == other.payload
        else:
            return super().__eq__(other)
    def __lt__(self, other):
        if other.__class__ is UnknownAttestation:
            return (self.TAG, self.payload) < (other.TAG, other.payload)
        else:
            return super().__lt__(other)
def __hash__(self):
return hash((self.TAG, self.payload))
def _serialize_payload(self, ctx):
# Notice how this is write_bytes, not write_varbytes - the latter would
# incorrectly add a length header to the actual payload.
ctx.write_bytes(self.payload)
# Note how neither of these signatures actually has the time...
class PendingAttestation(TimeAttestation):
"""Pending attestation
Commitment has been recorded in a remote calendar for future attestation,
and we have a URI to find a more complete timestamp in the future.
Nothing other than the URI is recorded, nor is there provision made to add
    extra metadata (other than the URI) in future upgrades. The rationale here
is that remote calendars promise to keep commitments indefinitely, so from
the moment they are created it should be possible to find the commitment in
the calendar. Thus if you're not satisfied with the local verifiability of
a timestamp, the correct thing to do is just ask the remote calendar if
additional attestations are available and/or when they'll be available.
    While we could include additional metadata like what types of attestations the
remote calendar expects to be able to provide in the future, that metadata
can easily change in the future too. Given that we don't expect timestamps
to normally have more than a small number of remote calendar attestations,
it'd be better to have verifiers get the most recent status of such
information (possibly with appropriate negative response caching).
"""
TAG = bytes.fromhex('83dfe30d2ef90c8e')
MAX_URI_LENGTH = 1000
"""Maximum legal URI length, in bytes"""
ALLOWED_URI_CHARS = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._/:"
"""Characters allowed in URI's
Note how we've left out the characters necessary for parameters, queries,
or fragments, as well as IPv6 [] notation, percent-encoding special
characters, and @ login notation. Hopefully this keeps us out of trouble!
"""
@classmethod
def check_uri(cls, uri):
"""Check URI for validity
Raises ValueError appropriately
"""
if len(uri) > cls.MAX_URI_LENGTH:
raise ValueError("URI exceeds maximum length")
for char in uri:
if char not in cls.ALLOWED_URI_CHARS:
raise ValueError("URI contains invalid character %r" % bytes([char]))
def __init__(self, uri):
if not isinstance(uri, str):
raise TypeError("URI must be a string")
self.check_uri(uri.encode())
self.uri = uri
def __repr__(self):
return 'PendingAttestation(%r)' % self.uri
    def __eq__(self, other):
        if other.__class__ is PendingAttestation:
            return self.uri == other.uri
        else:
            return super().__eq__(other)
    def __lt__(self, other):
        if other.__class__ is PendingAttestation:
            return self.uri < other.uri
        else:
            return super().__lt__(other)
def __hash__(self):
return hash(self.uri)
def _serialize_payload(self, ctx):
ctx.write_varbytes(self.uri.encode())
@classmethod
def deserialize(cls, ctx):
utf8_uri = ctx.read_varbytes(cls.MAX_URI_LENGTH)
try:
cls.check_uri(utf8_uri)
except ValueError as exp:
raise opentimestamps.core.serialize.DeserializationError("Invalid URI: %r" % exp)
return PendingAttestation(utf8_uri.decode())
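# Hedged round-trip sketch (comments only; uses the serialization contexts this
# module already imports):
#
#   ctx = opentimestamps.core.serialize.BytesSerializationContext()
#   PendingAttestation('https://example.calendar').serialize(ctx)
#   rctx = opentimestamps.core.serialize.BytesDeserializationContext(ctx.getbytes())
#   assert TimeAttestation.deserialize(rctx) == PendingAttestation('https://example.calendar')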
class BitcoinBlockHeaderAttestation(TimeAttestation):
"""Signed by the Bitcoin blockchain
The commitment digest will be the merkleroot of the blockheader.
The block height is recorded so that looking up the correct block header in
an external block header database doesn't require every header to be stored
locally (33MB and counting). (remember that a memory-constrained local
client can save an MMR that commits to all blocks, and use an external service to fill
in pruned details).
Otherwise no additional redundant data about the block header is recorded.
This is very intentional: since the attestation contains (nearly) the
absolute bare minimum amount of data, we encourage implementations to do
the correct thing and get the block header from a by-height index, check
that the merkleroots match, and then calculate the time from the header
information. Providing more data would encourage implementations to cheat.
Remember that the only thing that would invalidate the block height is a
reorg, but in the event of a reorg the merkleroot will be invalid anyway,
so there's no point to recording data in the attestation like the header
itself. At best that would just give us extra confirmation that a reorg
made the attestation invalid; reorgs deep enough to invalidate timestamps are
exceptionally rare events anyway, so better to just tell the user the timestamp
can't be verified rather than add almost-never tested code to handle that case
more gracefully.
"""
TAG = bytes.fromhex('0588960d73d71901')
def __init__(self, height):
self.height = height
    def __eq__(self, other):
        if other.__class__ is BitcoinBlockHeaderAttestation:
            return self.height == other.height
        else:
            return super().__eq__(other)
    def __lt__(self, other):
        if other.__class__ is BitcoinBlockHeaderAttestation:
            return self.height < other.height
        else:
            return super().__lt__(other)
def __hash__(self):
return hash(self.height)
def verify_against_blockheader(self, digest, block_header):
"""Verify attestation against a block header
Returns the block time on success; raises VerificationError on failure.
"""
if len(digest) != 32:
raise VerificationError("Expected digest with length 32 bytes; got %d bytes" % len(digest))
elif digest != block_header.hashMerkleRoot:
raise VerificationError("Digest does not match merkleroot")
return block_header.nTime
def __repr__(self):
return 'BitcoinBlockHeaderAttestation(%r)' % self.height
def _serialize_payload(self, ctx):
ctx.write_varuint(self.height)
@classmethod
def deserialize(cls, ctx):
height = ctx.read_varuint()
return BitcoinBlockHeaderAttestation(height)
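# Hedged usage sketch (illustrative only; assumes a block_header object exposing
# hashMerkleRoot and nTime, e.g. a header deserialized by python-bitcoinlib):
#
#   attestation = BitcoinBlockHeaderAttestation(358391)
#   block_time = attestation.verify_against_blockheader(digest, block_header)
#
# verify_against_blockheader() raises VerificationError when the digest is not
# 32 bytes or does not match the header's merkleroot, and returns nTime otherwise.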
| petertodd/python-opentimestamps | opentimestamps/core/notary.py | Python | lgpl-3.0 | 10,936 |
# The Hazard Library
# Copyright (C) 2014-2022 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import numpy
from openquake.hazardlib.gsim.utils import (
mblg_to_mw_johnston_96, mblg_to_mw_atkinson_boore_87, clip_mean)
from openquake.hazardlib.imt import PGA, SA
from openquake.hazardlib import gsim, InvalidFile
GSIM_PATH = gsim.__path__[0]
SUMMARY = os.path.normpath(
os.path.join(
GSIM_PATH, '../../../doc/sphinx/openquake.hazardlib.gsim.rst'))
class MblgToMwTestCase(unittest.TestCase):
def test_mblg_to_mw_johnston_96(self):
mblg = 5
mw = mblg_to_mw_johnston_96(mblg)
self.assertAlmostEqual(mw, 4.6725)
def test_mblg_to_mw_atkinson_boore_87(self):
mblg = 5
mw = mblg_to_mw_atkinson_boore_87(mblg)
self.assertAlmostEqual(mw, 4.5050)
class ClipMeanTestCase(unittest.TestCase):
def test_clip_mean(self):
mean = numpy.array([0.1, 0.2, 0.6, 1.2])
imt = PGA()
clipped_mean = clip_mean(imt, mean)
numpy.testing.assert_allclose(
[0.1, 0.2, 0.405, 0.405], clipped_mean
)
mean = numpy.array([0.1, 0.2, 0.6, 1.2])
imt = SA(period=0.1, damping=5.)
clipped_mean = clip_mean(imt, mean)
numpy.testing.assert_allclose(
[0.1, 0.2, 0.6, 1.099], clipped_mean
)
mean = numpy.array([0.1, 0.2, 0.6, 1.2])
imt = SA(period=0.6, damping=5.)
clipped_mean = clip_mean(imt, mean)
numpy.testing.assert_allclose(
[0.1, 0.2, 0.6, 1.2], clipped_mean
)
mean = numpy.array([0.1, 0.2, 0.6, 1.2])
imt = SA(period=0.01, damping=5.)
clipped_mean = clip_mean(imt, mean)
numpy.testing.assert_allclose(
[0.1, 0.2, 0.6, 1.2], clipped_mean
)
class DocumentationTestCase(unittest.TestCase):
"""Make sure each GSIM module is listed in openquake.hazardlib.gsim.rst"""
def test_documented(self):
txt = open(SUMMARY).read()
for name in os.listdir(GSIM_PATH):
if name.endswith('.py') and not name.startswith('_'):
if name[:-3] not in txt:
raise InvalidFile('%s: %s is not documented' %
(SUMMARY, name))
| gem/oq-engine | openquake/hazardlib/tests/gsim/utils_test.py | Python | agpl-3.0 | 2,918 |
# Color palette returns an array of colors (rainbow)
from matplotlib import pyplot as plt
import numpy as np
from plantcv.plantcv import params
def color_palette(num, saved=False):
"""color_palette: Returns a list of colors length num
Inputs:
num = number of colors to return.
saved = use the previously stored color scale, if any (default = False).
Returns:
colors = a list of color lists (RGB values)
:param num: int
:return colors: list
"""
# If a previous palette is saved and saved = True, return it
if params.saved_color_scale is not None and saved is True:
return params.saved_color_scale
# Retrieve the matplotlib colormap
cmap = plt.get_cmap(params.color_scale)
# Get num evenly spaced colors
colors = cmap(np.linspace(0, 1, num), bytes=True)
colors = colors[:, 0:3].tolist()
# colors are sequential, if params.color_sequence is random then shuffle the colors
if params.color_sequence == "random":
np.random.shuffle(colors)
# Save the color scale for further use
params.saved_color_scale = colors
return colors
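# Hedged usage sketch (assumes the plantcv defaults, where params.color_scale
# names a matplotlib colormap):
#
#   colors = color_palette(5)                    # five [R, G, B] lists
#   colors_again = color_palette(5, saved=True)  # reuses the memoized scale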
| danforthcenter/plantcv | plantcv/plantcv/color_palette.py | Python | mit | 1,143 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'Script that uses a config file to generate a WordPress plugin scaffold.',
'author': 'Richard Royal',
'url': 'https://github.com/richardroyal/wp-plugin-generator',
'download_url': 'https://github.com/richardroyal/wp-plugin-generator',
'author_email': '@richardroyal',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['wp_plugin_generator'],
'scripts': [],
'name': 'wp_plugin_generator'
}
setup(**config)
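# Hedged usage note: with this configuration, `python setup.py install` installs
# the wp_plugin_generator package, and `python setup.py sdist` builds a source
# distribution; both are standard distutils/setuptools commands.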
| richardroyal/wp-plugin-generator | setup.py | Python | bsd-2-clause | 579 |
from cjklib import characterlookup
cjk = characterlookup.CharacterLookup('T')
ENTRIES_PER_ROW = 8
LONELIES_PER_ROW = 16
INPUT_FILENAME = 'hsk3.txt'
CAPTION = 'HSK3 Words indexed by characters'
f = open(INPUT_FILENAME, 'r')
words = []
chars = {}
lonely_words = []
for line in f:
words.append(line.rstrip().decode('utf-8'))
def word_strokecount(s):
ret = 0
for char in s:
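        # 65279 is U+FEFF (the byte-order mark), which can leak into decoded text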
if ord(char) != 65279:
ret = ret + cjk.getStrokeCount(char)
return ret
for word in words:
for char in set(word):
if ord(char) != 65279:
if char not in chars:
chars[char] = []
chars[char].append(word);
def stringify_with_commas(s):
n = len(s)
text = ''
for word in s:
text = text + word
n = n - 1
if n > 0:
text = text + ', '
return text
print "<html><head><meta charset=\"UTF-8\"><title>Chinese word list</title></head><body>"
print "<center><table style =\"width:80%\"><tr><td>"
print "<table style=\"width:100%\">"
print "<center><h1>" + CAPTION + "</h1></center>"
c = 0
wid = 100.0 / ENTRIES_PER_ROW
strokecounts = {}
for char in chars:
strokecounts[char] = cjk.getStrokeCount(char)
for char in sorted(chars, key=lambda a: strokecounts[a]):
n = len(chars[char])
if n > 1:
if c == 0:
print "<tr>"
print "<td style=\"vertical-align:text-top; width:{:.2f}%\">".format(wid)
print "<center>"
print "<h1>" + char.encode('utf-8') + "</h1>"
        for word in sorted(chars[char], key=lambda a: len(a) * 1000 + word_strokecount(a)):
            print word.encode('utf-8')
print "<br>"
print "</center>"
c = c + 1
if c >= ENTRIES_PER_ROW:
c = 0
else:
lonely_word = chars[char][0]
any_higher = False
for char in lonely_word:
any_higher = any_higher or (len(chars[char]) > 1)
if not any_higher:
lonely_words.append(lonely_word)
print "</table>"
print "<center><h2>Words with unique characters</h2></center>"
wid2 = 100.0 / LONELIES_PER_ROW
c = 0
print "<table style=\"width:100%\">"
for word in sorted(set(lonely_words), key = lambda a: len(a) * 10000 + word_strokecount(a)):
if c == 0:
print "<tr>"
print "<td style=\"vertical-align:text-top; width:{:.2f}%\">".format(wid2)
print "<center><h2>" + word.encode('utf-8') + "</h2></center>"
c = c + 1
if c >= LONELIES_PER_ROW:
c = 0
print "</table>"
print "</table></center>"
print "</body></html>"
| hhatinen/HanziWordList | hanziwordlist.py | Python | unlicense | 2,307 |
import threading
from I_thread import IThread
from server import DatabaseServer
class ServerThread(IThread, threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self._queue = queue
def run(self):
service = DatabaseServer(self._queue)
print('starting server')
service.test_server()
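# Hedged usage sketch (assumes DatabaseServer consumes work items placed on the
# shared queue; queue.Queue is the usual cross-thread handoff):
#
#   import queue
#   q = queue.Queue()
#   ServerThread(q).start()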
| BenjaminLang/cpen_321 | Server/src/server_thread.py | Python | mit | 366 |